/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
/* NOTE(review): this file is a garbled extraction of hci_core.c — interior
 * lines (braces, locals, lock calls) are missing and the original file's
 * line numbers are fused onto each line. Reconstruct from upstream before
 * building. Comments below describe only what is visible here.
 */
/* Forward declarations of the three deferred-work handlers (RX, command
 * queue, TX) that drive the HCI data path. */
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
/* Global list of registered HCI devices, guarded by hci_dev_list_lock. */
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
57 /* ---- HCI notifications ---- */
/* Forward a device lifecycle event to the HCI socket layer. */
59 static void hci_notify(struct hci_dev *hdev, int event)
61 hci_sock_dev_event(hdev, event);
64 /* ---- HCI debugfs entries ---- */
/* debugfs "dut_mode" attribute: expose and toggle Device Under Test mode.
 * NOTE(review): extraction dropped interior lines (buffer declaration,
 * braces, error paths) — see upstream hci_core.c. */
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
69 struct hci_dev *hdev = file->private_data;
/* Report 'Y'/'N' from the HCI_DUT_MODE debug flag. */
72 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
81 struct hci_dev *hdev = file->private_data;
/* Clamp the user copy to the local buffer, leaving room for '\0'. */
84 size_t buf_size = min(count, (sizeof(buf)-1));
/* DUT mode can only be changed while the controller is up. */
88 if (!test_bit(HCI_UP, &hdev->flags))
91 if (copy_from_user(buf, user_buf, buf_size))
95 if (strtobool(buf, &enable))
/* Nothing to do when the requested state matches the current flag. */
98 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
/* Enter DUT mode, or issue HCI_Reset to leave it again. */
103 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
106 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
108 hci_req_unlock(hdev);
/* First byte of the command-complete payload is the HCI status code. */
113 err = -bt_to_errno(skb->data[0]);
/* Command succeeded — flip the cached debug flag. */
119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
/* file_operations for the "dut_mode" debugfs entry. */
124 static const struct file_operations dut_mode_fops = {
126 .read = dut_mode_read,
127 .write = dut_mode_write,
128 .llseek = default_llseek,
/* debugfs "features": dump every LMP feature page (and the LE feature
 * bytes when the controller is LE-capable) as hex via seq_file.
 * NOTE(review): extraction dropped interior lines (braces, hci_dev_lock,
 * loop variable declaration) — see upstream hci_core.c. */
131 static int features_show(struct seq_file *f, void *ptr)
133 struct hci_dev *hdev = f->private;
/* One line per feature page, eight bytes each, bounded by max_page. */
137 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
138 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
139 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140 hdev->features[p][0], hdev->features[p][1],
141 hdev->features[p][2], hdev->features[p][3],
142 hdev->features[p][4], hdev->features[p][5],
143 hdev->features[p][6], hdev->features[p][7]);
145 if (lmp_le_capable(hdev))
146 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148 hdev->le_features[0], hdev->le_features[1],
149 hdev->le_features[2], hdev->le_features[3],
150 hdev->le_features[4], hdev->le_features[5],
151 hdev->le_features[6], hdev->le_features[7]);
152 hci_dev_unlock(hdev);
/* Standard single_open/seq_file boilerplate for the entry. */
157 static int features_open(struct inode *inode, struct file *file)
159 return single_open(file, features_show, inode->i_private);
162 static const struct file_operations features_fops = {
163 .open = features_open,
166 .release = single_release,
/* debugfs "blacklist" and "whitelist": list each bdaddr_list entry as
 * "<address> (type <n>)".
 * NOTE(review): extraction dropped interior lines (braces, hci_dev_lock,
 * return statements) — see upstream hci_core.c. */
169 static int blacklist_show(struct seq_file *f, void *p)
171 struct hci_dev *hdev = f->private;
172 struct bdaddr_list *b;
175 list_for_each_entry(b, &hdev->blacklist, list)
176 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
177 hci_dev_unlock(hdev);
182 static int blacklist_open(struct inode *inode, struct file *file)
184 return single_open(file, blacklist_show, inode->i_private);
187 static const struct file_operations blacklist_fops = {
188 .open = blacklist_open,
191 .release = single_release,
/* Same dump format for the connection whitelist. */
194 static int whitelist_show(struct seq_file *f, void *p)
196 struct hci_dev *hdev = f->private;
197 struct bdaddr_list *b;
200 list_for_each_entry(b, &hdev->whitelist, list)
201 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
202 hci_dev_unlock(hdev);
207 static int whitelist_open(struct inode *inode, struct file *file)
209 return single_open(file, whitelist_show, inode->i_private);
212 static const struct file_operations whitelist_fops = {
213 .open = whitelist_open,
216 .release = single_release,
/* debugfs "uuids": print every registered service UUID in %pUb order.
 * NOTE(review): extraction dropped interior lines (braces, hci_dev_lock,
 * the val[] declaration) — see upstream hci_core.c. */
219 static int uuids_show(struct seq_file *f, void *p)
221 struct hci_dev *hdev = f->private;
222 struct bt_uuid *uuid;
225 list_for_each_entry(uuid, &hdev->uuids, list) {
228 /* The Bluetooth UUID values are stored in big endian,
229 * but with reversed byte order. So convert them into
230 * the right order for the %pUb modifier.
/* Reverse the 16 stored bytes into val[] before printing. */
232 for (i = 0; i < 16; i++)
233 val[i] = uuid->uuid[15 - i];
235 seq_printf(f, "%pUb\n", val);
237 hci_dev_unlock(hdev);
242 static int uuids_open(struct inode *inode, struct file *file)
244 return single_open(file, uuids_show, inode->i_private);
247 static const struct file_operations uuids_fops = {
251 .release = single_release,
/* debugfs "inquiry_cache": dump each cached inquiry-result entry —
 * address, page-scan fields, device class, clock offset, RSSI, SSP mode
 * and the entry timestamp. */
254 static int inquiry_cache_show(struct seq_file *f, void *p)
256 struct hci_dev *hdev = f->private;
257 struct discovery_state *cache = &hdev->discovery;
258 struct inquiry_entry *e;
262 list_for_each_entry(e, &cache->all, all) {
263 struct inquiry_data *data = &e->data;
264 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
266 data->pscan_rep_mode, data->pscan_period_mode,
267 data->pscan_mode, data->dev_class[2],
268 data->dev_class[1], data->dev_class[0],
269 __le16_to_cpu(data->clock_offset),
270 data->rssi, data->ssp_mode, e->timestamp);
273 hci_dev_unlock(hdev);
278 static int inquiry_cache_open(struct inode *inode, struct file *file)
280 return single_open(file, inquiry_cache_show, inode->i_private);
283 static const struct file_operations inquiry_cache_fops = {
284 .open = inquiry_cache_open,
287 .release = single_release,
/* debugfs "link_keys": one line per stored BR/EDR link key — address,
 * key type, key bytes and PIN length.
 * NOTE(review): extraction dropped interior lines (braces, hci_dev_lock,
 * returns) — see upstream hci_core.c. */
290 static int link_keys_show(struct seq_file *f, void *ptr)
292 struct hci_dev *hdev = f->private;
293 struct list_head *p, *n;
296 list_for_each_safe(p, n, &hdev->link_keys) {
297 struct link_key *key = list_entry(p, struct link_key, list);
298 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
299 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
301 hci_dev_unlock(hdev);
306 static int link_keys_open(struct inode *inode, struct file *file)
308 return single_open(file, link_keys_show, inode->i_private);
311 static const struct file_operations link_keys_fops = {
312 .open = link_keys_open,
315 .release = single_release,
/* debugfs "dev_class": the 3-byte Class of Device, printed MSB first. */
318 static int dev_class_show(struct seq_file *f, void *ptr)
320 struct hci_dev *hdev = f->private;
323 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
324 hdev->dev_class[1], hdev->dev_class[0]);
325 hci_dev_unlock(hdev);
330 static int dev_class_open(struct inode *inode, struct file *file)
332 return single_open(file, dev_class_show, inode->i_private);
335 static const struct file_operations dev_class_fops = {
336 .open = dev_class_open,
339 .release = single_release,
/* debugfs "voice_setting" (read-only) and "auto_accept_delay" (read/write)
 * simple u64 attributes.
 * NOTE(review): extraction dropped interior lines (braces, hci_dev_lock,
 * returns) — see upstream hci_core.c. */
342 static int voice_setting_get(void *data, u64 *val)
344 struct hci_dev *hdev = data;
347 *val = hdev->voice_setting;
348 hci_dev_unlock(hdev);
/* Read-only attribute: no set handler, printed as 4-digit hex. */
353 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
354 NULL, "0x%4.4llx\n");
356 static int auto_accept_delay_set(void *data, u64 val)
358 struct hci_dev *hdev = data;
361 hdev->auto_accept_delay = val;
362 hci_dev_unlock(hdev);
367 static int auto_accept_delay_get(void *data, u64 *val)
369 struct hci_dev *hdev = data;
372 *val = hdev->auto_accept_delay;
373 hci_dev_unlock(hdev);
378 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
379 auto_accept_delay_set, "%llu\n");
/* debugfs "force_sc_support": boolean debug flag forcing Secure
 * Connections support; only writable while the controller is DOWN
 * (note the inverted HCI_UP check vs. dut_mode_write).
 * NOTE(review): extraction dropped interior lines (buffer declaration,
 * braces, error returns) — see upstream hci_core.c. */
381 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
382 size_t count, loff_t *ppos)
384 struct hci_dev *hdev = file->private_data;
387 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
390 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
393 static ssize_t force_sc_support_write(struct file *file,
394 const char __user *user_buf,
395 size_t count, loff_t *ppos)
397 struct hci_dev *hdev = file->private_data;
399 size_t buf_size = min(count, (sizeof(buf)-1));
/* Refuse changes while the controller is up. */
402 if (test_bit(HCI_UP, &hdev->flags))
405 if (copy_from_user(buf, user_buf, buf_size))
408 buf[buf_size] = '\0';
409 if (strtobool(buf, &enable))
/* No-op when the requested state already matches the flag. */
412 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
415 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
420 static const struct file_operations force_sc_support_fops = {
422 .read = force_sc_support_read,
423 .write = force_sc_support_write,
424 .llseek = default_llseek,
/* debugfs "sc_only_mode": read-only Y/N view of HCI_SC_ONLY. */
427 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
428 size_t count, loff_t *ppos)
430 struct hci_dev *hdev = file->private_data;
433 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
436 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
439 static const struct file_operations sc_only_mode_fops = {
441 .read = sc_only_mode_read,
442 .llseek = default_llseek,
/* debugfs u64 attributes: idle_timeout, rpa_timeout and the sniff
 * min/max interval pair. Each setter validates its range before storing.
 * NOTE(review): extraction dropped interior lines (braces, hci_dev_lock,
 * returns) — see upstream hci_core.c. */
445 static int idle_timeout_set(void *data, u64 val)
447 struct hci_dev *hdev = data;
/* 0 disables the timeout; otherwise 500 ms .. 3600000 ms (1 hour). */
449 if (val != 0 && (val < 500 || val > 3600000))
453 hdev->idle_timeout = val;
454 hci_dev_unlock(hdev);
459 static int idle_timeout_get(void *data, u64 *val)
461 struct hci_dev *hdev = data;
464 *val = hdev->idle_timeout;
465 hci_dev_unlock(hdev);
470 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
471 idle_timeout_set, "%llu\n");
473 static int rpa_timeout_set(void *data, u64 val)
475 struct hci_dev *hdev = data;
477 /* Require the RPA timeout to be at least 30 seconds and at most
/* ... 24 hours (60 * 60 * 24 seconds). */
480 if (val < 30 || val > (60 * 60 * 24))
484 hdev->rpa_timeout = val;
485 hci_dev_unlock(hdev);
490 static int rpa_timeout_get(void *data, u64 *val)
492 struct hci_dev *hdev = data;
495 *val = hdev->rpa_timeout;
496 hci_dev_unlock(hdev);
501 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
502 rpa_timeout_set, "%llu\n");
/* Sniff intervals: must be non-zero, even, and keep min <= max. */
504 static int sniff_min_interval_set(void *data, u64 val)
506 struct hci_dev *hdev = data;
508 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
512 hdev->sniff_min_interval = val;
513 hci_dev_unlock(hdev);
518 static int sniff_min_interval_get(void *data, u64 *val)
520 struct hci_dev *hdev = data;
523 *val = hdev->sniff_min_interval;
524 hci_dev_unlock(hdev);
529 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
530 sniff_min_interval_set, "%llu\n");
532 static int sniff_max_interval_set(void *data, u64 val)
534 struct hci_dev *hdev = data;
536 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
540 hdev->sniff_max_interval = val;
541 hci_dev_unlock(hdev);
546 static int sniff_max_interval_get(void *data, u64 *val)
548 struct hci_dev *hdev = data;
551 *val = hdev->sniff_max_interval;
552 hci_dev_unlock(hdev);
557 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
558 sniff_max_interval_set, "%llu\n");
/* debugfs "conn_info_min_age"/"conn_info_max_age": bounds of the
 * connection-information cache age; setters keep 0 < min <= max.
 * NOTE(review): extraction dropped interior lines (braces, hci_dev_lock,
 * returns) — see upstream hci_core.c. */
560 static int conn_info_min_age_set(void *data, u64 val)
562 struct hci_dev *hdev = data;
564 if (val == 0 || val > hdev->conn_info_max_age)
568 hdev->conn_info_min_age = val;
569 hci_dev_unlock(hdev);
574 static int conn_info_min_age_get(void *data, u64 *val)
576 struct hci_dev *hdev = data;
579 *val = hdev->conn_info_min_age;
580 hci_dev_unlock(hdev);
585 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
586 conn_info_min_age_set, "%llu\n");
588 static int conn_info_max_age_set(void *data, u64 val)
590 struct hci_dev *hdev = data;
592 if (val == 0 || val < hdev->conn_info_min_age)
596 hdev->conn_info_max_age = val;
597 hci_dev_unlock(hdev);
602 static int conn_info_max_age_get(void *data, u64 *val)
604 struct hci_dev *hdev = data;
607 *val = hdev->conn_info_max_age;
608 hci_dev_unlock(hdev);
613 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
614 conn_info_max_age_set, "%llu\n");
/* debugfs address entries: "identity" (identity address, IRK and current
 * RPA), "random_address", "static_address", and the writable
 * "force_static_address" debug flag.
 * NOTE(review): extraction dropped interior lines (braces, hci_dev_lock,
 * local declarations, returns) — see upstream hci_core.c. */
616 static int identity_show(struct seq_file *f, void *p)
618 struct hci_dev *hdev = f->private;
/* Resolve the effective identity address and its type. */
624 hci_copy_identity_address(hdev, &addr, &addr_type);
626 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
627 16, hdev->irk, &hdev->rpa);
629 hci_dev_unlock(hdev);
634 static int identity_open(struct inode *inode, struct file *file)
636 return single_open(file, identity_show, inode->i_private);
639 static const struct file_operations identity_fops = {
640 .open = identity_open,
643 .release = single_release,
646 static int random_address_show(struct seq_file *f, void *p)
648 struct hci_dev *hdev = f->private;
651 seq_printf(f, "%pMR\n", &hdev->random_addr);
652 hci_dev_unlock(hdev);
657 static int random_address_open(struct inode *inode, struct file *file)
659 return single_open(file, random_address_show, inode->i_private);
662 static const struct file_operations random_address_fops = {
663 .open = random_address_open,
666 .release = single_release,
669 static int static_address_show(struct seq_file *f, void *p)
671 struct hci_dev *hdev = f->private;
674 seq_printf(f, "%pMR\n", &hdev->static_addr);
675 hci_dev_unlock(hdev);
680 static int static_address_open(struct inode *inode, struct file *file)
682 return single_open(file, static_address_show, inode->i_private);
685 static const struct file_operations static_address_fops = {
686 .open = static_address_open,
689 .release = single_release,
/* "force_static_address": Y/N flag, only writable while the controller
 * is down (same pattern as force_sc_support). */
692 static ssize_t force_static_address_read(struct file *file,
693 char __user *user_buf,
694 size_t count, loff_t *ppos)
696 struct hci_dev *hdev = file->private_data;
699 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
702 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
705 static ssize_t force_static_address_write(struct file *file,
706 const char __user *user_buf,
707 size_t count, loff_t *ppos)
709 struct hci_dev *hdev = file->private_data;
711 size_t buf_size = min(count, (sizeof(buf)-1));
714 if (test_bit(HCI_UP, &hdev->flags))
717 if (copy_from_user(buf, user_buf, buf_size))
720 buf[buf_size] = '\0';
721 if (strtobool(buf, &enable))
724 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
727 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
732 static const struct file_operations force_static_address_fops = {
734 .read = force_static_address_read,
735 .write = force_static_address_write,
736 .llseek = default_llseek,
/* debugfs LE security-material entries: the controller LE white list,
 * the Identity Resolving Keys and the Long Term Keys.
 * NOTE(review): extraction dropped interior lines (braces, hci_dev_lock,
 * returns) — see upstream hci_core.c. */
739 static int white_list_show(struct seq_file *f, void *ptr)
741 struct hci_dev *hdev = f->private;
742 struct bdaddr_list *b;
745 list_for_each_entry(b, &hdev->le_white_list, list)
746 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
747 hci_dev_unlock(hdev);
752 static int white_list_open(struct inode *inode, struct file *file)
754 return single_open(file, white_list_show, inode->i_private);
757 static const struct file_operations white_list_fops = {
758 .open = white_list_open,
761 .release = single_release,
/* Each IRK line: peer address/type, 16-byte key, current RPA. */
764 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
766 struct hci_dev *hdev = f->private;
767 struct list_head *p, *n;
770 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
771 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
772 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
773 &irk->bdaddr, irk->addr_type,
774 16, irk->val, &irk->rpa);
776 hci_dev_unlock(hdev);
781 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
783 return single_open(file, identity_resolving_keys_show,
787 static const struct file_operations identity_resolving_keys_fops = {
788 .open = identity_resolving_keys_open,
791 .release = single_release,
/* Each LTK line: address/type, auth, key type, enc size, EDIV, Rand, key. */
794 static int long_term_keys_show(struct seq_file *f, void *ptr)
796 struct hci_dev *hdev = f->private;
797 struct list_head *p, *n;
800 list_for_each_safe(p, n, &hdev->long_term_keys) {
801 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
802 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
803 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
804 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
805 __le64_to_cpu(ltk->rand), 16, ltk->val);
807 hci_dev_unlock(hdev);
812 static int long_term_keys_open(struct inode *inode, struct file *file)
814 return single_open(file, long_term_keys_show, inode->i_private);
817 static const struct file_operations long_term_keys_fops = {
818 .open = long_term_keys_open,
821 .release = single_release,
/* debugfs LE connection parameters: connection interval min/max,
 * slave latency, supervision timeout and advertising channel map.
 * Setters enforce the valid HCI ranges before storing.
 * NOTE(review): extraction dropped interior lines (braces, hci_dev_lock,
 * returns, the latency range check) — see upstream hci_core.c. */
824 static int conn_min_interval_set(void *data, u64 val)
826 struct hci_dev *hdev = data;
/* Spec range 0x0006..0x0c80, and keep min <= max. */
828 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
832 hdev->le_conn_min_interval = val;
833 hci_dev_unlock(hdev);
838 static int conn_min_interval_get(void *data, u64 *val)
840 struct hci_dev *hdev = data;
843 *val = hdev->le_conn_min_interval;
844 hci_dev_unlock(hdev);
849 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
850 conn_min_interval_set, "%llu\n");
852 static int conn_max_interval_set(void *data, u64 val)
854 struct hci_dev *hdev = data;
856 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
860 hdev->le_conn_max_interval = val;
861 hci_dev_unlock(hdev);
866 static int conn_max_interval_get(void *data, u64 *val)
868 struct hci_dev *hdev = data;
871 *val = hdev->le_conn_max_interval;
872 hci_dev_unlock(hdev);
877 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
878 conn_max_interval_set, "%llu\n");
880 static int conn_latency_set(void *data, u64 val)
882 struct hci_dev *hdev = data;
888 hdev->le_conn_latency = val;
889 hci_dev_unlock(hdev);
894 static int conn_latency_get(void *data, u64 *val)
896 struct hci_dev *hdev = data;
899 *val = hdev->le_conn_latency;
900 hci_dev_unlock(hdev);
905 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
906 conn_latency_set, "%llu\n");
/* Supervision timeout: spec range 0x000a..0x0c80. */
908 static int supervision_timeout_set(void *data, u64 val)
910 struct hci_dev *hdev = data;
912 if (val < 0x000a || val > 0x0c80)
916 hdev->le_supv_timeout = val;
917 hci_dev_unlock(hdev);
922 static int supervision_timeout_get(void *data, u64 *val)
924 struct hci_dev *hdev = data;
927 *val = hdev->le_supv_timeout;
928 hci_dev_unlock(hdev);
933 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
934 supervision_timeout_set, "%llu\n");
/* Channel map: 3-bit mask of advertising channels 37/38/39 (0x01..0x07). */
936 static int adv_channel_map_set(void *data, u64 val)
938 struct hci_dev *hdev = data;
940 if (val < 0x01 || val > 0x07)
944 hdev->le_adv_channel_map = val;
945 hci_dev_unlock(hdev);
950 static int adv_channel_map_get(void *data, u64 *val)
952 struct hci_dev *hdev = data;
955 *val = hdev->le_adv_channel_map;
956 hci_dev_unlock(hdev);
961 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
962 adv_channel_map_set, "%llu\n");
/* debugfs "device_list": one line per LE connection-parameter entry
 * (address, address type, and — per upstream — the auto_connect value
 * on the truncated seq_printf line).
 * NOTE(review): extraction dropped interior lines (braces, hci_dev_lock,
 * the third seq_printf argument) — see upstream hci_core.c. */
964 static int device_list_show(struct seq_file *f, void *ptr)
966 struct hci_dev *hdev = f->private;
967 struct hci_conn_params *p;
970 list_for_each_entry(p, &hdev->le_conn_params, list) {
971 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
974 hci_dev_unlock(hdev);
979 static int device_list_open(struct inode *inode, struct file *file)
981 return single_open(file, device_list_show, inode->i_private);
984 static const struct file_operations device_list_fops = {
985 .open = device_list_open,
988 .release = single_release,
991 /* ---- HCI requests ---- */
/* Completion callback for synchronous HCI requests: record the result,
 * mark the request done and wake the sleeping waiter.
 * NOTE(review): extraction dropped braces — see upstream hci_core.c. */
993 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
995 BT_DBG("%s result 0x%2.2x", hdev->name, result);
997 if (hdev->req_status == HCI_REQ_PEND) {
998 hdev->req_result = result;
999 hdev->req_status = HCI_REQ_DONE;
1000 wake_up_interruptible(&hdev->req_wait_q);
/* Cancel a pending synchronous request with the given errno and wake
 * the waiter so it can observe HCI_REQ_CANCELED. */
1004 static void hci_req_cancel(struct hci_dev *hdev, int err)
1006 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1008 if (hdev->req_status == HCI_REQ_PEND) {
1009 hdev->req_result = err;
1010 hdev->req_status = HCI_REQ_CANCELED;
1011 wake_up_interruptible(&hdev->req_wait_q);
/* Take ownership of hdev->recv_evt and validate that it is the event we
 * waited for: either the requested event code, or a Command Complete for
 * the given opcode. Returns the skb on match, ERR_PTR(-ENODATA) when the
 * event is absent or doesn't match.
 * NOTE(review): extraction dropped interior lines (braces, hci_dev_lock,
 * the failed/kfree_skb paths, return skb) — see upstream hci_core.c. */
1015 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1018 struct hci_ev_cmd_complete *ev;
1019 struct hci_event_hdr *hdr;
1020 struct sk_buff *skb;
/* Detach the last received event from the device under the lock. */
1024 skb = hdev->recv_evt;
1025 hdev->recv_evt = NULL;
1027 hci_dev_unlock(hdev);
1030 return ERR_PTR(-ENODATA);
/* Sanity-check the event header before touching its fields. */
1032 if (skb->len < sizeof(*hdr)) {
1033 BT_ERR("Too short HCI event");
1037 hdr = (void *) skb->data;
1038 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* When a specific event was requested, match on the event code alone. */
1041 if (hdr->evt != event)
1046 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1047 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1051 if (skb->len < sizeof(*ev)) {
1052 BT_ERR("Too short cmd_complete event");
1056 ev = (void *) skb->data;
1057 skb_pull(skb, sizeof(*ev));
/* Command Complete must carry the opcode we issued. */
1059 if (opcode == __le16_to_cpu(ev->opcode))
1062 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1063 __le16_to_cpu(ev->opcode));
1067 return ERR_PTR(-ENODATA);
/* Send a single HCI command and sleep (interruptibly, bounded by
 * timeout) until it completes, then return the matching event skb via
 * hci_get_cmd_complete(). Errors are returned as ERR_PTR().
 * NOTE(review): extraction dropped interior lines (braces, err checks,
 * the HCI_REQ_DONE case label, default case) — see upstream hci_core.c. */
1070 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1071 const void *param, u8 event, u32 timeout)
1073 DECLARE_WAITQUEUE(wait, current);
1074 struct hci_request req;
1077 BT_DBG("%s", hdev->name);
1079 hci_req_init(&req, hdev);
1081 hci_req_add_ev(&req, opcode, plen, param, event);
/* Mark the request pending before it can possibly complete. */
1083 hdev->req_status = HCI_REQ_PEND;
1085 err = hci_req_run(&req, hci_req_sync_complete);
1087 return ERR_PTR(err);
/* Classic sleep-on-waitqueue pattern around schedule_timeout(). */
1089 add_wait_queue(&hdev->req_wait_q, &wait);
1090 set_current_state(TASK_INTERRUPTIBLE);
1092 schedule_timeout(timeout);
1094 remove_wait_queue(&hdev->req_wait_q, &wait);
/* A signal aborts the wait without consuming the request result. */
1096 if (signal_pending(current))
1097 return ERR_PTR(-EINTR);
1099 switch (hdev->req_status) {
/* DONE: translate the HCI status into a negative errno. */
1101 err = -bt_to_errno(hdev->req_result);
1104 case HCI_REQ_CANCELED:
1105 err = -hdev->req_result;
/* Reset request bookkeeping for the next caller. */
1113 hdev->req_status = hdev->req_result = 0;
1115 BT_DBG("%s end: err %d", hdev->name, err);
1118 return ERR_PTR(err);
1120 return hci_get_cmd_complete(hdev, opcode, event);
1122 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous command expecting the default
 * Command Complete event (event == 0). */
1124 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1125 const void *param, u32 timeout)
1127 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1129 EXPORT_SYMBOL(__hci_cmd_sync);
1131 /* Execute request and wait for completion. */
/* Builds a request via the caller-supplied func(), runs it, and sleeps
 * until hci_req_sync_complete() fires or the timeout expires.
 * NOTE(review): extraction dropped interior lines (braces, the func(&req,
 * opt) invocation, err handling, case labels) — see upstream hci_core.c. */
1132 static int __hci_req_sync(struct hci_dev *hdev,
1133 void (*func)(struct hci_request *req,
1135 unsigned long opt, __u32 timeout)
1137 struct hci_request req;
1138 DECLARE_WAITQUEUE(wait, current);
1141 BT_DBG("%s start", hdev->name);
1143 hci_req_init(&req, hdev);
1145 hdev->req_status = HCI_REQ_PEND;
1149 err = hci_req_run(&req, hci_req_sync_complete);
1151 hdev->req_status = 0;
1153 /* ENODATA means the HCI request command queue is empty.
1154 * This can happen when a request with conditionals doesn't
1155 * trigger any commands to be sent. This is normal behavior
1156 * and should not trigger an error return.
1158 if (err == -ENODATA)
/* Sleep until completion, cancellation, timeout or a signal. */
1164 add_wait_queue(&hdev->req_wait_q, &wait);
1165 set_current_state(TASK_INTERRUPTIBLE);
1167 schedule_timeout(timeout);
1169 remove_wait_queue(&hdev->req_wait_q, &wait);
1171 if (signal_pending(current))
1174 switch (hdev->req_status) {
1176 err = -bt_to_errno(hdev->req_result);
1179 case HCI_REQ_CANCELED:
1180 err = -hdev->req_result;
1188 hdev->req_status = hdev->req_result = 0;
1190 BT_DBG("%s end: err %d", hdev->name, err);
/* Public wrapper: refuse when the device is down, and serialize all
 * synchronous requests under the request lock. */
1195 static int hci_req_sync(struct hci_dev *hdev,
1196 void (*req)(struct hci_request *req,
1198 unsigned long opt, __u32 timeout)
1202 if (!test_bit(HCI_UP, &hdev->flags))
1205 /* Serialize all requests */
1207 ret = __hci_req_sync(hdev, req, opt, timeout);
1208 hci_req_unlock(hdev);
/* Stage-1 init request builders: controller reset, BR/EDR basics and
 * AMP controller basics.
 * NOTE(review): extraction dropped interior lines (braces, the dev_type
 * switch cases) — see upstream hci_core.c. */
1213 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1215 BT_DBG("%s %ld", req->hdev->name, opt);
/* Flag the reset in progress so events during reset are handled. */
1218 set_bit(HCI_RESET, &req->hdev->flags);
1219 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Minimal BR/EDR bring-up: features, version and BD address. */
1222 static void bredr_init(struct hci_request *req)
1224 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1226 /* Read Local Supported Features */
1227 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1229 /* Read Local Version */
1230 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1232 /* Read BD Address */
1233 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* AMP controllers use block-based flow control and a different set of
 * discovery commands. */
1236 static void amp_init(struct hci_request *req)
1238 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1240 /* Read Local Version */
1241 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1243 /* Read Local Supported Commands */
1244 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1246 /* Read Local Supported Features */
1247 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1249 /* Read Local AMP Info */
1250 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1252 /* Read Data Blk size */
1253 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1255 /* Read Flow Control Mode */
1256 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1258 /* Read Location Data */
1259 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
/* Stage 1: optionally reset, then dispatch on device type (BR/EDR vs
 * AMP; case bodies missing in this extraction). */
1262 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1264 struct hci_dev *hdev = req->hdev;
1266 BT_DBG("%s %ld", hdev->name, opt);
/* Some controllers are reset on close instead — skip the extra reset. */
1269 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1270 hci_reset_req(req, 0);
1272 switch (hdev->dev_type) {
1282 BT_ERR("Unknown device type %d", hdev->dev_type);
/* Stage-2 BR/EDR setup: read controller state, clear event filters,
 * set the connection accept timeout.
 * NOTE(review): extraction dropped interior lines (braces, local
 * declarations for flt_type/param) — see upstream hci_core.c. */
1287 static void bredr_setup(struct hci_request *req)
1289 struct hci_dev *hdev = req->hdev;
1294 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1295 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1297 /* Read Class of Device */
1298 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1300 /* Read Local Name */
1301 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1303 /* Read Voice Setting */
1304 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1306 /* Read Number of Supported IAC */
1307 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1309 /* Read Current IAC LAP */
1310 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1312 /* Clear Event Filters */
1313 flt_type = HCI_FLT_CLEAR_ALL;
1314 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1316 /* Connection accept timeout ~20 secs */
1317 param = cpu_to_le16(0x7d00);
1318 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1320 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1321 * but it does not support page scan related HCI commands.
1323 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1324 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1325 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
/* Stage-2 LE setup: read LE controller capabilities and clear the
 * controller white list. */
1329 static void le_setup(struct hci_request *req)
1331 struct hci_dev *hdev = req->hdev;
1333 /* Read LE Buffer Size */
1334 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1336 /* Read LE Local Supported Features */
1337 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1339 /* Read LE Supported States */
1340 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1342 /* Read LE Advertising Channel TX Power */
1343 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1345 /* Read LE White List Size */
1346 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1348 /* Clear LE White List */
1349 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1351 /* LE-only controllers have LE implicitly enabled */
1352 if (!lmp_bredr_capable(hdev))
1353 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Pick the richest inquiry mode the controller supports, with quirks
 * for specific manufacturer/revision combinations known to misreport
 * their capabilities.
 * NOTE(review): extraction dropped the return-value lines and braces —
 * see upstream hci_core.c. */
1356 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1358 if (lmp_ext_inq_capable(hdev))
1361 if (lmp_inq_rssi_capable(hdev))
/* Known-broken firmware: fall back despite advertised capability. */
1364 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1365 hdev->lmp_subver == 0x0757)
1368 if (hdev->manufacturer == 15) {
1369 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1371 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1373 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1377 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1378 hdev->lmp_subver == 0x1805)
/* Queue a Write Inquiry Mode command for the selected mode. */
1384 static void hci_setup_inquiry_mode(struct hci_request *req)
1388 mode = hci_get_inquiry_mode(req->hdev);
1390 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
/* Build and send the HCI Set Event Mask: start from a BR/EDR default,
 * or a minimal LE-only default, then enable optional event bits for
 * each supported LMP feature.
 * NOTE(review): extraction dropped braces and the else line before the
 * LE-only default — see upstream hci_core.c. */
1393 static void hci_setup_event_mask(struct hci_request *req)
1395 struct hci_dev *hdev = req->hdev;
1397 /* The second byte is 0xff instead of 0x9f (two reserved bits
1398 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1399 * command otherwise.
1401 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1403 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1404 * any event mask for pre 1.2 devices.
1406 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1409 if (lmp_bredr_capable(hdev)) {
1410 events[4] |= 0x01; /* Flow Specification Complete */
1411 events[4] |= 0x02; /* Inquiry Result with RSSI */
1412 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1413 events[5] |= 0x08; /* Synchronous Connection Complete */
1414 events[5] |= 0x10; /* Synchronous Connection Changed */
1416 /* Use a different default for LE-only devices */
1417 memset(events, 0, sizeof(events));
1418 events[0] |= 0x10; /* Disconnection Complete */
1419 events[0] |= 0x80; /* Encryption Change */
1420 events[1] |= 0x08; /* Read Remote Version Information Complete */
1421 events[1] |= 0x20; /* Command Complete */
1422 events[1] |= 0x40; /* Command Status */
1423 events[1] |= 0x80; /* Hardware Error */
1424 events[2] |= 0x04; /* Number of Completed Packets */
1425 events[3] |= 0x02; /* Data Buffer Overflow */
1426 events[5] |= 0x80; /* Encryption Key Refresh Complete */
/* Feature-conditional event bits, per the Core Specification. */
1429 if (lmp_inq_rssi_capable(hdev))
1430 events[4] |= 0x02; /* Inquiry Result with RSSI */
1432 if (lmp_sniffsubr_capable(hdev))
1433 events[5] |= 0x20; /* Sniff Subrating */
1435 if (lmp_pause_enc_capable(hdev))
1436 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1438 if (lmp_ext_inq_capable(hdev))
1439 events[5] |= 0x40; /* Extended Inquiry Result */
1441 if (lmp_no_flush_capable(hdev))
1442 events[7] |= 0x01; /* Enhanced Flush Complete */
1444 if (lmp_lsto_capable(hdev))
1445 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1447 if (lmp_ssp_capable(hdev)) {
1448 events[6] |= 0x01; /* IO Capability Request */
1449 events[6] |= 0x02; /* IO Capability Response */
1450 events[6] |= 0x04; /* User Confirmation Request */
1451 events[6] |= 0x08; /* User Passkey Request */
1452 events[6] |= 0x10; /* Remote OOB Data Request */
1453 events[6] |= 0x20; /* Simple Pairing Complete */
1454 events[7] |= 0x04; /* User Passkey Notification */
1455 events[7] |= 0x08; /* Keypress Notification */
1456 events[7] |= 0x10; /* Remote Host Supported
1457 * Features Notification
1461 if (lmp_le_capable(hdev))
1462 events[7] |= 0x20; /* LE Meta-Event */
1464 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage-2 init: run the transport-specific setup, program the event
 * mask, and configure SSP/EIR, inquiry mode, extended features and
 * authentication based on capabilities.
 * NOTE(review): extraction dropped interior lines (braces, else
 * branches, bredr_setup/le_setup calls, local mode/enable variables,
 * cp initialization) — see upstream hci_core.c. */
1467 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1469 struct hci_dev *hdev = req->hdev;
1471 if (lmp_bredr_capable(hdev))
1474 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1476 if (lmp_le_capable(hdev))
1479 hci_setup_event_mask(req);
1481 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1482 * local supported commands HCI command.
1484 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1485 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1487 if (lmp_ssp_capable(hdev)) {
1488 /* When SSP is available, then the host features page
1489 * should also be available as well. However some
1490 * controllers list the max_page as 0 as long as SSP
1491 * has not been enabled. To achieve proper debugging
1492 * output, force the minimum max_page to 1 at least.
1494 hdev->max_page = 0x01;
1496 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
/* SSP enabled by the host: turn on SSP mode in the controller. */
1498 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1499 sizeof(mode), &mode);
/* No SSP: clear any stale EIR data in host and controller. */
1501 struct hci_cp_write_eir cp;
1503 memset(hdev->eir, 0, sizeof(hdev->eir));
1504 memset(&cp, 0, sizeof(cp));
1506 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1510 if (lmp_inq_rssi_capable(hdev))
1511 hci_setup_inquiry_mode(req);
1513 if (lmp_inq_tx_pwr_capable(hdev))
1514 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1516 if (lmp_ext_feat_capable(hdev)) {
1517 struct hci_cp_read_local_ext_features cp;
1520 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Mirror the host's link-security setting into the controller. */
1524 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1526 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link-policy bitmask from the controller's LMP
 * capabilities (role switch, hold, sniff, park) and queue the
 * Write Default Link Policy command. */
1531 static void hci_setup_link_policy(struct hci_request *req)
1533 struct hci_dev *hdev = req->hdev;
1534 struct hci_cp_write_def_link_policy cp;
1535 u16 link_policy = 0;
1537 if (lmp_rswitch_capable(hdev))
1538 link_policy |= HCI_LP_RSWITCH;
1539 if (lmp_hold_capable(hdev))
1540 link_policy |= HCI_LP_HOLD;
1541 if (lmp_sniff_capable(hdev))
1542 link_policy |= HCI_LP_SNIFF;
1543 if (lmp_park_capable(hdev))
1544 link_policy |= HCI_LP_PARK;
/* HCI parameters are little-endian on the wire */
1546 cp.policy = cpu_to_le16(link_policy);
1547 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported when the host LE setting differs from
 * what the controller currently reports. Skipped entirely for LE-only
 * controllers. NOTE(review): some original lines (return, braces, the
 * cp.le assignment) are missing from this truncated chunk. */
1550 static void hci_set_le_support(struct hci_request *req)
1552 struct hci_dev *hdev = req->hdev;
1553 struct hci_cp_write_le_host_supported cp;
1555 /* LE-only devices do not support explicit enablement */
1556 if (!lmp_bredr_capable(hdev))
1559 memset(&cp, 0, sizeof(cp));
1561 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1563 cp.simul = lmp_le_br_capable(hdev);
/* Only send the command if the host value would actually change */
1566 if (cp.le != lmp_host_le_capable(hdev))
1567 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and queue event-mask page 2: CSB master/slave events and the
 * Authenticated Payload Timeout Expired event, gated on capability bits. */
1571 static void hci_set_event_mask_page_2(struct hci_request *req)
1573 struct hci_dev *hdev = req->hdev;
1574 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1576 /* If Connectionless Slave Broadcast master role is supported
1577 * enable all necessary events for it.
1579 if (lmp_csb_master_capable(hdev)) {
1580 events[1] |= 0x40; /* Triggered Clock Capture */
1581 events[1] |= 0x80; /* Synchronization Train Complete */
1582 events[2] |= 0x10; /* Slave Page Response Timeout */
1583 events[2] |= 0x20; /* CSB Channel Map Change */
1586 /* If Connectionless Slave Broadcast slave role is supported
1587 * enable all necessary events for it.
1589 if (lmp_csb_slave_capable(hdev)) {
1590 events[2] |= 0x01; /* Synchronization Train Received */
1591 events[2] |= 0x02; /* CSB Receive */
1592 events[2] |= 0x04; /* CSB Timeout */
1593 events[2] |= 0x08; /* Truncated Page Complete */
1596 /* Enable Authenticated Payload Timeout Expired event if supported */
1597 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
/* NOTE(review): the events[] bit set for the line above is missing
 * from this truncated chunk */
1600 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage-3 init request: delete stored link keys (when supported and not
 * quirked off), set the default link policy, configure the LE event mask
 * and LE host support, then read feature pages beyond page 1. */
1603 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1605 struct hci_dev *hdev = req->hdev;
1608 /* Some Broadcom based Bluetooth controllers do not support the
1609 * Delete Stored Link Key command. They are clearly indicating its
1610 * absence in the bit mask of supported commands.
1612 * Check the supported commands and only if the command is marked
1613 * as supported send it. If not supported assume that the controller
1614 * does not have actual support for stored link keys which makes this
1615 * command redundant anyway.
1617 * Some controllers indicate that they support handling deleting
1618 * stored link keys, but they don't. The quirk lets a driver
1619 * just disable this command.
1621 if (hdev->commands[6] & 0x80 &&
1622 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1623 struct hci_cp_delete_stored_link_key cp;
1625 bacpy(&cp.bdaddr, BDADDR_ANY);
1626 cp.delete_all = 0x01;
1627 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* Write Default Link Policy support is bit 4 of command octet 5 */
1631 if (hdev->commands[5] & 0x10)
1632 hci_setup_link_policy(req);
1634 if (lmp_le_capable(hdev)) {
1637 memset(events, 0, sizeof(events));
1640 /* If controller supports the Connection Parameters Request
1641 * Link Layer Procedure, enable the corresponding event.
1643 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1644 events[0] |= 0x20; /* LE Remote Connection
1648 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1651 hci_set_le_support(req);
1654 /* Read features beyond page 1 if available */
1655 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1656 struct hci_cp_read_local_ext_features cp;
1659 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 init request: event-mask page 2, synchronization-train
 * parameters, and Secure Connections enablement, each gated on support. */
1664 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1666 struct hci_dev *hdev = req->hdev;
1668 /* Set event mask page 2 if the HCI command for it is supported */
1669 if (hdev->commands[22] & 0x04)
1670 hci_set_event_mask_page_2(req);
1672 /* Check for Synchronization Train support */
1673 if (lmp_sync_train_capable(hdev))
1674 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1676 /* Enable Secure Connections if supported and configured */
1677 if ((lmp_sc_capable(hdev) ||
1678 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1679 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
/* NOTE(review): the local 'support' initializer is missing from this
 * truncated chunk */
1681 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1682 sizeof(support), &support);
/* Run the full controller initialization: synchronous init stages 1-4,
 * then (only during initial HCI_SETUP) create the debugfs entries for
 * the controller. Stages 2-4 are skipped for non-BR/EDR (AMP) devices.
 * Returns 0 on success or a negative errno from a failed init stage.
 * NOTE(review): error-check lines between the __hci_req_sync calls are
 * missing from this truncated chunk. */
1686 static int __hci_init(struct hci_dev *hdev)
1690 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1694 /* The Device Under Test (DUT) mode is special and available for
1695 * all controller types. So just create it early on.
1697 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1698 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1702 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1703 * BR/EDR/LE type controllers. AMP controllers only need the
1706 if (hdev->dev_type != HCI_BREDR)
1709 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1713 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1717 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1721 /* Only create debugfs entries during the initial setup
1722 * phase and not every time the controller gets powered on.
1724 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Common entries available for every controller type */
1727 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1729 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1730 &hdev->manufacturer);
1731 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1732 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1733 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1735 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1737 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1739 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1740 &conn_info_min_age_fops);
1741 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1742 &conn_info_max_age_fops);
/* BR/EDR-only entries */
1744 if (lmp_bredr_capable(hdev)) {
1745 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1746 hdev, &inquiry_cache_fops);
1747 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1748 hdev, &link_keys_fops);
1749 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1750 hdev, &dev_class_fops);
1751 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1752 hdev, &voice_setting_fops);
1755 if (lmp_ssp_capable(hdev)) {
1756 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1757 hdev, &auto_accept_delay_fops);
1758 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1759 hdev, &force_sc_support_fops);
1760 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1761 hdev, &sc_only_mode_fops);
1764 if (lmp_sniff_capable(hdev)) {
1765 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1766 hdev, &idle_timeout_fops);
1767 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1768 hdev, &sniff_min_interval_fops);
1769 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1770 hdev, &sniff_max_interval_fops);
/* LE-only entries */
1773 if (lmp_le_capable(hdev)) {
1774 debugfs_create_file("identity", 0400, hdev->debugfs,
1775 hdev, &identity_fops);
1776 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1777 hdev, &rpa_timeout_fops);
1778 debugfs_create_file("random_address", 0444, hdev->debugfs,
1779 hdev, &random_address_fops);
1780 debugfs_create_file("static_address", 0444, hdev->debugfs,
1781 hdev, &static_address_fops);
1783 /* For controllers with a public address, provide a debug
1784 * option to force the usage of the configured static
1785 * address. By default the public address is used.
1787 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1788 debugfs_create_file("force_static_address", 0644,
1789 hdev->debugfs, hdev,
1790 &force_static_address_fops);
1792 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1793 &hdev->le_white_list_size);
1794 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1796 debugfs_create_file("identity_resolving_keys", 0400,
1797 hdev->debugfs, hdev,
1798 &identity_resolving_keys_fops);
1799 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1800 hdev, &long_term_keys_fops);
1801 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1802 hdev, &conn_min_interval_fops);
1803 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1804 hdev, &conn_max_interval_fops);
1805 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1806 hdev, &conn_latency_fops);
1807 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1808 hdev, &supervision_timeout_fops);
1809 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1810 hdev, &adv_channel_map_fops);
1811 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1813 debugfs_create_u16("discov_interleaved_timeout", 0644,
1815 &hdev->discov_interleaved_timeout);
/* Minimal (unconfigured-controller) init request: optional reset, read
 * local version, and — only when the driver can change the address via
 * set_bdaddr — read the original BD address. */
1821 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1823 struct hci_dev *hdev = req->hdev;
1825 BT_DBG("%s %ld", hdev->name, opt);
/* Reset first unless the driver reserves reset for close time */
1828 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1829 hci_reset_req(req, 0);
1831 /* Read Local Version */
1832 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1834 /* Read BD Address */
1835 if (hdev->set_bdaddr)
1836 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Run the minimal stage-0 init synchronously for an unconfigured
 * controller; raw-quirked devices skip it. NOTE(review): the early
 * return and final return lines are missing from this truncated chunk. */
1839 static int __hci_unconf_init(struct hci_dev *hdev)
1843 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1846 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* Request builder: write the scan-enable value passed in 'opt'.
 * NOTE(review): the local 'scan' declaration from the original is
 * missing in this truncated chunk. */
1853 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1857 BT_DBG("%s %x", req->hdev->name, scan);
1859 /* Inquiry and Page scans */
1860 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder: write the authentication-enable value from 'opt'.
 * NOTE(review): the local 'auth' declaration is missing in this chunk. */
1863 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1867 BT_DBG("%s %x", req->hdev->name, auth);
1869 /* Authentication */
1870 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder: write the encryption-mode value from 'opt'.
 * NOTE(review): the local 'encrypt' declaration is missing in this chunk. */
1873 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1877 BT_DBG("%s %x", req->hdev->name, encrypt);
1880 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder: write the default link policy from 'opt',
 * converted to little-endian for the wire. */
1883 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1885 __le16 policy = cpu_to_le16(opt);
1887 BT_DBG("%s %x", req->hdev->name, policy);
1889 /* Default link policy */
1890 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1893 /* Get HCI device by index.
1894 * Device is held on return. */
1895 struct hci_dev *hci_dev_get(int index)
1897 struct hci_dev *hdev = NULL, *d;
1899 BT_DBG("%d", index);
/* Walk the global device list under the read lock; take a reference
 * on the matching device before returning it. */
1904 read_lock(&hci_dev_list_lock);
1905 list_for_each_entry(d, &hci_dev_list, list) {
1906 if (d->id == index) {
1907 hdev = hci_dev_hold(d);
1911 read_unlock(&hci_dev_list_lock);
1915 /* ---- Inquiry support ---- */
/* Return whether a discovery procedure is in progress, i.e. the
 * discovery state machine is in FINDING or RESOLVING.
 * NOTE(review): the return statements are missing from this chunk. */
1917 bool hci_discovery_active(struct hci_dev *hdev)
1919 struct discovery_state *discov = &hdev->discovery;
1921 switch (discov->state) {
1922 case DISCOVERY_FINDING:
1923 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the corresponding
 * mgmt "discovering" events; no-op when the state is unchanged.
 * STOPPED additionally re-evaluates background scanning. */
1931 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1933 int old_state = hdev->discovery.state;
1935 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1937 if (old_state == state)
1940 hdev->discovery.state = state;
1943 case DISCOVERY_STOPPED:
1944 hci_update_background_scan(hdev);
/* STARTING never reported discovery=1, so don't report 0 for it */
1946 if (old_state != DISCOVERY_STARTING)
1947 mgmt_discovering(hdev, 0);
1949 case DISCOVERY_STARTING:
1951 case DISCOVERY_FINDING:
1952 mgmt_discovering(hdev, 1);
1954 case DISCOVERY_RESOLVING:
1956 case DISCOVERY_STOPPING:
/* Drop every entry from the inquiry cache and re-initialize the
 * unknown/resolve sub-lists. NOTE(review): the per-entry free inside
 * the loop is missing from this truncated chunk. */
1961 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1963 struct discovery_state *cache = &hdev->discovery;
1964 struct inquiry_entry *p, *n;
1966 list_for_each_entry_safe(p, n, &cache->all, all) {
1971 INIT_LIST_HEAD(&cache->unknown);
1972 INIT_LIST_HEAD(&cache->resolve);
/* Look up an inquiry-cache entry by BD address on the 'all' list.
 * NOTE(review): the return statements are missing from this chunk. */
1975 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1978 struct discovery_state *cache = &hdev->discovery;
1979 struct inquiry_entry *e;
1981 BT_DBG("cache %p, %pMR", cache, bdaddr);
1983 list_for_each_entry(e, &cache->all, all) {
1984 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Look up an entry by BD address on the 'unknown name' sub-list.
 * NOTE(review): the return statements are missing from this chunk. */
1991 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1994 struct discovery_state *cache = &hdev->discovery;
1995 struct inquiry_entry *e;
1997 BT_DBG("cache %p, %pMR", cache, bdaddr);
1999 list_for_each_entry(e, &cache->unknown, list) {
2000 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Look up an entry on the name-resolve sub-list: BDADDR_ANY matches the
 * first entry in the given name_state, otherwise match by address.
 * NOTE(review): the return statements are missing from this chunk. */
2007 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2011 struct discovery_state *cache = &hdev->discovery;
2012 struct inquiry_entry *e;
2014 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2016 list_for_each_entry(e, &cache->resolve, list) {
2017 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2019 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert 'ie' into the resolve list sorted by descending |RSSI|,
 * keeping entries with pending name requests ahead of it. */
2026 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2027 struct inquiry_entry *ie)
2029 struct discovery_state *cache = &hdev->discovery;
2030 struct list_head *pos = &cache->resolve;
2031 struct inquiry_entry *p;
2033 list_del(&ie->list);
/* Find the insertion point: stop before the first non-pending entry
 * whose |RSSI| is at least ours */
2035 list_for_each_entry(p, &cache->resolve, list) {
2036 if (p->name_state != NAME_PENDING &&
2037 abs(p->data.rssi) >= abs(ie->data.rssi))
2042 list_add(&ie->list, pos);
/* Insert or refresh an inquiry-cache entry for 'data' and compute the
 * MGMT device-found flags (legacy pairing / confirm-name). Existing
 * entries get their RSSI refreshed and may be re-sorted on the resolve
 * list; new entries are allocated (GFP_ATOMIC) and classified by name
 * state. NOTE(review): several lines (goto/update labels, the name_known
 * parameter handling, final return) are missing from this chunk. */
2045 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2048 struct discovery_state *cache = &hdev->discovery;
2049 struct inquiry_entry *ie;
2052 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2054 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2056 if (!data->ssp_mode)
2057 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2059 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2061 if (!ie->data.ssp_mode)
2062 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* Refresh RSSI for entries still waiting on a name request so the
 * resolve-list ordering stays correct */
2064 if (ie->name_state == NAME_NEEDED &&
2065 data->rssi != ie->data.rssi) {
2066 ie->data.rssi = data->rssi;
2067 hci_inquiry_cache_update_resolve(hdev, ie);
2073 /* Entry not in the cache. Add new one. */
2074 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2076 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2080 list_add(&ie->all, &cache->all);
2083 ie->name_state = NAME_KNOWN;
2085 ie->name_state = NAME_NOT_KNOWN;
2086 list_add(&ie->list, &cache->unknown);
2090 if (name_known && ie->name_state != NAME_KNOWN &&
2091 ie->name_state != NAME_PENDING) {
2092 ie->name_state = NAME_KNOWN;
2093 list_del(&ie->list);
2096 memcpy(&ie->data, data, sizeof(*data));
2097 ie->timestamp = jiffies;
2098 cache->timestamp = jiffies;
2100 if (ie->name_state == NAME_NOT_KNOWN)
2101 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Serialize up to 'num' cached inquiry entries into 'buf' as an array of
 * struct inquiry_info. NOTE(review): the copied-count bookkeeping and
 * return statement are missing from this truncated chunk. */
2107 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2109 struct discovery_state *cache = &hdev->discovery;
2110 struct inquiry_info *info = (struct inquiry_info *) buf;
2111 struct inquiry_entry *e;
2114 list_for_each_entry(e, &cache->all, all) {
2115 struct inquiry_data *data = &e->data;
2120 bacpy(&info->bdaddr, &data->bdaddr);
2121 info->pscan_rep_mode = data->pscan_rep_mode;
2122 info->pscan_period_mode = data->pscan_period_mode;
2123 info->pscan_mode = data->pscan_mode;
2124 memcpy(info->dev_class, data->dev_class, 3);
2125 info->clock_offset = data->clock_offset;
2131 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder: start an HCI Inquiry with the LAP/length/num_rsp
 * from the user's hci_inquiry_req; skipped if an inquiry is already
 * running (HCI_INQUIRY flag set). */
2135 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2137 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2138 struct hci_dev *hdev = req->hdev;
2139 struct hci_cp_inquiry cp;
2141 BT_DBG("%s", hdev->name);
2143 if (test_bit(HCI_INQUIRY, &hdev->flags))
2147 memcpy(&cp.lap, &ir->lap, 3);
2148 cp.length = ir->length;
2149 cp.num_rsp = ir->num_rsp;
2150 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit action: sleepable wait that aborts on a pending signal. */
2153 static int wait_inquiry(void *word)
2156 return signal_pending(current);
/* HCIINQUIRY ioctl handler: validate the device (no user channel,
 * configured, BR/EDR and enabled), optionally flush the stale cache and
 * run a new inquiry, then copy the cached results back to user space.
 * Returns 0 or a negative errno (-EFAULT on bad user pointers, -EINTR if
 * interrupted while waiting). NOTE(review): several lines (locking,
 * error labels, cleanup) are missing from this truncated chunk. */
2159 int hci_inquiry(void __user *arg)
2161 __u8 __user *ptr = arg;
2162 struct hci_inquiry_req ir;
2163 struct hci_dev *hdev;
2164 int err = 0, do_inquiry = 0, max_rsp;
2168 if (copy_from_user(&ir, ptr, sizeof(ir)))
2171 hdev = hci_dev_get(ir.dev_id)
2175 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2180 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2185 if (hdev->dev_type != HCI_BREDR) {
2190 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Re-run inquiry when the cache is too old, empty, or a flush was
 * explicitly requested */
2196 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2197 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2198 hci_inquiry_cache_flush(hdev);
2201 hci_dev_unlock(hdev);
2203 timeo = ir.length * msecs_to_jiffies(2000);
2206 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2211 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2212 * cleared). If it is interrupted by a signal, return -EINTR.
2214 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2215 TASK_INTERRUPTIBLE))
2219 /* for unlimited number of responses we will use buffer with
2222 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2224 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2225 * copy it to the user space.
2227 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2234 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2235 hci_dev_unlock(hdev);
2237 BT_DBG("num_rsp %d", ir.num_rsp);
2239 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2241 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Power on a controller: validate preconditions (not unregistering, not
 * rfkilled outside setup, valid address for BR/EDR), call the driver's
 * open(), run driver setup and the appropriate init sequence, then mark
 * the device HCI_UP and notify. On init failure all work is flushed and
 * queues are purged. Returns 0 or a negative errno.
 * NOTE(review): goto labels, locking calls and several branches are
 * missing from this truncated chunk. */
2254 static int hci_dev_do_open(struct hci_dev *hdev)
2258 BT_DBG("%s %p", hdev->name, hdev)
2262 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2267 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2268 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2269 /* Check for rfkill but allow the HCI setup stage to
2270 * proceed (which in itself doesn't cause any RF activity).
2272 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2277 /* Check for valid public address or a configured static
2278 * random address, but let the HCI setup proceed to
2279 * be able to determine if there is a public address
2282 * In case of user channel usage, it is not important
2283 * if a public address or static random address is
2286 * This check is only valid for BR/EDR controllers
2287 * since AMP controllers do not have an address.
2289 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2290 hdev->dev_type == HCI_BREDR &&
2291 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2292 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2293 ret = -EADDRNOTAVAIL;
2298 if (test_bit(HCI_UP, &hdev->flags)) {
2303 if (hdev->open(hdev)) {
2308 atomic_set(&hdev->cmd_cnt, 1);
2309 set_bit(HCI_INIT, &hdev->flags);
2311 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2313 ret = hdev->setup(hdev);
2315 /* The transport driver can set these quirks before
2316 * creating the HCI device or in its setup callback.
2318 * In case any of them is set, the controller has to
2319 * start up as unconfigured.
2321 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2322 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2323 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2325 /* For an unconfigured controller it is required to
2326 * read at least the version information provided by
2327 * the Read Local Version Information command.
2329 * If the set_bdaddr driver callback is provided, then
2330 * also the original Bluetooth public device address
2331 * will be read using the Read BD Address command.
2333 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2334 ret = __hci_unconf_init(hdev);
2337 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2338 /* If public address change is configured, ensure that
2339 * the address gets programmed. If the driver does not
2340 * support changing the public address, fail the power
2343 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2345 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2347 ret = -EADDRNOTAVAIL;
/* Full init only for configured, non-user-channel devices */
2351 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2352 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2353 ret = __hci_init(hdev);
2356 clear_bit(HCI_INIT, &hdev->flags);
2360 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2361 set_bit(HCI_UP, &hdev->flags);
2362 hci_notify(hdev, HCI_DEV_UP);
2363 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2364 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2365 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2366 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2367 hdev->dev_type == HCI_BREDR) {
2369 mgmt_powered(hdev, 1);
2370 hci_dev_unlock(hdev);
2373 /* Init failed, cleanup */
2374 flush_work(&hdev->tx_work);
2375 flush_work(&hdev->cmd_work);
2376 flush_work(&hdev->rx_work);
2378 skb_queue_purge(&hdev->cmd_q);
2379 skb_queue_purge(&hdev->rx_q);
2384 if (hdev->sent_cmd) {
2385 kfree_skb(hdev->sent_cmd);
2386 hdev->sent_cmd = NULL;
/* Keep only the HCI_RAW flag across a failed open */
2390 hdev->flags &= BIT(HCI_RAW);
2394 hci_req_unlock(hdev);
2398 /* ---- HCI ioctl helpers ---- */
2400 int hci_dev_open(__u16 dev)
2402 struct hci_dev *hdev;
2405 hdev = hci_dev_get(dev);
2409 /* Devices that are marked as unconfigured can only be powered
2410 * up as user channel. Trying to bring them up as normal devices
2411 * will result into a failure. Only user channel operation is
2414 * When this function is called for a user channel, the flag
2415 * HCI_USER_CHANNEL will be set first before attempting to
2418 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2419 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2424 /* We need to ensure that no other power on/off work is pending
2425 * before proceeding to call hci_dev_do_open. This is
2426 * particularly important if the setup procedure has not yet
2429 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2430 cancel_delayed_work(&hdev->power_off);
2432 /* After this call it is guaranteed that the setup procedure
2433 * has finished. This means that error conditions like RFKILL
2434 * or no valid public or static random address apply.
2436 flush_workqueue(hdev->req_workqueue);
2438 err = hci_dev_do_open(hdev);
2445 /* This function requires the caller holds hdev->lock */
/* Detach every LE connection parameter entry from its pending-action
 * list (pend_le_conns / pend_le_reports) without freeing the params. */
2446 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2448 struct hci_conn_params *p;
2450 list_for_each_entry(p, &hdev->le_conn_params, list)
2451 list_del_init(&p->action);
2453 BT_DBG("All LE pending actions cleared");
/* Power off a controller: cancel timers and pending requests, flush
 * work, clear discoverability, flush caches/connections, optionally send
 * a final reset (quirk-dependent), drop the last sent command and reset
 * all non-persistent flags. NOTE(review): locking calls, the driver
 * close() call and return statements are missing from this chunk. */
2456 static int hci_dev_do_close(struct hci_dev *hdev)
2458 BT_DBG("%s %p", hdev->name, hdev);
2460 cancel_delayed_work(&hdev->power_off);
2462 hci_req_cancel(hdev, ENODEV);
/* Already down: just stop the command timer and bail out */
2465 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2466 cancel_delayed_work_sync(&hdev->cmd_timer);
2467 hci_req_unlock(hdev);
2471 /* Flush RX and TX works */
2472 flush_work(&hdev->tx_work);
2473 flush_work(&hdev->rx_work);
2475 if (hdev->discov_timeout > 0) {
2476 cancel_delayed_work(&hdev->discov_off);
2477 hdev->discov_timeout = 0;
2478 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2479 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2482 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2483 cancel_delayed_work(&hdev->service_cache);
2485 cancel_delayed_work_sync(&hdev->le_scan_disable);
2487 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2488 cancel_delayed_work_sync(&hdev->rpa_expired);
2491 hci_inquiry_cache_flush(hdev);
2492 hci_conn_hash_flush(hdev);
2493 hci_pend_le_actions_clear(hdev);
2494 hci_dev_unlock(hdev);
2496 hci_notify(hdev, HCI_DEV_DOWN);
2502 skb_queue_purge(&hdev->cmd_q);
2503 atomic_set(&hdev->cmd_cnt, 1);
/* Send a final HCI Reset only when the quirk requests reset-on-close
 * and the device is neither auto-off nor unconfigured */
2504 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2505 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2506 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2507 set_bit(HCI_INIT, &hdev->flags);
2508 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2509 clear_bit(HCI_INIT, &hdev->flags);
2512 /* flush cmd work */
2513 flush_work(&hdev->cmd_work);
2516 skb_queue_purge(&hdev->rx_q);
2517 skb_queue_purge(&hdev->cmd_q);
2518 skb_queue_purge(&hdev->raw_q);
2520 /* Drop last sent command */
2521 if (hdev->sent_cmd) {
2522 cancel_delayed_work_sync(&hdev->cmd_timer);
2523 kfree_skb(hdev->sent_cmd);
2524 hdev->sent_cmd = NULL;
2527 kfree_skb(hdev->recv_evt);
2528 hdev->recv_evt = NULL;
2530 /* After this point our queues are empty
2531 * and no tasks are scheduled. */
/* Keep only HCI_RAW in flags; clear all non-persistent dev_flags */
2535 hdev->flags &= BIT(HCI_RAW);
2536 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2538 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2539 if (hdev->dev_type == HCI_BREDR) {
2541 mgmt_powered(hdev, 0);
2542 hci_dev_unlock(hdev);
2546 /* Controller radio is available but is currently powered down */
2547 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2549 memset(hdev->eir, 0, sizeof(hdev->eir));
2550 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2551 bacpy(&hdev->random_addr, BDADDR_ANY);
2553 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl entry point: refuse user-channel devices, cancel a
 * pending auto-power-off, then perform the close. Returns 0 or a
 * negative errno. NOTE(review): error label and hci_dev_put lines are
 * missing from this truncated chunk. */
2559 int hci_dev_close(__u16 dev)
2561 struct hci_dev *hdev;
2564 hdev = hci_dev_get(dev);
2568 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2574 cancel_delayed_work(&hdev->power_off);
2576 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: require an up, non-user-channel, configured
 * device; drop queues, flush inquiry cache and connections, reset the
 * data counters and issue a synchronous HCI Reset.
 * NOTE(review): locking calls, error labels and the return are missing
 * from this truncated chunk. */
2583 int hci_dev_reset(__u16 dev)
2585 struct hci_dev *hdev;
2588 hdev = hci_dev_get(dev);
2594 if (!test_bit(HCI_UP, &hdev->flags)) {
2599 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2604 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
/* Drop queues and cached state before resetting */
2610 skb_queue_purge(&hdev->rx_q);
2611 skb_queue_purge(&hdev->cmd_q);
2614 hci_inquiry_cache_flush(hdev);
2615 hci_conn_hash_flush(hdev);
2616 hci_dev_unlock(hdev);
2621 atomic_set(&hdev->cmd_cnt, 1);
2622 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2624 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2627 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device statistics counters for a
 * non-user-channel, configured device. NOTE(review): error labels,
 * hci_dev_put and the return are missing from this truncated chunk. */
2632 int hci_dev_reset_stat(__u16 dev)
2634 struct hci_dev *hdev;
2637 hdev = hci_dev_get(dev);
2641 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2646 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2651 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* ioctls: copies the hci_dev_req from user
 * space, validates the device, then either runs a synchronous request
 * (auth/encrypt/scan/link policy) or updates hdev fields directly
 * (link mode, packet type, ACL/SCO MTU). NOTE(review): the switch
 * statement, several case labels, error labels and the return are
 * missing from this truncated chunk. */
2658 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2660 struct hci_dev *hdev;
2661 struct hci_dev_req dr;
2664 if (copy_from_user(&dr, arg, sizeof(dr)))
2667 hdev = hci_dev_get(dr.dev_id);
2671 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2676 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2681 if (hdev->dev_type != HCI_BREDR) {
2686 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2693 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2698 if (!lmp_encrypt_capable(hdev)) {
2703 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2704 /* Auth must be enabled first */
2705 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2711 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2716 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2721 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2725 case HCISETLINKMODE:
2726 hdev->link_mode = ((__u16) dr.dev_opt) &
2727 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2731 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits, packet count in the low */
2735 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2736 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2740 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2741 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: allocate a bounded list, walk all registered
 * devices under the read lock filling (dev_id, flags) pairs, and copy
 * the result back to user space. Returns 0 or -EFAULT/-EINVAL/-ENOMEM.
 * NOTE(review): dev_num declaration, loop-break and kfree lines are
 * missing from this truncated chunk. */
2754 int hci_get_dev_list(void __user *arg)
2756 struct hci_dev *hdev;
2757 struct hci_dev_list_req *dl;
2758 struct hci_dev_req *dr;
2759 int n = 0, size, err;
2762 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the allocation stays within two pages */
2765 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2768 size = sizeof(*dl) + dev_num * sizeof(*dr);
2770 dl = kzalloc(size, GFP_KERNEL);
2776 read_lock(&hci_dev_list_lock);
2777 list_for_each_entry(hdev, &hci_dev_list, list) {
2778 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2779 cancel_delayed_work(&hdev->power_off);
2781 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2782 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2784 (dr + n)->dev_id = hdev->id;
2785 (dr + n)->dev_opt = hdev->flags;
2790 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found */
2793 size = sizeof(*dl) + n * sizeof(*dr);
2795 err = copy_to_user(arg, dl, size);
2798 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a hci_dev_info with the device's address,
 * type, flags, MTUs (BR/EDR or LE depending on capability), link
 * policy/mode, stats and features, then copy it to user space.
 * NOTE(review): the else branch brace, error handling and return are
 * missing from this truncated chunk. */
2801 int hci_get_dev_info(void __user *arg)
2803 struct hci_dev *hdev;
2804 struct hci_dev_info di;
2807 if (copy_from_user(&di, arg, sizeof(di)))
2810 hdev = hci_dev_get(di.dev_id);
2814 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2815 cancel_delayed_work_sync(&hdev->power_off);
2817 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2818 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2820 strcpy(di.name, hdev->name);
2821 di.bdaddr = hdev->bdaddr;
/* type packs the bus in the low nibble and dev_type in bits 4-5 */
2822 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2823 di.flags = hdev->flags;
2824 di.pkt_type = hdev->pkt_type;
2825 if (lmp_bredr_capable(hdev)) {
2826 di.acl_mtu = hdev->acl_mtu;
2827 di.acl_pkts = hdev->acl_pkts;
2828 di.sco_mtu = hdev->sco_mtu;
2829 di.sco_pkts = hdev->sco_pkts;
/* LE-only controller: report LE buffer info in the ACL fields */
2831 di.acl_mtu = hdev->le_mtu;
2832 di.acl_pkts = hdev->le_pkts;
2836 di.link_policy = hdev->link_policy;
2837 di.link_mode = hdev->link_mode;
2839 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2840 memcpy(&di.features, &hdev->features, sizeof(di.features));
2842 if (copy_to_user(arg, &di, sizeof(di)))
2850 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: track the RFKILLED flag and close the device when it
 * gets blocked outside of the setup/config phases; user-channel devices
 * are left alone. NOTE(review): the else branch brace and return lines
 * are missing from this truncated chunk. */
2852 static int hci_rfkill_set_block(void *data, bool blocked)
2854 struct hci_dev *hdev = data;
2856 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2858 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2862 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2863 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2864 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2865 hci_dev_do_close(hdev);
2867 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table for HCI devices (block/unblock handling). */
2873 static const struct rfkill_ops hci_rfkill_ops = {
2874 .set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the device, re-check conditions that
 * were ignored during setup (rfkill, unconfigured, missing address) and
 * power back off if they still hold; otherwise schedule auto-off and
 * emit the appropriate mgmt Index Added event on the SETUP -> ready or
 * CONFIG -> configured transition. NOTE(review): some lines (returns,
 * braces) are missing from this truncated chunk. */
2877 static void hci_power_on(struct work_struct *work)
2879 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2882 BT_DBG("%s", hdev->name);
2884 err = hci_dev_do_open(hdev);
2886 mgmt_set_powered_failed(hdev, err);
2890 /* During the HCI setup phase, a few error conditions are
2891 * ignored and they need to be checked now. If they are still
2892 * valid, it is important to turn the device back off.
2894 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2895 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2896 (hdev->dev_type == HCI_BREDR &&
2897 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2898 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2899 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2900 hci_dev_do_close(hdev);
2901 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2902 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2903 HCI_AUTO_OFF_TIMEOUT);
2906 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2907 /* For unconfigured devices, set the HCI_RAW flag
2908 * so that userspace can easily identify them.
2910 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2911 set_bit(HCI_RAW, &hdev->flags);
2913 /* For fully configured devices, this will send
2914 * the Index Added event. For unconfigured devices,
2915 * it will send Unconfigured Index Added event.
2917 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2918 * and no event will be sent.
2920 mgmt_index_added(hdev);
2921 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2922 /* When the controller is now configured, then it
2923 * is important to clear the HCI_RAW flag.
2925 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2926 clear_bit(HCI_RAW, &hdev->flags);
2928 /* Powering on the controller with HCI_CONFIG set only
2929 * happens with the transition from unconfigured to
2930 * configured. This will send the Index Added event.
2932 mgmt_index_added(hdev);
/* Deferred power-off work item: simply closes the device. */
2936 static void hci_power_off(struct work_struct *work)
2938 struct hci_dev *hdev = container_of(work, struct hci_dev,
2941 BT_DBG("%s", hdev->name);
2943 hci_dev_do_close(hdev);
/* Discoverable-timeout work item: lets mgmt handle expiry of the
 * discoverable state.
 */
2946 static void hci_discov_off(struct work_struct *work)
2948 struct hci_dev *hdev;
2950 hdev = container_of(work, struct hci_dev, discov_off.work);
2952 BT_DBG("%s", hdev->name);
2954 mgmt_discoverable_timeout(hdev);
/* Unlink every registered UUID from hdev->uuids. */
2957 void hci_uuids_clear(struct hci_dev *hdev)
2959 struct bt_uuid *uuid, *tmp;
2961 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2962 list_del(&uuid->list);
/* Unlink every stored BR/EDR link key from hdev->link_keys. */
2967 void hci_link_keys_clear(struct hci_dev *hdev)
2969 struct list_head *p, *n;
2971 list_for_each_safe(p, n, &hdev->link_keys) {
2972 struct link_key *key;
2974 key = list_entry(p, struct link_key, list);
/* Drop all SMP long term keys from hdev->long_term_keys. */
2981 void hci_smp_ltks_clear(struct hci_dev *hdev)
2983 struct smp_ltk *k, *tmp;
2985 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Drop all SMP identity resolving keys from the device. */
2991 void hci_smp_irks_clear(struct hci_dev *hdev)
2993 struct smp_irk *k, *tmp;
2995 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
/* Look up the stored BR/EDR link key for @bdaddr, or NULL. */
3001 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3005 list_for_each_entry(k, &hdev->link_keys, list)
3006 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly received link key should be stored
 * persistently, based on key type and the bonding requirements of
 * both sides of the connection.
 */
3012 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3013 u8 key_type, u8 old_key_type)
/* Legacy key types (< 0x03) */
3016 if (key_type < 0x03)
3019 /* Debug keys are insecure so don't store them persistently */
3020 if (key_type == HCI_LK_DEBUG_COMBINATION)
3023 /* Changed combination key and there's no previous one */
3024 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3027 /* Security mode 3 case */
3031 /* Neither local nor remote side had no-bonding as requirement */
3032 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3035 /* Local side had dedicated bonding as requirement */
3036 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3039 /* Remote side had dedicated bonding as requirement */
3040 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3043 /* If none of the above criteria match, then don't store the key
/* Return true when the LTK type indicates the master role key. */
3048 static bool ltk_type_master(u8 type)
3050 return (type == SMP_LTK);
/* Find an LTK matching @ediv, @rand and the requested role. */
3053 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3058 list_for_each_entry(k, &hdev->long_term_keys, list) {
3059 if (k->ediv != ediv || k->rand != rand)
3062 if (ltk_type_master(k->type) != master)
/* Find an LTK by peer address, address type and role. */
3071 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3072 u8 addr_type, bool master)
3076 list_for_each_entry(k, &hdev->long_term_keys, list)
3077 if (addr_type == k->bdaddr_type &&
3078 bacmp(bdaddr, &k->bdaddr) == 0 &&
3079 ltk_type_master(k->type) == master)
/* Find the IRK matching a resolvable private address. First try the
 * cached RPA of each IRK, then fall back to cryptographically
 * resolving the RPA against each stored IRK value (caching a match).
 */
3085 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3087 struct smp_irk *irk;
3089 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3090 if (!bacmp(&irk->rpa, rpa))
3094 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3095 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
/* Cache the resolved RPA for the fast path above. */
3096 bacpy(&irk->rpa, rpa);
/* Find the IRK stored for a given identity address. */
3104 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3107 struct smp_irk *irk;
3109 /* Identity Address must be public or static random */
3110 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3113 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3114 if (addr_type == irk->addr_type &&
3115 bacmp(bdaddr, &irk->bdaddr) == 0)
/* Store (or update) a BR/EDR link key for @bdaddr. Reports through
 * @persistent whether the key should survive across power cycles.
 */
3122 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3123 bdaddr_t *bdaddr, u8 *val, u8 type,
3124 u8 pin_len, bool *persistent)
3126 struct link_key *key, *old_key;
3129 old_key = hci_find_link_key(hdev, bdaddr);
3131 old_key_type = old_key->type;
/* 0xff means "no previous key type known". */
3134 old_key_type = conn ? conn->key_type : 0xff;
3135 key = kzalloc(sizeof(*key), GFP_KERNEL);
3138 list_add(&key->list, &hdev->link_keys);
3141 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3143 /* Some buggy controller combinations generate a changed
3144 * combination key for legacy pairing even when there's no
3146 if (type == HCI_LK_CHANGED_COMBINATION &&
3147 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3148 type = HCI_LK_COMBINATION;
3150 conn->key_type = type;
3153 bacpy(&key->bdaddr, bdaddr);
3154 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3155 key->pin_len = pin_len;
/* Keep the old type for changed-combination keys so persistence
 * decisions use the original pairing method. */
3157 if (type == HCI_LK_CHANGED_COMBINATION)
3158 key->type = old_key_type;
3163 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP long term key for the given peer. */
3169 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3170 u8 addr_type, u8 type, u8 authenticated,
3171 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3173 struct smp_ltk *key, *old_key;
3174 bool master = ltk_type_master(type);
/* Reuse an existing entry for the same address/role if present. */
3176 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3180 key = kzalloc(sizeof(*key), GFP_KERNEL);
3183 list_add(&key->list, &hdev->long_term_keys);
3186 bacpy(&key->bdaddr, bdaddr);
3187 key->bdaddr_type = addr_type;
3188 memcpy(key->val, tk, sizeof(key->val));
3189 key->authenticated = authenticated;
3192 key->enc_size = enc_size;
/* Store (or update) an identity resolving key, remembering the
 * peer's current RPA alongside it.
 */
3198 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3199 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3201 struct smp_irk *irk;
3203 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3205 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3209 bacpy(&irk->bdaddr, bdaddr);
3210 irk->addr_type = addr_type;
3212 list_add(&irk->list, &hdev->identity_resolving_keys);
3215 memcpy(irk->val, val, 16);
3216 bacpy(&irk->rpa, rpa);
/* Remove the stored link key for @bdaddr, if any. */
3221 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3223 struct link_key *key;
3225 key = hci_find_link_key(hdev, bdaddr);
3229 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3231 list_del(&key->list);
/* Remove all LTKs matching @bdaddr/@bdaddr_type.
 * Returns 0 if at least one key was removed, -ENOENT otherwise.
 */
3237 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3239 struct smp_ltk *k, *tmp;
3242 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3243 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3246 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3253 return removed ? 0 : -ENOENT;
/* Remove all IRKs matching @bdaddr/@addr_type. */
3256 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3258 struct smp_irk *k, *tmp;
3260 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3261 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3264 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
/* HCI command timer function: fires when a sent command got no
 * completion in time. Logs the stuck opcode (if known) and unblocks
 * the command queue by restoring the command credit.
 */
3272 static void hci_cmd_timeout(struct work_struct *work)
3274 struct hci_dev *hdev = container_of(work, struct hci_dev,
3277 if (hdev->sent_cmd) {
3278 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3279 u16 opcode = __le16_to_cpu(sent->opcode);
3281 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3283 BT_ERR("%s command tx timeout", hdev->name);
/* Allow the next command through and kick the worker. */
3286 atomic_set(&hdev->cmd_cnt, 1);
3287 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data for @bdaddr, or NULL. */
3290 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3293 struct oob_data *data;
3295 list_for_each_entry(data, &hdev->remote_oob_data, list)
3296 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove stored remote OOB data for @bdaddr, if any. */
3302 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3304 struct oob_data *data;
3306 data = hci_find_remote_oob_data(hdev, bdaddr);
3310 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3312 list_del(&data->list);
/* Drop all stored remote OOB data entries. */
3318 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3320 struct oob_data *data, *n;
3322 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3323 list_del(&data->list);
/* Store legacy (P-192 only) remote OOB data for @bdaddr; the P-256
 * fields are zeroed since no secure-connections values were supplied.
 */
3328 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3329 u8 *hash, u8 *randomizer)
3331 struct oob_data *data;
3333 data = hci_find_remote_oob_data(hdev, bdaddr);
3335 data = kmalloc(sizeof(*data), GFP_KERNEL);
3339 bacpy(&data->bdaddr, bdaddr);
3340 list_add(&data->list, &hdev->remote_oob_data);
3343 memcpy(data->hash192, hash, sizeof(data->hash192));
3344 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3346 memset(data->hash256, 0, sizeof(data->hash256));
3347 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3349 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Store extended remote OOB data carrying both P-192 and P-256
 * hash/randomizer values for @bdaddr.
 */
3354 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3355 u8 *hash192, u8 *randomizer192,
3356 u8 *hash256, u8 *randomizer256)
3358 struct oob_data *data;
3360 data = hci_find_remote_oob_data(hdev, bdaddr);
3362 data = kmalloc(sizeof(*data), GFP_KERNEL);
3366 bacpy(&data->bdaddr, bdaddr);
3367 list_add(&data->list, &hdev->remote_oob_data);
3370 memcpy(data->hash192, hash192, sizeof(data->hash192));
3371 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3373 memcpy(data->hash256, hash256, sizeof(data->hash256));
3374 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3376 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find a list entry matching both address and address type. */
3381 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3382 bdaddr_t *bdaddr, u8 type)
3384 struct bdaddr_list *b;
3386 list_for_each_entry(b, bdaddr_list, list) {
3387 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Empty a bdaddr list, unlinking every entry. */
3394 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3396 struct list_head *p, *n;
3398 list_for_each_safe(p, n, bdaddr_list) {
3399 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add a new address+type entry to @list. BDADDR_ANY is rejected and
 * duplicates are not added twice.
 */
3406 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3408 struct bdaddr_list *entry;
3410 if (!bacmp(bdaddr, BDADDR_ANY))
3413 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3416 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3420 bacpy(&entry->bdaddr, bdaddr);
3421 entry->bdaddr_type = type;
3423 list_add(&entry->list, list);
/* Remove an entry from @list; BDADDR_ANY clears the whole list. */
3428 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3430 struct bdaddr_list *entry;
3432 if (!bacmp(bdaddr, BDADDR_ANY)) {
3433 hci_bdaddr_list_clear(list);
3437 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3441 list_del(&entry->list);
/* This function requires the caller holds hdev->lock.
 * Look up LE connection parameters for an identity address.
 */
3448 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3449 bdaddr_t *addr, u8 addr_type)
3451 struct hci_conn_params *params;
3453 /* The conn params list only contains identity addresses */
3454 if (!hci_is_identity_address(addr, addr_type))
3457 list_for_each_entry(params, &hdev->le_conn_params, list) {
3458 if (bacmp(&params->addr, addr) == 0 &&
3459 params->addr_type == addr_type) {
/* Return whether there is an established LE connection to
 * @addr/@type.
 */
3467 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3469 struct hci_conn *conn;
3471 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3475 if (conn->dst_type != type)
3478 if (conn->state != BT_CONNECTED)
/* This function requires the caller holds hdev->lock.
 * Look up a conn_params entry on a pending-action list (linked via
 * the "action" member rather than "list").
 */
3485 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3486 bdaddr_t *addr, u8 addr_type)
3488 struct hci_conn_params *param;
3490 /* The list only contains identity addresses */
3491 if (!hci_is_identity_address(addr, addr_type))
3494 list_for_each_entry(param, list, action) {
3495 if (bacmp(&param->addr, addr) == 0 &&
3496 param->addr_type == addr_type)
/* This function requires the caller holds hdev->lock.
 * Get-or-create the LE connection parameter entry for an identity
 * address, initialising new entries from the device defaults.
 */
3504 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3505 bdaddr_t *addr, u8 addr_type)
3507 struct hci_conn_params *params;
3509 if (!hci_is_identity_address(addr, addr_type))
3512 params = hci_conn_params_lookup(hdev, addr, addr_type);
3516 params = kzalloc(sizeof(*params), GFP_KERNEL);
3518 BT_ERR("Out of memory");
3522 bacpy(&params->addr, addr);
3523 params->addr_type = addr_type;
3525 list_add(&params->list, &hdev->le_conn_params);
3526 INIT_LIST_HEAD(&params->action);
/* Seed per-peer parameters with the controller-wide defaults. */
3528 params->conn_min_interval = hdev->le_conn_min_interval;
3529 params->conn_max_interval = hdev->le_conn_max_interval;
3530 params->conn_latency = hdev->le_conn_latency;
3531 params->supervision_timeout = hdev->le_supv_timeout;
3532 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3534 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* This function requires the caller holds hdev->lock.
 * Set the auto-connect policy for a peer, moving the entry onto the
 * appropriate pending list and updating the background scan.
 */
3540 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3543 struct hci_conn_params *params;
3545 params = hci_conn_params_add(hdev, addr, addr_type);
3549 if (params->auto_connect == auto_connect)
/* Detach from any pending list before re-filing below. */
3552 list_del_init(&params->action);
3554 switch (auto_connect) {
3555 case HCI_AUTO_CONN_DISABLED:
3556 case HCI_AUTO_CONN_LINK_LOSS:
3557 hci_update_background_scan(hdev);
3559 case HCI_AUTO_CONN_REPORT:
3560 list_add(&params->action, &hdev->pend_le_reports);
3561 hci_update_background_scan(hdev);
3563 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection while not already connected. */
3564 if (!is_connected(hdev, addr, addr_type)) {
3565 list_add(&params->action, &hdev->pend_le_conns);
3566 hci_update_background_scan(hdev);
3571 params->auto_connect = auto_connect;
3573 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* This function requires the caller holds hdev->lock.
 * Delete the conn_params entry for a peer and refresh scanning.
 */
3580 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3582 struct hci_conn_params *params;
3584 params = hci_conn_params_lookup(hdev, addr, addr_type);
3588 list_del(&params->action);
3589 list_del(&params->list);
3592 hci_update_background_scan(hdev);
3594 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* This function requires the caller holds hdev->lock.
 * Remove only entries whose auto_connect policy is DISABLED.
 */
3598 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3600 struct hci_conn_params *params, *tmp;
3602 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3603 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3605 list_del(&params->list);
3609 BT_DBG("All LE disabled connection parameters were removed");
/* This function requires the caller holds hdev->lock.
 * Remove every conn_params entry and refresh the background scan.
 */
3613 void hci_conn_params_clear_all(struct hci_dev *hdev)
3615 struct hci_conn_params *params, *tmp;
3617 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3618 list_del(&params->action);
3619 list_del(&params->list);
3623 hci_update_background_scan(hdev);
3625 BT_DBG("All LE connection parameters were removed");
/* Request-complete callback for the inquiry started from interleaved
 * discovery; a failure stops discovery.
 */
3628 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3631 BT_ERR("Failed to start inquiry: status %d", status);
3634 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3635 hci_dev_unlock(hdev);
/* Completion handler after LE scanning was disabled. LE-only
 * discovery simply stops; interleaved discovery continues with a
 * BR/EDR inquiry using the general inquiry access code.
 */
3640 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3642 /* General inquiry access code (GIAC) */
3643 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3644 struct hci_request req;
3645 struct hci_cp_inquiry cp;
3649 BT_ERR("Failed to disable LE scanning: status %d", status);
3653 switch (hdev->discovery.type) {
3654 case DISCOV_TYPE_LE:
3656 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3657 hci_dev_unlock(hdev);
3660 case DISCOV_TYPE_INTERLEAVED:
3661 hci_req_init(&req, hdev);
3663 memset(&cp, 0, sizeof(cp));
3664 memcpy(&cp.lap, lap, sizeof(cp.lap));
3665 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3666 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3670 hci_inquiry_cache_flush(hdev);
3672 err = hci_req_run(&req, inquiry_complete);
3674 BT_ERR("Inquiry request failed: err %d", err);
3675 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3678 hci_dev_unlock(hdev);
/* Delayed-work item that issues the HCI request turning LE scanning
 * off; completion continues in le_scan_disable_work_complete().
 */
3683 static void le_scan_disable_work(struct work_struct *work)
3685 struct hci_dev *hdev = container_of(work, struct hci_dev,
3686 le_scan_disable.work);
3687 struct hci_request req;
3690 BT_DBG("%s", hdev->name);
3692 hci_req_init(&req, hdev);
3694 hci_req_add_le_scan_disable(&req);
3696 err = hci_req_run(&req, le_scan_disable_work_complete);
3698 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Queue an HCI command setting the LE random address, unless doing so
 * now would race with advertising or an outgoing LE connection.
 */
3701 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3703 struct hci_dev *hdev = req->hdev;
3705 /* If we're advertising or initiating an LE connection we can't
3706 * go ahead and change the random address at this time. This is
3707 * because the eventual initiator address used for the
3708 * subsequently created connection will be undefined (some
3709 * controllers use the new address and others the one we had
3710 * when the operation started).
3712 * In this kind of scenario skip the update and let the random
3713 * address be updated at the next cycle.
3715 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3716 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3717 BT_DBG("Deferring random address update");
3721 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Choose and program the own-address type for an LE operation:
 * RPA when privacy is on, a non-resolvable address when privacy is
 * required without an IRK, the static address when forced or no
 * public address exists, otherwise the public address.
 */
3724 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3727 struct hci_dev *hdev = req->hdev;
3730 /* If privacy is enabled use a resolvable private address. If
3731 * current RPA has expired or there is something else than
3732 * the current RPA in use, then generate a new one.
3734 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3737 *own_addr_type = ADDR_LE_DEV_RANDOM;
3739 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3740 !bacmp(&hdev->random_addr, &hdev->rpa))
3743 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3745 BT_ERR("%s failed to generate new RPA", hdev->name);
3749 set_random_addr(req, &hdev->rpa);
/* Re-arm the RPA expiry timer (rpa_timeout is in seconds). */
3751 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3752 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3757 /* In case of required privacy without resolvable private address,
3758 * use an unresolvable private address. This is useful for active
3759 * scanning and non-connectable advertising.
3761 if (require_privacy) {
3764 get_random_bytes(&urpa, 6);
3765 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3767 *own_addr_type = ADDR_LE_DEV_RANDOM;
3768 set_random_addr(req, &urpa);
3772 /* If forcing static address is in use or there is no public
3773 * address use the static address as random address (but skip
3774 * the HCI command if the current random address is already the
3777 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3778 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3779 *own_addr_type = ADDR_LE_DEV_RANDOM;
3780 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3781 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3782 &hdev->static_addr);
3786 /* Neither privacy nor static address is being used so use a
3789 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
3803 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3806 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3807 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3808 bacpy(bdaddr, &hdev->static_addr);
3809 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3811 bacpy(bdaddr, &hdev->bdaddr);
3812 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
/* Alloc HCI device: allocate and initialise a struct hci_dev with
 * default parameters, lists, locks, work items and queues. The
 * returned device is not yet registered.
 */
3817 struct hci_dev *hci_alloc_dev(void)
3819 struct hci_dev *hdev;
3821 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Baseline packet types and link policy defaults. */
3825 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3826 hdev->esco_type = (ESCO_HV1);
3827 hdev->link_mode = (HCI_LM_ACCEPT);
3828 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3829 hdev->io_capability = 0x03; /* No Input No Output */
3830 hdev->manufacturer = 0xffff; /* Default to internal use */
3831 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3832 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3834 hdev->sniff_max_interval = 800;
3835 hdev->sniff_min_interval = 80;
/* LE defaults (controller units, e.g. intervals in 0.625/1.25 ms). */
3837 hdev->le_adv_channel_map = 0x07;
3838 hdev->le_scan_interval = 0x0060;
3839 hdev->le_scan_window = 0x0030;
3840 hdev->le_conn_min_interval = 0x0028;
3841 hdev->le_conn_max_interval = 0x0038;
3842 hdev->le_conn_latency = 0x0000;
3843 hdev->le_supv_timeout = 0x002a;
3845 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3846 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3847 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3848 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3850 mutex_init(&hdev->lock);
3851 mutex_init(&hdev->req_lock);
3853 INIT_LIST_HEAD(&hdev->mgmt_pending);
3854 INIT_LIST_HEAD(&hdev->blacklist);
3855 INIT_LIST_HEAD(&hdev->whitelist);
3856 INIT_LIST_HEAD(&hdev->uuids);
3857 INIT_LIST_HEAD(&hdev->link_keys);
3858 INIT_LIST_HEAD(&hdev->long_term_keys);
3859 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3860 INIT_LIST_HEAD(&hdev->remote_oob_data);
3861 INIT_LIST_HEAD(&hdev->le_white_list);
3862 INIT_LIST_HEAD(&hdev->le_conn_params);
3863 INIT_LIST_HEAD(&hdev->pend_le_conns);
3864 INIT_LIST_HEAD(&hdev->pend_le_reports);
3865 INIT_LIST_HEAD(&hdev->conn_hash.list);
3867 INIT_WORK(&hdev->rx_work, hci_rx_work);
3868 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3869 INIT_WORK(&hdev->tx_work, hci_tx_work);
3870 INIT_WORK(&hdev->power_on, hci_power_on);
3872 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3873 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3874 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3876 skb_queue_head_init(&hdev->rx_q);
3877 skb_queue_head_init(&hdev->cmd_q);
3878 skb_queue_head_init(&hdev->raw_q);
3880 init_waitqueue_head(&hdev->req_wait_q);
3882 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3884 hci_init_sysfs(hdev);
3885 discovery_init(hdev);
3889 EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device: drop the device reference; the actual memory is
 * released via the struct device release callback.
 */
3892 void hci_free_dev(struct hci_dev *hdev)
3894 /* will free via device release */
3895 put_device(&hdev->dev);
3897 EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device: validate driver callbacks, allocate an index,
 * set up workqueues, crypto context, sysfs/debugfs, rfkill, and add
 * the device to the global list before queueing the initial power-on.
 * Error paths unwind the resources allocated so far.
 */
3900 int hci_register_dev(struct hci_dev *hdev)
3904 if (!hdev->open || !hdev->close || !hdev->send)
3907 /* Do not allow HCI_AMP devices to register at index 0,
3908 * so the index can be used as the AMP controller ID.
3910 switch (hdev->dev_type) {
3912 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3915 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3924 sprintf(hdev->name, "hci%d", id);
3927 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3929 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3930 WQ_MEM_RECLAIM, 1, hdev->name);
3931 if (!hdev->workqueue) {
3936 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3937 WQ_MEM_RECLAIM, 1, hdev->name);
3938 if (!hdev->req_workqueue) {
3939 destroy_workqueue(hdev->workqueue);
3944 if (!IS_ERR_OR_NULL(bt_debugfs))
3945 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3947 dev_set_name(&hdev->dev, "%s", hdev->name);
/* AES cipher used for RPA generation/resolution. */
3949 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3951 if (IS_ERR(hdev->tfm_aes)) {
3952 BT_ERR("Unable to create crypto context");
3953 error = PTR_ERR(hdev->tfm_aes);
3954 hdev->tfm_aes = NULL;
3958 error = device_add(&hdev->dev);
3962 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3963 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3966 if (rfkill_register(hdev->rfkill) < 0) {
3967 rfkill_destroy(hdev->rfkill);
3968 hdev->rfkill = NULL;
3972 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3973 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3975 set_bit(HCI_SETUP, &hdev->dev_flags);
3976 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3978 if (hdev->dev_type == HCI_BREDR) {
3979 /* Assume BR/EDR support until proven otherwise (such as
3980 * through reading supported features during init.
3982 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3985 write_lock(&hci_dev_list_lock);
3986 list_add(&hdev->list, &hci_dev_list);
3987 write_unlock(&hci_dev_list_lock);
3989 /* Devices that are marked for raw-only usage are unconfigured
3990 * and should not be included in normal operation.
3992 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3993 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3995 hci_notify(hdev, HCI_DEV_REG);
3998 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind labels follow: free crypto, workqueues, index. */
4003 crypto_free_blkcipher(hdev->tfm_aes);
4005 destroy_workqueue(hdev->workqueue);
4006 destroy_workqueue(hdev->req_workqueue);
4008 ida_simple_remove(&hci_index_ida, hdev->id);
4012 EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device: tear down in reverse order of registration —
 * detach from the global list, close the device, flush pending work,
 * notify mgmt, release rfkill/crypto/sysfs/debugfs/workqueues, clear
 * all stored keys and lists, and release the index.
 */
4015 void hci_unregister_dev(struct hci_dev *hdev)
4019 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4021 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4025 write_lock(&hci_dev_list_lock);
4026 list_del(&hdev->list);
4027 write_unlock(&hci_dev_list_lock);
4029 hci_dev_do_close(hdev);
4031 for (i = 0; i < NUM_REASSEMBLY; i++)
4032 kfree_skb(hdev->reassembly[i]);
4034 cancel_work_sync(&hdev->power_on);
/* Only announce removal to mgmt for fully set-up devices. */
4036 if (!test_bit(HCI_INIT, &hdev->flags) &&
4037 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4038 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4040 mgmt_index_removed(hdev);
4041 hci_dev_unlock(hdev);
4044 /* mgmt_index_removed should take care of emptying the
4046 BUG_ON(!list_empty(&hdev->mgmt_pending));
4048 hci_notify(hdev, HCI_DEV_UNREG);
4051 rfkill_unregister(hdev->rfkill);
4052 rfkill_destroy(hdev->rfkill);
4056 crypto_free_blkcipher(hdev->tfm_aes);
4058 device_del(&hdev->dev);
4060 debugfs_remove_recursive(hdev->debugfs);
4062 destroy_workqueue(hdev->workqueue);
4063 destroy_workqueue(hdev->req_workqueue);
4066 hci_bdaddr_list_clear(&hdev->blacklist);
4067 hci_bdaddr_list_clear(&hdev->whitelist);
4068 hci_uuids_clear(hdev);
4069 hci_link_keys_clear(hdev);
4070 hci_smp_ltks_clear(hdev);
4071 hci_smp_irks_clear(hdev);
4072 hci_remote_oob_data_clear(hdev);
4073 hci_bdaddr_list_clear(&hdev->le_white_list);
4074 hci_conn_params_clear_all(hdev);
4075 hci_dev_unlock(hdev);
4079 ida_simple_remove(&hci_index_ida, id);
4081 EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device: notify listeners via the socket layer. */
4084 int hci_suspend_dev(struct hci_dev *hdev)
4086 hci_notify(hdev, HCI_DEV_SUSPEND);
4089 EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device: notify listeners via the socket layer. */
4092 int hci_resume_dev(struct hci_dev *hdev)
4094 hci_notify(hdev, HCI_DEV_RESUME);
4097 EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers: drop frames unless the device is up
 * or initialising, tag and timestamp the skb, then hand it to the RX
 * work queue for processing.
 */
4100 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4102 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4103 && !test_bit(HCI_INIT, &hdev->flags))) {
4109 bt_cb(skb)->incoming = 1;
4112 __net_timestamp(skb);
4114 skb_queue_tail(&hdev->rx_q, skb);
4115 queue_work(hdev->workqueue, &hdev->rx_work);
4119 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a (possibly fragmented) HCI packet of @type from @data
 * into the per-device reassembly slot @index. Once the expected
 * length (from the packet's own header) has been accumulated, the
 * complete frame is pushed into hci_recv_frame().
 */
4121 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4122 int count, __u8 index)
4127 struct sk_buff *skb;
4128 struct bt_skb_cb *scb;
4130 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4131 index >= NUM_REASSEMBLY)
4134 skb = hdev->reassembly[index];
/* Start of a new packet: pick max size and header length per type. */
4138 case HCI_ACLDATA_PKT:
4139 len = HCI_MAX_FRAME_SIZE;
4140 hlen = HCI_ACL_HDR_SIZE;
4143 len = HCI_MAX_EVENT_SIZE;
4144 hlen = HCI_EVENT_HDR_SIZE;
4146 case HCI_SCODATA_PKT:
4147 len = HCI_MAX_SCO_SIZE;
4148 hlen = HCI_SCO_HDR_SIZE;
4152 skb = bt_skb_alloc(len, GFP_ATOMIC);
4156 scb = (void *) skb->cb;
4158 scb->pkt_type = type;
4160 hdev->reassembly[index] = skb;
4164 scb = (void *) skb->cb;
4165 len = min_t(uint, scb->expect, count);
4167 memcpy(skb_put(skb, len), data, len);
/* Once the header is complete, read the payload length from it. */
4176 if (skb->len == HCI_EVENT_HDR_SIZE) {
4177 struct hci_event_hdr *h = hci_event_hdr(skb);
4178 scb->expect = h->plen;
4180 if (skb_tailroom(skb) < scb->expect) {
4182 hdev->reassembly[index] = NULL;
4188 case HCI_ACLDATA_PKT:
4189 if (skb->len == HCI_ACL_HDR_SIZE) {
4190 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4191 scb->expect = __le16_to_cpu(h->dlen);
4193 if (skb_tailroom(skb) < scb->expect) {
4195 hdev->reassembly[index] = NULL;
4201 case HCI_SCODATA_PKT:
4202 if (skb->len == HCI_SCO_HDR_SIZE) {
4203 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4204 scb->expect = h->dlen;
4206 if (skb_tailroom(skb) < scb->expect) {
4208 hdev->reassembly[index] = NULL;
4215 if (scb->expect == 0) {
4216 /* Complete frame */
4218 bt_cb(skb)->pkt_type = type;
4219 hci_recv_frame(hdev, skb);
4221 hdev->reassembly[index] = NULL;
/* Feed driver-provided fragments of a typed HCI packet into the
 * reassembly machinery (slot chosen by packet type).
 */
4229 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4233 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4237 rem = hci_reassembly(hdev, type, data, count, type - 1);
4241 data += (count - rem);
4247 EXPORT_SYMBOL(hci_recv_fragment);
/* Reassembly slot used for byte-stream transports, where the packet
 * type indicator is in-band as the first byte of each frame. */
4249 #define STREAM_REASSEMBLY 0
/* Feed an untyped byte stream (type indicator in-band) into the
 * reassembly machinery using the dedicated stream slot.
 */
4251 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4257 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4260 struct { char type; } *pkt;
4262 /* Start of the frame */
4269 type = bt_cb(skb)->pkt_type;
4271 rem = hci_reassembly(hdev, type, data, count,
4276 data += (count - rem);
4282 EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback structure. */
4286 int hci_register_cb(struct hci_cb *cb)
4288 BT_DBG("%p name %s", cb, cb->name);
4290 write_lock(&hci_cb_list_lock);
4291 list_add(&cb->list, &hci_cb_list);
4292 write_unlock(&hci_cb_list_lock);
4296 EXPORT_SYMBOL(hci_register_cb);
/* Unregister an upper-protocol callback structure. */
4298 int hci_unregister_cb(struct hci_cb *cb)
4300 BT_DBG("%p name %s", cb, cb->name);
4302 write_lock(&hci_cb_list_lock);
4303 list_del(&cb->list);
4304 write_unlock(&hci_cb_list_lock);
4308 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one frame to the driver's send callback, after timestamping it
 * and mirroring a copy to the monitor (and, in promiscuous mode, to
 * the raw sockets).
 */
4310 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4314 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4317 __net_timestamp(skb);
4319 /* Send copy to monitor */
4320 hci_send_to_monitor(hdev, skb);
4322 if (atomic_read(&hdev->promisc)) {
4323 /* Send copy to the sockets */
4324 hci_send_to_sock(hdev, skb);
4327 /* Get rid of skb owner, prior to sending to the driver. */
4330 err = hdev->send(hdev, skb);
4332 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
/* Initialise an HCI request: empty command queue tied to @hdev. */
4337 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4339 skb_queue_head_init(&req->cmd_q);
/* Submit a built request: attach the completion callback to the last
 * queued command and splice the request's commands onto the device
 * command queue, then kick the command worker.
 */
4344 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4346 struct hci_dev *hdev = req->hdev;
4347 struct sk_buff *skb;
4348 unsigned long flags;
4350 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4352 /* If an error occurred during request building, remove all HCI
4353 * commands queued on the HCI request queue.
4356 skb_queue_purge(&req->cmd_q);
4360 /* Do not allow empty requests */
4361 if (skb_queue_empty(&req->cmd_q))
4364 skb = skb_peek_tail(&req->cmd_q);
4365 bt_cb(skb)->req.complete = complete;
4367 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4368 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4369 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4371 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build an skb carrying one HCI command: header (opcode + plen)
 * followed by @plen bytes of parameters.
 */
4376 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4377 u32 plen, const void *param)
4379 int len = HCI_COMMAND_HDR_SIZE + plen;
4380 struct hci_command_hdr *hdr;
4381 struct sk_buff *skb;
4383 skb = bt_skb_alloc(len, GFP_ATOMIC);
4387 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4388 hdr->opcode = cpu_to_le16(opcode);
4392 memcpy(skb_put(skb, plen), param, plen);
4394 BT_DBG("skb len %d", skb->len);
4396 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
/* Send HCI command: build a stand-alone command, mark it as the start
 * of a single-command request, queue it and kick the command worker.
 */
4402 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4405 struct sk_buff *skb;
4407 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4409 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4411 BT_ERR("%s no memory for command", hdev->name);
4415 /* Stand-alone HCI commands must be flagged as
4416 * single-command requests.
4418 bt_cb(skb)->req.start = true;
4420 skb_queue_tail(&hdev->cmd_q, skb);
4421 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Queue a command to an asynchronous HCI request, optionally naming
 * the event expected to complete it. The first command queued on a
 * request is marked as the request start.
 */
4427 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4428 const void *param, u8 event)
4430 struct hci_dev *hdev = req->hdev;
4431 struct sk_buff *skb;
4433 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4435 /* If an error occurred during request building, there is no point in
4436 * queueing the HCI command. We can simply return.
4441 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4443 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4444 hdev->name, opcode);
4449 if (skb_queue_empty(&req->cmd_q))
4450 bt_cb(skb)->req.start = true;
4452 bt_cb(skb)->req.event = event;
4454 skb_queue_tail(&req->cmd_q, skb);
4457 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4460 hci_req_add_ev(req, opcode, plen, param, 0);
4463 /* Get data from the previously sent command */
4464 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4466 struct hci_command_hdr *hdr;
4468 if (!hdev->sent_cmd)
4471 hdr = (void *) hdev->sent_cmd->data;
4473 if (hdr->opcode != cpu_to_le16(opcode))
4476 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4478 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4482 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4484 struct hci_acl_hdr *hdr;
4487 skb_push(skb, HCI_ACL_HDR_SIZE);
4488 skb_reset_transport_header(skb);
4489 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4490 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4491 hdr->dlen = cpu_to_le16(len);
4494 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4495 struct sk_buff *skb, __u16 flags)
4497 struct hci_conn *conn = chan->conn;
4498 struct hci_dev *hdev = conn->hdev;
4499 struct sk_buff *list;
4501 skb->len = skb_headlen(skb);
4504 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4506 switch (hdev->dev_type) {
4508 hci_add_acl_hdr(skb, conn->handle, flags);
4511 hci_add_acl_hdr(skb, chan->handle, flags);
4514 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4518 list = skb_shinfo(skb)->frag_list;
4520 /* Non fragmented */
4521 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4523 skb_queue_tail(queue, skb);
4526 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4528 skb_shinfo(skb)->frag_list = NULL;
4530 /* Queue all fragments atomically */
4531 spin_lock(&queue->lock);
4533 __skb_queue_tail(queue, skb);
4535 flags &= ~ACL_START;
4538 skb = list; list = list->next;
4540 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4541 hci_add_acl_hdr(skb, conn->handle, flags);
4543 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4545 __skb_queue_tail(queue, skb);
4548 spin_unlock(&queue->lock);
4552 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4554 struct hci_dev *hdev = chan->conn->hdev;
4556 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4558 hci_queue_acl(chan, &chan->data_q, skb, flags);
4560 queue_work(hdev->workqueue, &hdev->tx_work);
4564 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4566 struct hci_dev *hdev = conn->hdev;
4567 struct hci_sco_hdr hdr;
4569 BT_DBG("%s len %d", hdev->name, skb->len);
4571 hdr.handle = cpu_to_le16(conn->handle);
4572 hdr.dlen = skb->len;
4574 skb_push(skb, HCI_SCO_HDR_SIZE);
4575 skb_reset_transport_header(skb);
4576 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4578 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4580 skb_queue_tail(&conn->data_q, skb);
4581 queue_work(hdev->workqueue, &hdev->tx_work);
4584 /* ---- HCI TX task (outgoing data) ---- */
4586 /* HCI Connection scheduler */
4587 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4590 struct hci_conn_hash *h = &hdev->conn_hash;
4591 struct hci_conn *conn = NULL, *c;
4592 unsigned int num = 0, min = ~0;
4594 /* We don't have to lock device here. Connections are always
4595 * added and removed with TX task disabled. */
4599 list_for_each_entry_rcu(c, &h->list, list) {
4600 if (c->type != type || skb_queue_empty(&c->data_q))
4603 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4608 if (c->sent < min) {
4613 if (hci_conn_num(hdev, type) == num)
4622 switch (conn->type) {
4624 cnt = hdev->acl_cnt;
4628 cnt = hdev->sco_cnt;
4631 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4635 BT_ERR("Unknown link type");
4643 BT_DBG("conn %p quote %d", conn, *quote);
4647 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4649 struct hci_conn_hash *h = &hdev->conn_hash;
4652 BT_ERR("%s link tx timeout", hdev->name);
4656 /* Kill stalled connections */
4657 list_for_each_entry_rcu(c, &h->list, list) {
4658 if (c->type == type && c->sent) {
4659 BT_ERR("%s killing stalled connection %pMR",
4660 hdev->name, &c->dst);
4661 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4668 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4671 struct hci_conn_hash *h = &hdev->conn_hash;
4672 struct hci_chan *chan = NULL;
4673 unsigned int num = 0, min = ~0, cur_prio = 0;
4674 struct hci_conn *conn;
4675 int cnt, q, conn_num = 0;
4677 BT_DBG("%s", hdev->name);
4681 list_for_each_entry_rcu(conn, &h->list, list) {
4682 struct hci_chan *tmp;
4684 if (conn->type != type)
4687 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4692 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4693 struct sk_buff *skb;
4695 if (skb_queue_empty(&tmp->data_q))
4698 skb = skb_peek(&tmp->data_q);
4699 if (skb->priority < cur_prio)
4702 if (skb->priority > cur_prio) {
4705 cur_prio = skb->priority;
4710 if (conn->sent < min) {
4716 if (hci_conn_num(hdev, type) == conn_num)
4725 switch (chan->conn->type) {
4727 cnt = hdev->acl_cnt;
4730 cnt = hdev->block_cnt;
4734 cnt = hdev->sco_cnt;
4737 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4741 BT_ERR("Unknown link type");
4746 BT_DBG("chan %p quote %d", chan, *quote);
4750 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4752 struct hci_conn_hash *h = &hdev->conn_hash;
4753 struct hci_conn *conn;
4756 BT_DBG("%s", hdev->name);
4760 list_for_each_entry_rcu(conn, &h->list, list) {
4761 struct hci_chan *chan;
4763 if (conn->type != type)
4766 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4771 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4772 struct sk_buff *skb;
4779 if (skb_queue_empty(&chan->data_q))
4782 skb = skb_peek(&chan->data_q);
4783 if (skb->priority >= HCI_PRIO_MAX - 1)
4786 skb->priority = HCI_PRIO_MAX - 1;
4788 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4792 if (hci_conn_num(hdev, type) == num)
4800 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4802 /* Calculate count of blocks used by this packet */
4803 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4806 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4808 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4809 /* ACL tx timeout must be longer than maximum
4810 * link supervision timeout (40.9 seconds) */
4811 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4812 HCI_ACL_TX_TIMEOUT))
4813 hci_link_tx_to(hdev, ACL_LINK);
4817 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4819 unsigned int cnt = hdev->acl_cnt;
4820 struct hci_chan *chan;
4821 struct sk_buff *skb;
4824 __check_timeout(hdev, cnt);
4826 while (hdev->acl_cnt &&
4827 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4828 u32 priority = (skb_peek(&chan->data_q))->priority;
4829 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4830 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4831 skb->len, skb->priority);
4833 /* Stop if priority has changed */
4834 if (skb->priority < priority)
4837 skb = skb_dequeue(&chan->data_q);
4839 hci_conn_enter_active_mode(chan->conn,
4840 bt_cb(skb)->force_active);
4842 hci_send_frame(hdev, skb);
4843 hdev->acl_last_tx = jiffies;
4851 if (cnt != hdev->acl_cnt)
4852 hci_prio_recalculate(hdev, ACL_LINK);
4855 static void hci_sched_acl_blk(struct hci_dev *hdev)
4857 unsigned int cnt = hdev->block_cnt;
4858 struct hci_chan *chan;
4859 struct sk_buff *skb;
4863 __check_timeout(hdev, cnt);
4865 BT_DBG("%s", hdev->name);
4867 if (hdev->dev_type == HCI_AMP)
4872 while (hdev->block_cnt > 0 &&
4873 (chan = hci_chan_sent(hdev, type, "e))) {
4874 u32 priority = (skb_peek(&chan->data_q))->priority;
4875 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4878 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4879 skb->len, skb->priority);
4881 /* Stop if priority has changed */
4882 if (skb->priority < priority)
4885 skb = skb_dequeue(&chan->data_q);
4887 blocks = __get_blocks(hdev, skb);
4888 if (blocks > hdev->block_cnt)
4891 hci_conn_enter_active_mode(chan->conn,
4892 bt_cb(skb)->force_active);
4894 hci_send_frame(hdev, skb);
4895 hdev->acl_last_tx = jiffies;
4897 hdev->block_cnt -= blocks;
4900 chan->sent += blocks;
4901 chan->conn->sent += blocks;
4905 if (cnt != hdev->block_cnt)
4906 hci_prio_recalculate(hdev, type);
4909 static void hci_sched_acl(struct hci_dev *hdev)
4911 BT_DBG("%s", hdev->name);
4913 /* No ACL link over BR/EDR controller */
4914 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4917 /* No AMP link over AMP controller */
4918 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4921 switch (hdev->flow_ctl_mode) {
4922 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4923 hci_sched_acl_pkt(hdev);
4926 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4927 hci_sched_acl_blk(hdev);
4933 static void hci_sched_sco(struct hci_dev *hdev)
4935 struct hci_conn *conn;
4936 struct sk_buff *skb;
4939 BT_DBG("%s", hdev->name);
4941 if (!hci_conn_num(hdev, SCO_LINK))
4944 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4945 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4946 BT_DBG("skb %p len %d", skb, skb->len);
4947 hci_send_frame(hdev, skb);
4950 if (conn->sent == ~0)
4956 static void hci_sched_esco(struct hci_dev *hdev)
4958 struct hci_conn *conn;
4959 struct sk_buff *skb;
4962 BT_DBG("%s", hdev->name);
4964 if (!hci_conn_num(hdev, ESCO_LINK))
4967 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4969 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4970 BT_DBG("skb %p len %d", skb, skb->len);
4971 hci_send_frame(hdev, skb);
4974 if (conn->sent == ~0)
4980 static void hci_sched_le(struct hci_dev *hdev)
4982 struct hci_chan *chan;
4983 struct sk_buff *skb;
4984 int quote, cnt, tmp;
4986 BT_DBG("%s", hdev->name);
4988 if (!hci_conn_num(hdev, LE_LINK))
4991 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4992 /* LE tx timeout must be longer than maximum
4993 * link supervision timeout (40.9 seconds) */
4994 if (!hdev->le_cnt && hdev->le_pkts &&
4995 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4996 hci_link_tx_to(hdev, LE_LINK);
4999 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5001 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
5002 u32 priority = (skb_peek(&chan->data_q))->priority;
5003 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5004 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5005 skb->len, skb->priority);
5007 /* Stop if priority has changed */
5008 if (skb->priority < priority)
5011 skb = skb_dequeue(&chan->data_q);
5013 hci_send_frame(hdev, skb);
5014 hdev->le_last_tx = jiffies;
5025 hdev->acl_cnt = cnt;
5028 hci_prio_recalculate(hdev, LE_LINK);
5031 static void hci_tx_work(struct work_struct *work)
5033 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5034 struct sk_buff *skb;
5036 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5037 hdev->sco_cnt, hdev->le_cnt);
5039 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5040 /* Schedule queues and send stuff to HCI driver */
5041 hci_sched_acl(hdev);
5042 hci_sched_sco(hdev);
5043 hci_sched_esco(hdev);
5047 /* Send next queued raw (unknown type) packet */
5048 while ((skb = skb_dequeue(&hdev->raw_q)))
5049 hci_send_frame(hdev, skb);
5052 /* ----- HCI RX task (incoming data processing) ----- */
5054 /* ACL data packet */
5055 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5057 struct hci_acl_hdr *hdr = (void *) skb->data;
5058 struct hci_conn *conn;
5059 __u16 handle, flags;
5061 skb_pull(skb, HCI_ACL_HDR_SIZE);
5063 handle = __le16_to_cpu(hdr->handle);
5064 flags = hci_flags(handle);
5065 handle = hci_handle(handle);
5067 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5070 hdev->stat.acl_rx++;
5073 conn = hci_conn_hash_lookup_handle(hdev, handle);
5074 hci_dev_unlock(hdev);
5077 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5079 /* Send to upper protocol */
5080 l2cap_recv_acldata(conn, skb, flags);
5083 BT_ERR("%s ACL packet for unknown connection handle %d",
5084 hdev->name, handle);
5090 /* SCO data packet */
5091 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5093 struct hci_sco_hdr *hdr = (void *) skb->data;
5094 struct hci_conn *conn;
5097 skb_pull(skb, HCI_SCO_HDR_SIZE);
5099 handle = __le16_to_cpu(hdr->handle);
5101 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5103 hdev->stat.sco_rx++;
5106 conn = hci_conn_hash_lookup_handle(hdev, handle);
5107 hci_dev_unlock(hdev);
5110 /* Send to upper protocol */
5111 sco_recv_scodata(conn, skb);
5114 BT_ERR("%s SCO packet for unknown connection handle %d",
5115 hdev->name, handle);
5121 static bool hci_req_is_complete(struct hci_dev *hdev)
5123 struct sk_buff *skb;
5125 skb = skb_peek(&hdev->cmd_q);
5129 return bt_cb(skb)->req.start;
5132 static void hci_resend_last(struct hci_dev *hdev)
5134 struct hci_command_hdr *sent;
5135 struct sk_buff *skb;
5138 if (!hdev->sent_cmd)
5141 sent = (void *) hdev->sent_cmd->data;
5142 opcode = __le16_to_cpu(sent->opcode);
5143 if (opcode == HCI_OP_RESET)
5146 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5150 skb_queue_head(&hdev->cmd_q, skb);
5151 queue_work(hdev->workqueue, &hdev->cmd_work);
5154 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5156 hci_req_complete_t req_complete = NULL;
5157 struct sk_buff *skb;
5158 unsigned long flags;
5160 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5162 /* If the completed command doesn't match the last one that was
5163 * sent we need to do special handling of it.
5165 if (!hci_sent_cmd_data(hdev, opcode)) {
5166 /* Some CSR based controllers generate a spontaneous
5167 * reset complete event during init and any pending
5168 * command will never be completed. In such a case we
5169 * need to resend whatever was the last sent
5172 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5173 hci_resend_last(hdev);
5178 /* If the command succeeded and there's still more commands in
5179 * this request the request is not yet complete.
5181 if (!status && !hci_req_is_complete(hdev))
5184 /* If this was the last command in a request the complete
5185 * callback would be found in hdev->sent_cmd instead of the
5186 * command queue (hdev->cmd_q).
5188 if (hdev->sent_cmd) {
5189 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5192 /* We must set the complete callback to NULL to
5193 * avoid calling the callback more than once if
5194 * this function gets called again.
5196 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5202 /* Remove all pending commands belonging to this request */
5203 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5204 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5205 if (bt_cb(skb)->req.start) {
5206 __skb_queue_head(&hdev->cmd_q, skb);
5210 req_complete = bt_cb(skb)->req.complete;
5213 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5217 req_complete(hdev, status);
5220 static void hci_rx_work(struct work_struct *work)
5222 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5223 struct sk_buff *skb;
5225 BT_DBG("%s", hdev->name);
5227 while ((skb = skb_dequeue(&hdev->rx_q))) {
5228 /* Send copy to monitor */
5229 hci_send_to_monitor(hdev, skb);
5231 if (atomic_read(&hdev->promisc)) {
5232 /* Send copy to the sockets */
5233 hci_send_to_sock(hdev, skb);
5236 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5241 if (test_bit(HCI_INIT, &hdev->flags)) {
5242 /* Don't process data packets in this states. */
5243 switch (bt_cb(skb)->pkt_type) {
5244 case HCI_ACLDATA_PKT:
5245 case HCI_SCODATA_PKT:
5252 switch (bt_cb(skb)->pkt_type) {
5254 BT_DBG("%s Event packet", hdev->name);
5255 hci_event_packet(hdev, skb);
5258 case HCI_ACLDATA_PKT:
5259 BT_DBG("%s ACL data packet", hdev->name);
5260 hci_acldata_packet(hdev, skb);
5263 case HCI_SCODATA_PKT:
5264 BT_DBG("%s SCO data packet", hdev->name);
5265 hci_scodata_packet(hdev, skb);
5275 static void hci_cmd_work(struct work_struct *work)
5277 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5278 struct sk_buff *skb;
5280 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5281 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5283 /* Send queued commands */
5284 if (atomic_read(&hdev->cmd_cnt)) {
5285 skb = skb_dequeue(&hdev->cmd_q);
5289 kfree_skb(hdev->sent_cmd);
5291 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5292 if (hdev->sent_cmd) {
5293 atomic_dec(&hdev->cmd_cnt);
5294 hci_send_frame(hdev, skb);
5295 if (test_bit(HCI_RESET, &hdev->flags))
5296 cancel_delayed_work(&hdev->cmd_timer);
5298 schedule_delayed_work(&hdev->cmd_timer,
5301 skb_queue_head(&hdev->cmd_q, skb);
5302 queue_work(hdev->workqueue, &hdev->cmd_work);
5307 void hci_req_add_le_scan_disable(struct hci_request *req)
5309 struct hci_cp_le_set_scan_enable cp;
5311 memset(&cp, 0, sizeof(cp));
5312 cp.enable = LE_SCAN_DISABLE;
5313 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5316 void hci_req_add_le_passive_scan(struct hci_request *req)
5318 struct hci_cp_le_set_scan_param param_cp;
5319 struct hci_cp_le_set_scan_enable enable_cp;
5320 struct hci_dev *hdev = req->hdev;
5323 /* Set require_privacy to false since no SCAN_REQ are send
5324 * during passive scanning. Not using an unresolvable address
5325 * here is important so that peer devices using direct
5326 * advertising with our address will be correctly reported
5327 * by the controller.
5329 if (hci_update_random_address(req, false, &own_addr_type))
5332 memset(¶m_cp, 0, sizeof(param_cp));
5333 param_cp.type = LE_SCAN_PASSIVE;
5334 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5335 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5336 param_cp.own_address_type = own_addr_type;
5337 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5340 memset(&enable_cp, 0, sizeof(enable_cp));
5341 enable_cp.enable = LE_SCAN_ENABLE;
5342 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5343 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5347 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5350 BT_DBG("HCI request failed to update background scanning: "
5351 "status 0x%2.2x", status);
5354 /* This function controls the background scanning based on hdev->pend_le_conns
5355 * list. If there are pending LE connection we start the background scanning,
5356 * otherwise we stop it.
5358 * This function requires the caller holds hdev->lock.
5360 void hci_update_background_scan(struct hci_dev *hdev)
5362 struct hci_request req;
5363 struct hci_conn *conn;
5366 if (!test_bit(HCI_UP, &hdev->flags) ||
5367 test_bit(HCI_INIT, &hdev->flags) ||
5368 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5369 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5370 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5371 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5374 /* No point in doing scanning if LE support hasn't been enabled */
5375 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5378 /* If discovery is active don't interfere with it */
5379 if (hdev->discovery.state != DISCOVERY_STOPPED)
5382 hci_req_init(&req, hdev);
5384 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5385 list_empty(&hdev->pend_le_conns) &&
5386 list_empty(&hdev->pend_le_reports)) {
5387 /* If there is no pending LE connections or devices
5388 * to be scanned for, we should stop the background
5392 /* If controller is not scanning we are done. */
5393 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5396 hci_req_add_le_scan_disable(&req);
5398 BT_DBG("%s stopping background scanning", hdev->name);
5400 /* If there is at least one pending LE connection, we should
5401 * keep the background scan running.
5404 /* If controller is connecting, we should not start scanning
5405 * since some controllers are not able to scan and connect at
5408 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5412 /* If controller is currently scanning, we stop it to ensure we
5413 * don't miss any advertising (due to duplicates filter).
5415 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5416 hci_req_add_le_scan_disable(&req);
5418 hci_req_add_le_passive_scan(&req);
5420 BT_DBG("%s starting background scanning", hdev->name);
5423 err = hci_req_run(&req, update_background_scan_complete);
5425 BT_ERR("Failed to run HCI request: err %d", err);