/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <asm/unaligned.h>
34 #include <net/bluetooth/bluetooth.h>
35 #include <net/bluetooth/hci_core.h>
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

/* Forward a device-level event (up/down/reg/unreg) to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
59 /* ---- HCI debugfs entries ---- */
61 static int features_show(struct seq_file *f, void *ptr)
63 struct hci_dev *hdev = f->private;
67 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
68 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
69 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
70 hdev->features[p][0], hdev->features[p][1],
71 hdev->features[p][2], hdev->features[p][3],
72 hdev->features[p][4], hdev->features[p][5],
73 hdev->features[p][6], hdev->features[p][7]);
75 if (lmp_le_capable(hdev))
76 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
77 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
78 hdev->le_features[0], hdev->le_features[1],
79 hdev->le_features[2], hdev->le_features[3],
80 hdev->le_features[4], hdev->le_features[5],
81 hdev->le_features[6], hdev->le_features[7]);
87 static int features_open(struct inode *inode, struct file *file)
89 return single_open(file, features_show, inode->i_private);
92 static const struct file_operations features_fops = {
93 .open = features_open,
96 .release = single_release,
99 static int blacklist_show(struct seq_file *f, void *p)
101 struct hci_dev *hdev = f->private;
102 struct bdaddr_list *b;
105 list_for_each_entry(b, &hdev->blacklist, list)
106 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
107 hci_dev_unlock(hdev);
112 static int blacklist_open(struct inode *inode, struct file *file)
114 return single_open(file, blacklist_show, inode->i_private);
117 static const struct file_operations blacklist_fops = {
118 .open = blacklist_open,
121 .release = single_release,
124 static int uuids_show(struct seq_file *f, void *p)
126 struct hci_dev *hdev = f->private;
127 struct bt_uuid *uuid;
130 list_for_each_entry(uuid, &hdev->uuids, list) {
132 u16 data1, data2, data3, data4;
134 data5 = get_unaligned_le32(uuid);
135 data4 = get_unaligned_le16(uuid + 4);
136 data3 = get_unaligned_le16(uuid + 6);
137 data2 = get_unaligned_le16(uuid + 8);
138 data1 = get_unaligned_le16(uuid + 10);
139 data0 = get_unaligned_le32(uuid + 12);
141 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
142 data0, data1, data2, data3, data4, data5);
144 hci_dev_unlock(hdev);
149 static int uuids_open(struct inode *inode, struct file *file)
151 return single_open(file, uuids_show, inode->i_private);
154 static const struct file_operations uuids_fops = {
158 .release = single_release,
161 static int inquiry_cache_show(struct seq_file *f, void *p)
163 struct hci_dev *hdev = f->private;
164 struct discovery_state *cache = &hdev->discovery;
165 struct inquiry_entry *e;
169 list_for_each_entry(e, &cache->all, all) {
170 struct inquiry_data *data = &e->data;
171 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
173 data->pscan_rep_mode, data->pscan_period_mode,
174 data->pscan_mode, data->dev_class[2],
175 data->dev_class[1], data->dev_class[0],
176 __le16_to_cpu(data->clock_offset),
177 data->rssi, data->ssp_mode, e->timestamp);
180 hci_dev_unlock(hdev);
185 static int inquiry_cache_open(struct inode *inode, struct file *file)
187 return single_open(file, inquiry_cache_show, inode->i_private);
190 static const struct file_operations inquiry_cache_fops = {
191 .open = inquiry_cache_open,
194 .release = single_release,
197 static int link_keys_show(struct seq_file *f, void *ptr)
199 struct hci_dev *hdev = f->private;
200 struct list_head *p, *n;
203 list_for_each_safe(p, n, &hdev->link_keys) {
204 struct link_key *key = list_entry(p, struct link_key, list);
205 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
206 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
208 hci_dev_unlock(hdev);
213 static int link_keys_open(struct inode *inode, struct file *file)
215 return single_open(file, link_keys_show, inode->i_private);
218 static const struct file_operations link_keys_fops = {
219 .open = link_keys_open,
222 .release = single_release,
225 static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
226 size_t count, loff_t *ppos)
228 struct hci_dev *hdev = file->private_data;
231 buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
234 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
237 static const struct file_operations use_debug_keys_fops = {
239 .read = use_debug_keys_read,
240 .llseek = default_llseek,
243 static int dev_class_show(struct seq_file *f, void *ptr)
245 struct hci_dev *hdev = f->private;
248 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
249 hdev->dev_class[1], hdev->dev_class[0]);
250 hci_dev_unlock(hdev);
255 static int dev_class_open(struct inode *inode, struct file *file)
257 return single_open(file, dev_class_show, inode->i_private);
260 static const struct file_operations dev_class_fops = {
261 .open = dev_class_open,
264 .release = single_release,
267 static int voice_setting_get(void *data, u64 *val)
269 struct hci_dev *hdev = data;
272 *val = hdev->voice_setting;
273 hci_dev_unlock(hdev);
278 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
279 NULL, "0x%4.4llx\n");
281 static int auto_accept_delay_set(void *data, u64 val)
283 struct hci_dev *hdev = data;
286 hdev->auto_accept_delay = val;
287 hci_dev_unlock(hdev);
292 static int auto_accept_delay_get(void *data, u64 *val)
294 struct hci_dev *hdev = data;
297 *val = hdev->auto_accept_delay;
298 hci_dev_unlock(hdev);
303 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
304 auto_accept_delay_set, "%llu\n");
306 static int ssp_debug_mode_set(void *data, u64 val)
308 struct hci_dev *hdev = data;
313 if (val != 0 && val != 1)
316 if (!test_bit(HCI_UP, &hdev->flags))
321 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
322 &mode, HCI_CMD_TIMEOUT);
323 hci_req_unlock(hdev);
328 err = -bt_to_errno(skb->data[0]);
335 hdev->ssp_debug_mode = val;
336 hci_dev_unlock(hdev);
341 static int ssp_debug_mode_get(void *data, u64 *val)
343 struct hci_dev *hdev = data;
346 *val = hdev->ssp_debug_mode;
347 hci_dev_unlock(hdev);
352 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
353 ssp_debug_mode_set, "%llu\n");
355 static int idle_timeout_set(void *data, u64 val)
357 struct hci_dev *hdev = data;
359 if (val != 0 && (val < 500 || val > 3600000))
363 hdev->idle_timeout= val;
364 hci_dev_unlock(hdev);
369 static int idle_timeout_get(void *data, u64 *val)
371 struct hci_dev *hdev = data;
374 *val = hdev->idle_timeout;
375 hci_dev_unlock(hdev);
380 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
381 idle_timeout_set, "%llu\n");
383 static int sniff_min_interval_set(void *data, u64 val)
385 struct hci_dev *hdev = data;
387 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
391 hdev->sniff_min_interval= val;
392 hci_dev_unlock(hdev);
397 static int sniff_min_interval_get(void *data, u64 *val)
399 struct hci_dev *hdev = data;
402 *val = hdev->sniff_min_interval;
403 hci_dev_unlock(hdev);
408 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
409 sniff_min_interval_set, "%llu\n");
411 static int sniff_max_interval_set(void *data, u64 val)
413 struct hci_dev *hdev = data;
415 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
419 hdev->sniff_max_interval= val;
420 hci_dev_unlock(hdev);
425 static int sniff_max_interval_get(void *data, u64 *val)
427 struct hci_dev *hdev = data;
430 *val = hdev->sniff_max_interval;
431 hci_dev_unlock(hdev);
436 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
437 sniff_max_interval_set, "%llu\n");
439 static int static_address_show(struct seq_file *f, void *p)
441 struct hci_dev *hdev = f->private;
444 seq_printf(f, "%pMR\n", &hdev->static_addr);
445 hci_dev_unlock(hdev);
450 static int static_address_open(struct inode *inode, struct file *file)
452 return single_open(file, static_address_show, inode->i_private);
455 static const struct file_operations static_address_fops = {
456 .open = static_address_open,
459 .release = single_release,
462 static int own_address_type_set(void *data, u64 val)
464 struct hci_dev *hdev = data;
466 if (val != 0 && val != 1)
470 hdev->own_addr_type = val;
471 hci_dev_unlock(hdev);
476 static int own_address_type_get(void *data, u64 *val)
478 struct hci_dev *hdev = data;
481 *val = hdev->own_addr_type;
482 hci_dev_unlock(hdev);
487 DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
488 own_address_type_set, "%llu\n");
490 static int long_term_keys_show(struct seq_file *f, void *ptr)
492 struct hci_dev *hdev = f->private;
493 struct list_head *p, *n;
496 list_for_each_safe(p, n, &hdev->link_keys) {
497 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
498 seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\\n",
499 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
500 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
501 8, ltk->rand, 16, ltk->val);
503 hci_dev_unlock(hdev);
508 static int long_term_keys_open(struct inode *inode, struct file *file)
510 return single_open(file, long_term_keys_show, inode->i_private);
513 static const struct file_operations long_term_keys_fops = {
514 .open = long_term_keys_open,
517 .release = single_release,
520 /* ---- HCI requests ---- */
522 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
524 BT_DBG("%s result 0x%2.2x", hdev->name, result);
526 if (hdev->req_status == HCI_REQ_PEND) {
527 hdev->req_result = result;
528 hdev->req_status = HCI_REQ_DONE;
529 wake_up_interruptible(&hdev->req_wait_q);
533 static void hci_req_cancel(struct hci_dev *hdev, int err)
535 BT_DBG("%s err 0x%2.2x", hdev->name, err);
537 if (hdev->req_status == HCI_REQ_PEND) {
538 hdev->req_result = err;
539 hdev->req_status = HCI_REQ_CANCELED;
540 wake_up_interruptible(&hdev->req_wait_q);
544 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
547 struct hci_ev_cmd_complete *ev;
548 struct hci_event_hdr *hdr;
553 skb = hdev->recv_evt;
554 hdev->recv_evt = NULL;
556 hci_dev_unlock(hdev);
559 return ERR_PTR(-ENODATA);
561 if (skb->len < sizeof(*hdr)) {
562 BT_ERR("Too short HCI event");
566 hdr = (void *) skb->data;
567 skb_pull(skb, HCI_EVENT_HDR_SIZE);
570 if (hdr->evt != event)
575 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
576 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
580 if (skb->len < sizeof(*ev)) {
581 BT_ERR("Too short cmd_complete event");
585 ev = (void *) skb->data;
586 skb_pull(skb, sizeof(*ev));
588 if (opcode == __le16_to_cpu(ev->opcode))
591 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
592 __le16_to_cpu(ev->opcode));
596 return ERR_PTR(-ENODATA);
599 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
600 const void *param, u8 event, u32 timeout)
602 DECLARE_WAITQUEUE(wait, current);
603 struct hci_request req;
606 BT_DBG("%s", hdev->name);
608 hci_req_init(&req, hdev);
610 hci_req_add_ev(&req, opcode, plen, param, event);
612 hdev->req_status = HCI_REQ_PEND;
614 err = hci_req_run(&req, hci_req_sync_complete);
618 add_wait_queue(&hdev->req_wait_q, &wait);
619 set_current_state(TASK_INTERRUPTIBLE);
621 schedule_timeout(timeout);
623 remove_wait_queue(&hdev->req_wait_q, &wait);
625 if (signal_pending(current))
626 return ERR_PTR(-EINTR);
628 switch (hdev->req_status) {
630 err = -bt_to_errno(hdev->req_result);
633 case HCI_REQ_CANCELED:
634 err = -hdev->req_result;
642 hdev->req_status = hdev->req_result = 0;
644 BT_DBG("%s end: err %d", hdev->name, err);
649 return hci_get_cmd_complete(hdev, opcode, event);
651 EXPORT_SYMBOL(__hci_cmd_sync_ev);
653 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
654 const void *param, u32 timeout)
656 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
658 EXPORT_SYMBOL(__hci_cmd_sync);
660 /* Execute request and wait for completion. */
661 static int __hci_req_sync(struct hci_dev *hdev,
662 void (*func)(struct hci_request *req,
664 unsigned long opt, __u32 timeout)
666 struct hci_request req;
667 DECLARE_WAITQUEUE(wait, current);
670 BT_DBG("%s start", hdev->name);
672 hci_req_init(&req, hdev);
674 hdev->req_status = HCI_REQ_PEND;
678 err = hci_req_run(&req, hci_req_sync_complete);
680 hdev->req_status = 0;
682 /* ENODATA means the HCI request command queue is empty.
683 * This can happen when a request with conditionals doesn't
684 * trigger any commands to be sent. This is normal behavior
685 * and should not trigger an error return.
693 add_wait_queue(&hdev->req_wait_q, &wait);
694 set_current_state(TASK_INTERRUPTIBLE);
696 schedule_timeout(timeout);
698 remove_wait_queue(&hdev->req_wait_q, &wait);
700 if (signal_pending(current))
703 switch (hdev->req_status) {
705 err = -bt_to_errno(hdev->req_result);
708 case HCI_REQ_CANCELED:
709 err = -hdev->req_result;
717 hdev->req_status = hdev->req_result = 0;
719 BT_DBG("%s end: err %d", hdev->name, err);
724 static int hci_req_sync(struct hci_dev *hdev,
725 void (*req)(struct hci_request *req,
727 unsigned long opt, __u32 timeout)
731 if (!test_bit(HCI_UP, &hdev->flags))
734 /* Serialize all requests */
736 ret = __hci_req_sync(hdev, req, opt, timeout);
737 hci_req_unlock(hdev);
742 static void hci_reset_req(struct hci_request *req, unsigned long opt)
744 BT_DBG("%s %ld", req->hdev->name, opt);
747 set_bit(HCI_RESET, &req->hdev->flags);
748 hci_req_add(req, HCI_OP_RESET, 0, NULL);
751 static void bredr_init(struct hci_request *req)
753 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
755 /* Read Local Supported Features */
756 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
758 /* Read Local Version */
759 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
761 /* Read BD Address */
762 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
765 static void amp_init(struct hci_request *req)
767 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
769 /* Read Local Version */
770 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
772 /* Read Local Supported Commands */
773 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
775 /* Read Local Supported Features */
776 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
778 /* Read Local AMP Info */
779 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
781 /* Read Data Blk size */
782 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
784 /* Read Flow Control Mode */
785 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
787 /* Read Location Data */
788 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
791 static void hci_init1_req(struct hci_request *req, unsigned long opt)
793 struct hci_dev *hdev = req->hdev;
795 BT_DBG("%s %ld", hdev->name, opt);
798 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
799 hci_reset_req(req, 0);
801 switch (hdev->dev_type) {
811 BT_ERR("Unknown device type %d", hdev->dev_type);
816 static void bredr_setup(struct hci_request *req)
818 struct hci_dev *hdev = req->hdev;
823 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
824 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
826 /* Read Class of Device */
827 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
829 /* Read Local Name */
830 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
832 /* Read Voice Setting */
833 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
835 /* Read Number of Supported IAC */
836 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
838 /* Read Current IAC LAP */
839 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
841 /* Clear Event Filters */
842 flt_type = HCI_FLT_CLEAR_ALL;
843 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
845 /* Connection accept timeout ~20 secs */
846 param = __constant_cpu_to_le16(0x7d00);
847 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
849 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
850 * but it does not support page scan related HCI commands.
852 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
853 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
854 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
858 static void le_setup(struct hci_request *req)
860 struct hci_dev *hdev = req->hdev;
862 /* Read LE Buffer Size */
863 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
865 /* Read LE Local Supported Features */
866 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
868 /* Read LE Advertising Channel TX Power */
869 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
871 /* Read LE White List Size */
872 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
874 /* Read LE Supported States */
875 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
877 /* LE-only controllers have LE implicitly enabled */
878 if (!lmp_bredr_capable(hdev))
879 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
882 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
884 if (lmp_ext_inq_capable(hdev))
887 if (lmp_inq_rssi_capable(hdev))
890 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
891 hdev->lmp_subver == 0x0757)
894 if (hdev->manufacturer == 15) {
895 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
897 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
899 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
903 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
904 hdev->lmp_subver == 0x1805)
910 static void hci_setup_inquiry_mode(struct hci_request *req)
914 mode = hci_get_inquiry_mode(req->hdev);
916 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
919 static void hci_setup_event_mask(struct hci_request *req)
921 struct hci_dev *hdev = req->hdev;
923 /* The second byte is 0xff instead of 0x9f (two reserved bits
924 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
927 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
929 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
930 * any event mask for pre 1.2 devices.
932 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
935 if (lmp_bredr_capable(hdev)) {
936 events[4] |= 0x01; /* Flow Specification Complete */
937 events[4] |= 0x02; /* Inquiry Result with RSSI */
938 events[4] |= 0x04; /* Read Remote Extended Features Complete */
939 events[5] |= 0x08; /* Synchronous Connection Complete */
940 events[5] |= 0x10; /* Synchronous Connection Changed */
942 /* Use a different default for LE-only devices */
943 memset(events, 0, sizeof(events));
944 events[0] |= 0x10; /* Disconnection Complete */
945 events[0] |= 0x80; /* Encryption Change */
946 events[1] |= 0x08; /* Read Remote Version Information Complete */
947 events[1] |= 0x20; /* Command Complete */
948 events[1] |= 0x40; /* Command Status */
949 events[1] |= 0x80; /* Hardware Error */
950 events[2] |= 0x04; /* Number of Completed Packets */
951 events[3] |= 0x02; /* Data Buffer Overflow */
952 events[5] |= 0x80; /* Encryption Key Refresh Complete */
955 if (lmp_inq_rssi_capable(hdev))
956 events[4] |= 0x02; /* Inquiry Result with RSSI */
958 if (lmp_sniffsubr_capable(hdev))
959 events[5] |= 0x20; /* Sniff Subrating */
961 if (lmp_pause_enc_capable(hdev))
962 events[5] |= 0x80; /* Encryption Key Refresh Complete */
964 if (lmp_ext_inq_capable(hdev))
965 events[5] |= 0x40; /* Extended Inquiry Result */
967 if (lmp_no_flush_capable(hdev))
968 events[7] |= 0x01; /* Enhanced Flush Complete */
970 if (lmp_lsto_capable(hdev))
971 events[6] |= 0x80; /* Link Supervision Timeout Changed */
973 if (lmp_ssp_capable(hdev)) {
974 events[6] |= 0x01; /* IO Capability Request */
975 events[6] |= 0x02; /* IO Capability Response */
976 events[6] |= 0x04; /* User Confirmation Request */
977 events[6] |= 0x08; /* User Passkey Request */
978 events[6] |= 0x10; /* Remote OOB Data Request */
979 events[6] |= 0x20; /* Simple Pairing Complete */
980 events[7] |= 0x04; /* User Passkey Notification */
981 events[7] |= 0x08; /* Keypress Notification */
982 events[7] |= 0x10; /* Remote Host Supported
983 * Features Notification
987 if (lmp_le_capable(hdev))
988 events[7] |= 0x20; /* LE Meta-Event */
990 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
992 if (lmp_le_capable(hdev)) {
993 memset(events, 0, sizeof(events));
995 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
996 sizeof(events), events);
1000 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1002 struct hci_dev *hdev = req->hdev;
1004 if (lmp_bredr_capable(hdev))
1007 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1009 if (lmp_le_capable(hdev))
1012 hci_setup_event_mask(req);
1014 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1015 * local supported commands HCI command.
1017 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1018 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1020 if (lmp_ssp_capable(hdev)) {
1021 /* When SSP is available, then the host features page
1022 * should also be available as well. However some
1023 * controllers list the max_page as 0 as long as SSP
1024 * has not been enabled. To achieve proper debugging
1025 * output, force the minimum max_page to 1 at least.
1027 hdev->max_page = 0x01;
1029 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1031 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1032 sizeof(mode), &mode);
1034 struct hci_cp_write_eir cp;
1036 memset(hdev->eir, 0, sizeof(hdev->eir));
1037 memset(&cp, 0, sizeof(cp));
1039 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1043 if (lmp_inq_rssi_capable(hdev))
1044 hci_setup_inquiry_mode(req);
1046 if (lmp_inq_tx_pwr_capable(hdev))
1047 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1049 if (lmp_ext_feat_capable(hdev)) {
1050 struct hci_cp_read_local_ext_features cp;
1053 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1057 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1059 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1064 static void hci_setup_link_policy(struct hci_request *req)
1066 struct hci_dev *hdev = req->hdev;
1067 struct hci_cp_write_def_link_policy cp;
1068 u16 link_policy = 0;
1070 if (lmp_rswitch_capable(hdev))
1071 link_policy |= HCI_LP_RSWITCH;
1072 if (lmp_hold_capable(hdev))
1073 link_policy |= HCI_LP_HOLD;
1074 if (lmp_sniff_capable(hdev))
1075 link_policy |= HCI_LP_SNIFF;
1076 if (lmp_park_capable(hdev))
1077 link_policy |= HCI_LP_PARK;
1079 cp.policy = cpu_to_le16(link_policy);
1080 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1083 static void hci_set_le_support(struct hci_request *req)
1085 struct hci_dev *hdev = req->hdev;
1086 struct hci_cp_write_le_host_supported cp;
1088 /* LE-only devices do not support explicit enablement */
1089 if (!lmp_bredr_capable(hdev))
1092 memset(&cp, 0, sizeof(cp));
1094 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1096 cp.simul = lmp_le_br_capable(hdev);
1099 if (cp.le != lmp_host_le_capable(hdev))
1100 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1104 static void hci_set_event_mask_page_2(struct hci_request *req)
1106 struct hci_dev *hdev = req->hdev;
1107 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1109 /* If Connectionless Slave Broadcast master role is supported
1110 * enable all necessary events for it.
1112 if (hdev->features[2][0] & 0x01) {
1113 events[1] |= 0x40; /* Triggered Clock Capture */
1114 events[1] |= 0x80; /* Synchronization Train Complete */
1115 events[2] |= 0x10; /* Slave Page Response Timeout */
1116 events[2] |= 0x20; /* CSB Channel Map Change */
1119 /* If Connectionless Slave Broadcast slave role is supported
1120 * enable all necessary events for it.
1122 if (hdev->features[2][0] & 0x02) {
1123 events[2] |= 0x01; /* Synchronization Train Received */
1124 events[2] |= 0x02; /* CSB Receive */
1125 events[2] |= 0x04; /* CSB Timeout */
1126 events[2] |= 0x08; /* Truncated Page Complete */
1129 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1132 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1134 struct hci_dev *hdev = req->hdev;
1137 /* Some Broadcom based Bluetooth controllers do not support the
1138 * Delete Stored Link Key command. They are clearly indicating its
1139 * absence in the bit mask of supported commands.
1141 * Check the supported commands and only if the the command is marked
1142 * as supported send it. If not supported assume that the controller
1143 * does not have actual support for stored link keys which makes this
1144 * command redundant anyway.
1146 if (hdev->commands[6] & 0x80) {
1147 struct hci_cp_delete_stored_link_key cp;
1149 bacpy(&cp.bdaddr, BDADDR_ANY);
1150 cp.delete_all = 0x01;
1151 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1155 if (hdev->commands[5] & 0x10)
1156 hci_setup_link_policy(req);
1158 if (lmp_le_capable(hdev)) {
1159 /* If the controller has a public BD_ADDR, then by
1160 * default use that one. If this is a LE only
1161 * controller without one, default to the random
1164 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1165 hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1167 hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1169 hci_set_le_support(req);
1172 /* Read features beyond page 1 if available */
1173 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1174 struct hci_cp_read_local_ext_features cp;
1177 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1182 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1184 struct hci_dev *hdev = req->hdev;
1186 /* Set event mask page 2 if the HCI command for it is supported */
1187 if (hdev->commands[22] & 0x04)
1188 hci_set_event_mask_page_2(req);
1190 /* Check for Synchronization Train support */
1191 if (hdev->features[2][0] & 0x04)
1192 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1195 static int __hci_init(struct hci_dev *hdev)
1199 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1203 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1204 * BR/EDR/LE type controllers. AMP controllers only need the
1207 if (hdev->dev_type != HCI_BREDR)
1210 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1214 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1218 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1222 /* Only create debugfs entries during the initial setup
1223 * phase and not every time the controller gets powered on.
1225 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1228 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1230 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1231 &hdev->manufacturer);
1232 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1233 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1234 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1236 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1238 if (lmp_bredr_capable(hdev)) {
1239 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1240 hdev, &inquiry_cache_fops);
1241 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1242 hdev, &link_keys_fops);
1243 debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
1244 hdev, &use_debug_keys_fops);
1245 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1246 hdev, &dev_class_fops);
1247 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1248 hdev, &voice_setting_fops);
1251 if (lmp_ssp_capable(hdev)) {
1252 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1253 hdev, &auto_accept_delay_fops);
1254 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1255 hdev, &ssp_debug_mode_fops);
1258 if (lmp_sniff_capable(hdev)) {
1259 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1260 hdev, &idle_timeout_fops);
1261 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1262 hdev, &sniff_min_interval_fops);
1263 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1264 hdev, &sniff_max_interval_fops);
1267 if (lmp_le_capable(hdev)) {
1268 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1269 &hdev->le_white_list_size);
1270 debugfs_create_file("static_address", 0444, hdev->debugfs,
1271 hdev, &static_address_fops);
1272 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1273 hdev, &own_address_type_fops);
1274 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1275 hdev, &long_term_keys_fops);
1281 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1285 BT_DBG("%s %x", req->hdev->name, scan);
1287 /* Inquiry and Page scans */
1288 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1291 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1295 BT_DBG("%s %x", req->hdev->name, auth);
1297 /* Authentication */
1298 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1301 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1305 BT_DBG("%s %x", req->hdev->name, encrypt);
1308 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1311 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1313 __le16 policy = cpu_to_le16(opt);
1315 BT_DBG("%s %x", req->hdev->name, policy);
1317 /* Default link policy */
1318 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1321 /* Get HCI device by index.
1322 * Device is held on return. */
1323 struct hci_dev *hci_dev_get(int index)
1325 struct hci_dev *hdev = NULL, *d;
1327 BT_DBG("%d", index);
1332 read_lock(&hci_dev_list_lock);
1333 list_for_each_entry(d, &hci_dev_list, list) {
1334 if (d->id == index) {
1335 hdev = hci_dev_hold(d);
1339 read_unlock(&hci_dev_list_lock);
1343 /* ---- Inquiry support ---- */
1345 bool hci_discovery_active(struct hci_dev *hdev)
1347 struct discovery_state *discov = &hdev->discovery;
1349 switch (discov->state) {
1350 case DISCOVERY_FINDING:
1351 case DISCOVERY_RESOLVING:
1359 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1361 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1363 if (hdev->discovery.state == state)
1367 case DISCOVERY_STOPPED:
1368 if (hdev->discovery.state != DISCOVERY_STARTING)
1369 mgmt_discovering(hdev, 0);
1371 case DISCOVERY_STARTING:
1373 case DISCOVERY_FINDING:
1374 mgmt_discovering(hdev, 1);
1376 case DISCOVERY_RESOLVING:
1378 case DISCOVERY_STOPPING:
1382 hdev->discovery.state = state;
/* Drop every entry from the inquiry cache and re-initialise its lists.
 * NOTE(review): the loop body freeing each entry is elided in this view;
 * presumably it list_del()s and kfree()s p — confirm in full file. */
1385 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1387 struct discovery_state *cache = &hdev->discovery;
1388 struct inquiry_entry *p, *n;
1390 list_for_each_entry_safe(p, n, &cache->all, all) {
1395 INIT_LIST_HEAD(&cache->unknown);
1396 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry cache entry by Bluetooth address in the "all" list.
 * NOTE(review): return statements are elided in this view. */
1399 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1402 struct discovery_state *cache = &hdev->discovery;
1403 struct inquiry_entry *e;
1405 BT_DBG("cache %p, %pMR", cache, bdaddr);
1407 list_for_each_entry(e, &cache->all, all) {
1408 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry by address in the "unknown name" list (entries whose
 * remote name has not yet been resolved). */
1415 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1418 struct discovery_state *cache = &hdev->discovery;
1419 struct inquiry_entry *e;
1421 BT_DBG("cache %p, %pMR", cache, bdaddr);
1423 list_for_each_entry(e, &cache->unknown, list) {
1424 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry in the name-resolve list: with BDADDR_ANY, match on the
 * given name_state; otherwise match on the exact address. */
1431 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1435 struct discovery_state *cache = &hdev->discovery;
1436 struct inquiry_entry *e;
1438 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1440 list_for_each_entry(e, &cache->resolve, list) {
1441 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1443 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert @ie into the resolve list so entries stay ordered by signal
 * strength: it is placed before the first non-pending entry whose |rssi|
 * is >= |ie->rssi|, so stronger devices get their names resolved first. */
1450 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1451 struct inquiry_entry *ie)
1453 struct discovery_state *cache = &hdev->discovery;
1454 struct list_head *pos = &cache->resolve;
1455 struct inquiry_entry *p;
1457 list_del(&ie->list);
1459 list_for_each_entry(p, &cache->resolve, list) {
1460 if (p->name_state != NAME_PENDING &&
1461 abs(p->data.rssi) >= abs(ie->data.rssi))
/* NOTE(review): loop body updating "pos" is elided in this view. */
1466 list_add(&ie->list, pos);
/* Insert a new inquiry result into the cache, or refresh an existing
 * entry. Reports the peer's SSP mode through *ssp and maintains the
 * unknown/resolve name lists. NOTE(review): several branch bodies and
 * return statements are elided in this truncated view. */
1469 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1470 bool name_known, bool *ssp)
1472 struct discovery_state *cache = &hdev->discovery;
1473 struct inquiry_entry *ie;
1475 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* A fresh inquiry result invalidates any stored OOB data for the peer. */
1477 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1480 *ssp = data->ssp_mode;
1482 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1484 if (ie->data.ssp_mode && ssp)
/* Existing entry: a changed RSSI while a name lookup is still needed
 * re-sorts the entry within the resolve list. */
1487 if (ie->name_state == NAME_NEEDED &&
1488 data->rssi != ie->data.rssi) {
1489 ie->data.rssi = data->rssi;
1490 hci_inquiry_cache_update_resolve(hdev, ie);
1496 /* Entry not in the cache. Add new one. */
1497 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1501 list_add(&ie->all, &cache->all);
1504 ie->name_state = NAME_KNOWN;
1506 ie->name_state = NAME_NOT_KNOWN;
1507 list_add(&ie->list, &cache->unknown);
/* Name became known: drop the entry from the unknown/resolve list. */
1511 if (name_known && ie->name_state != NAME_KNOWN &&
1512 ie->name_state != NAME_PENDING) {
1513 ie->name_state = NAME_KNOWN;
1514 list_del(&ie->list);
1517 memcpy(&ie->data, data, sizeof(*data));
1518 ie->timestamp = jiffies;
1519 cache->timestamp = jiffies;
1521 if (ie->name_state == NAME_NOT_KNOWN)
/* Serialise up to @num cached inquiry entries into @buf as an array of
 * struct inquiry_info, for the HCIINQUIRY ioctl. Returns the number of
 * entries copied. NOTE(review): the "copied" counter handling and return
 * are elided in this truncated view. */
1527 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1529 struct discovery_state *cache = &hdev->discovery;
1530 struct inquiry_info *info = (struct inquiry_info *) buf;
1531 struct inquiry_entry *e;
1534 list_for_each_entry(e, &cache->all, all) {
1535 struct inquiry_data *data = &e->data;
1540 bacpy(&info->bdaddr, &data->bdaddr);
1541 info->pscan_rep_mode = data->pscan_rep_mode;
1542 info->pscan_period_mode = data->pscan_period_mode;
1543 info->pscan_mode = data->pscan_mode;
1544 memcpy(info->dev_class, data->dev_class, 3);
1545 info->clock_offset = data->clock_offset;
1551 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder for hci_inquiry(): queue an HCI Inquiry command with
 * the LAP/length/num_rsp from the user's request, unless an inquiry is
 * already running on this controller. */
1555 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1557 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1558 struct hci_dev *hdev = req->hdev;
1559 struct hci_cp_inquiry cp;
1561 BT_DBG("%s", hdev->name);
1563 if (test_bit(HCI_INQUIRY, &hdev->flags))
1567 memcpy(&cp.lap, &ir->lap, 3);
1568 cp.length = ir->length;
1569 cp.num_rsp = ir->num_rsp;
1570 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action: sleep interruptibly; a pending signal aborts the
 * wait (non-zero return), which hci_inquiry() maps to -EINTR. */
1573 static int wait_inquiry(void *word)
1576 return signal_pending(current);
/* HCIINQUIRY ioctl: run an inquiry on a BR/EDR controller and copy the
 * results back to user space. Rejects user-channel devices, non-BR/EDR
 * device types, and controllers with BR/EDR disabled. NOTE(review): many
 * error paths, labels and the final return are elided in this view. */
1579 int hci_inquiry(void __user *arg)
1581 __u8 __user *ptr = arg;
1582 struct hci_inquiry_req ir;
1583 struct hci_dev *hdev;
1584 int err = 0, do_inquiry = 0, max_rsp;
1588 if (copy_from_user(&ir, ptr, sizeof(ir)))
1591 hdev = hci_dev_get(ir.dev_id);
1595 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1600 if (hdev->dev_type != HCI_BREDR) {
1605 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Flush the cache and re-inquire if it is stale, empty, or the caller
 * explicitly asked for a flush. */
1611 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1612 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1613 hci_inquiry_cache_flush(hdev);
1616 hci_dev_unlock(hdev);
/* ir.length is in 1.28 s units per the HCI spec hence ~2000 ms each;
 * presumably timeo feeds hci_req_sync below — elided here. */
1618 timeo = ir.length * msecs_to_jiffies(2000);
1621 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1626 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1627 * cleared). If it is interrupted by a signal, return -EINTR.
1629 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1630 TASK_INTERRUPTIBLE))
1634 /* for unlimited number of responses we will use buffer with
1637 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1639 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1640 * copy it to the user space.
1642 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1649 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1650 hci_dev_unlock(hdev);
1652 BT_DBG("num_rsp %d", ir.num_rsp);
/* First copy the updated request header, then the result array. */
1654 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1656 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Core power-on path: open the driver, run controller setup/init, and
 * bring the device UP, notifying mgmt when appropriate. On init failure
 * all work is flushed and queues are purged. NOTE(review): several error
 * labels, early returns and the cleanup tail are elided in this view. */
1669 static int hci_dev_do_open(struct hci_dev *hdev)
1673 BT_DBG("%s %p", hdev->name, hdev);
1677 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1682 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1683 /* Check for rfkill but allow the HCI setup stage to
1684 * proceed (which in itself doesn't cause any RF activity).
1686 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1691 /* Check for valid public address or a configured static
1692 * random adddress, but let the HCI setup proceed to
1693 * be able to determine if there is a public address
1696 * This check is only valid for BR/EDR controllers
1697 * since AMP controllers do not have an address.
1699 if (hdev->dev_type == HCI_BREDR &&
1700 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1701 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1702 ret = -EADDRNOTAVAIL;
1707 if (test_bit(HCI_UP, &hdev->flags)) {
1712 if (hdev->open(hdev)) {
1717 atomic_set(&hdev->cmd_cnt, 1);
1718 set_bit(HCI_INIT, &hdev->flags);
/* Driver-specific setup runs only once, during the HCI_SETUP phase. */
1720 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1721 ret = hdev->setup(hdev);
1724 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1725 set_bit(HCI_RAW, &hdev->flags);
/* Full HCI init only for managed (non-raw, non-user-channel) devices. */
1727 if (!test_bit(HCI_RAW, &hdev->flags) &&
1728 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1729 ret = __hci_init(hdev);
1732 clear_bit(HCI_INIT, &hdev->flags);
1736 set_bit(HCI_UP, &hdev->flags);
1737 hci_notify(hdev, HCI_DEV_UP);
1738 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1739 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1740 hdev->dev_type == HCI_BREDR) {
1742 mgmt_powered(hdev, 1);
1743 hci_dev_unlock(hdev);
1746 /* Init failed, cleanup */
1747 flush_work(&hdev->tx_work);
1748 flush_work(&hdev->cmd_work);
1749 flush_work(&hdev->rx_work);
1751 skb_queue_purge(&hdev->cmd_q);
1752 skb_queue_purge(&hdev->rx_q);
1757 if (hdev->sent_cmd) {
1758 kfree_skb(hdev->sent_cmd);
1759 hdev->sent_cmd = NULL;
1767 hci_req_unlock(hdev);
1771 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl helper: cancel a pending auto-power-off, wait for any
 * in-flight setup work to finish, then perform the actual open.
 * NOTE(review): the return/put path is elided in this view. */
1773 int hci_dev_open(__u16 dev)
1775 struct hci_dev *hdev;
1778 hdev = hci_dev_get(dev);
1782 /* We need to ensure that no other power on/off work is pending
1783 * before proceeding to call hci_dev_do_open. This is
1784 * particularly important if the setup procedure has not yet
1787 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1788 cancel_delayed_work(&hdev->power_off);
1790 /* After this call it is guaranteed that the setup procedure
1791 * has finished. This means that error conditions like RFKILL
1792 * or no valid public or static random address apply.
1794 flush_workqueue(hdev->req_workqueue);
1796 err = hci_dev_do_open(hdev);
/* Core power-off path: tear down discovery/connection state, flush all
 * work, optionally issue an HCI Reset, purge queues and notify mgmt.
 * NOTE(review): locking calls and early-return paths are partially
 * elided in this truncated view. */
1803 static int hci_dev_do_close(struct hci_dev *hdev)
1805 BT_DBG("%s %p", hdev->name, hdev);
1807 cancel_delayed_work(&hdev->power_off);
1809 hci_req_cancel(hdev, ENODEV);
/* Already down: just stop the command timer and bail out. */
1812 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1813 del_timer_sync(&hdev->cmd_timer);
1814 hci_req_unlock(hdev);
1818 /* Flush RX and TX works */
1819 flush_work(&hdev->tx_work);
1820 flush_work(&hdev->rx_work);
1822 if (hdev->discov_timeout > 0) {
1823 cancel_delayed_work(&hdev->discov_off);
1824 hdev->discov_timeout = 0;
1825 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1826 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1829 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1830 cancel_delayed_work(&hdev->service_cache);
1832 cancel_delayed_work_sync(&hdev->le_scan_disable);
1835 hci_inquiry_cache_flush(hdev);
1836 hci_conn_hash_flush(hdev);
1837 hci_dev_unlock(hdev);
1839 hci_notify(hdev, HCI_DEV_DOWN);
1845 skb_queue_purge(&hdev->cmd_q);
1846 atomic_set(&hdev->cmd_cnt, 1);
/* Reset the controller on close when the quirk asks for it, except for
 * raw devices and during the auto-off transition. */
1847 if (!test_bit(HCI_RAW, &hdev->flags) &&
1848 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1849 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1850 set_bit(HCI_INIT, &hdev->flags);
1851 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1852 clear_bit(HCI_INIT, &hdev->flags);
1855 /* flush cmd work */
1856 flush_work(&hdev->cmd_work);
1859 skb_queue_purge(&hdev->rx_q);
1860 skb_queue_purge(&hdev->cmd_q);
1861 skb_queue_purge(&hdev->raw_q);
1863 /* Drop last sent command */
1864 if (hdev->sent_cmd) {
1865 del_timer_sync(&hdev->cmd_timer);
1866 kfree_skb(hdev->sent_cmd);
1867 hdev->sent_cmd = NULL;
1870 kfree_skb(hdev->recv_evt);
1871 hdev->recv_evt = NULL;
1873 /* After this point our queues are empty
1874 * and no tasks are scheduled. */
/* Only flags marked persistent survive a power cycle. */
1879 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1881 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1882 if (hdev->dev_type == HCI_BREDR) {
1884 mgmt_powered(hdev, 0);
1885 hci_dev_unlock(hdev);
1889 /* Controller radio is available but is currently powered down */
1890 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1892 memset(hdev->eir, 0, sizeof(hdev->eir));
1893 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1895 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl helper: reject user-channel devices, cancel pending
 * auto-off work, then perform the actual close. */
1901 int hci_dev_close(__u16 dev)
1903 struct hci_dev *hdev;
1906 hdev = hci_dev_get(dev);
1910 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1915 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1916 cancel_delayed_work(&hdev->power_off);
1918 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: drop all queued traffic and per-connection state,
 * reset flow-control counters, and (for managed devices) issue an HCI
 * Reset command. NOTE(review): error labels and the final return are
 * elided in this view. */
1925 int hci_dev_reset(__u16 dev)
1927 struct hci_dev *hdev;
1930 hdev = hci_dev_get(dev);
1936 if (!test_bit(HCI_UP, &hdev->flags)) {
1941 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1947 skb_queue_purge(&hdev->rx_q);
1948 skb_queue_purge(&hdev->cmd_q);
1951 hci_inquiry_cache_flush(hdev);
1952 hci_conn_hash_flush(hdev);
1953 hci_dev_unlock(hdev);
1958 atomic_set(&hdev->cmd_cnt, 1);
1959 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1961 if (!test_bit(HCI_RAW, &hdev->flags))
1962 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1965 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device's traffic statistics. Rejected for
 * devices bound to a user channel. */
1970 int hci_dev_reset_stat(__u16 dev)
1972 struct hci_dev *hdev;
1975 hdev = hci_dev_get(dev);
1979 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1984 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the per-device HCISET* ioctls (auth, encrypt, scan,
 * link policy/mode, packet type, ACL/SCO MTU). NOTE(review): the switch
 * statement and its case labels are partially elided in this view —
 * the bodies below belong to those cases; confirm in full file. */
1991 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1993 struct hci_dev *hdev;
1994 struct hci_dev_req dr;
1997 if (copy_from_user(&dr, arg, sizeof(dr)))
2000 hdev = hci_dev_get(dr.dev_id);
2004 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2009 if (hdev->dev_type != HCI_BREDR) {
2014 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2021 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2026 if (!lmp_encrypt_capable(hdev)) {
2031 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2032 /* Auth must be enabled first */
2033 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2039 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2044 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2049 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2053 case HCISETLINKMODE:
2054 hdev->link_mode = ((__u16) dr.dev_opt) &
2055 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2059 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits and packet count in the low. */
2063 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2064 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2068 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2069 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: return the id and flags of every registered
 * device, bounded by the caller-supplied dev_num (itself capped at two
 * pages worth of entries). NOTE(review): dev_num's declaration and the
 * loop counter increment are elided in this view. */
2082 int hci_get_dev_list(void __user *arg)
2084 struct hci_dev *hdev;
2085 struct hci_dev_list_req *dl;
2086 struct hci_dev_req *dr;
2087 int n = 0, size, err;
2090 if (get_user(dev_num, (__u16 __user *) arg))
2093 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2096 size = sizeof(*dl) + dev_num * sizeof(*dr);
2098 dl = kzalloc(size, GFP_KERNEL);
2104 read_lock(&hci_dev_list_lock);
2105 list_for_each_entry(hdev, &hci_dev_list, list) {
/* Listing a device cancels its pending auto-off, and non-mgmt devices
 * default to pairable — legacy ioctl-interface behavior. */
2106 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2107 cancel_delayed_work(&hdev->power_off);
2109 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2110 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2112 (dr + n)->dev_id = hdev->id;
2113 (dr + n)->dev_opt = hdev->flags;
2118 read_unlock(&hci_dev_list_lock);
/* Shrink to the number of devices actually found before copying out. */
2121 size = sizeof(*dl) + n * sizeof(*dr);
2123 err = copy_to_user(arg, dl, size);
2126 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for one
 * device (address, type, flags, MTUs, link policy/mode, stats,
 * features) and copy it to user space. */
2129 int hci_get_dev_info(void __user *arg)
2131 struct hci_dev *hdev;
2132 struct hci_dev_info di;
2135 if (copy_from_user(&di, arg, sizeof(di)))
2138 hdev = hci_dev_get(di.dev_id);
2142 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2143 cancel_delayed_work_sync(&hdev->power_off);
2145 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2146 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2148 strcpy(di.name, hdev->name);
2149 di.bdaddr = hdev->bdaddr;
/* Packed type field: low nibble = bus, bits 4-5 = device type. */
2150 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2151 di.flags = hdev->flags;
2152 di.pkt_type = hdev->pkt_type;
/* LE-only controllers report their LE buffer parameters in the ACL
 * fields (the else branch is elided in this view). */
2153 if (lmp_bredr_capable(hdev)) {
2154 di.acl_mtu = hdev->acl_mtu;
2155 di.acl_pkts = hdev->acl_pkts;
2156 di.sco_mtu = hdev->sco_mtu;
2157 di.sco_pkts = hdev->sco_pkts;
2159 di.acl_mtu = hdev->le_mtu;
2160 di.acl_pkts = hdev->le_pkts;
2164 di.link_policy = hdev->link_policy;
2165 di.link_mode = hdev->link_mode;
2167 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2168 memcpy(&di.features, &hdev->features, sizeof(di.features));
2170 if (copy_to_user(arg, &di, sizeof(di)))
2178 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: mirror the rfkill state into HCI_RFKILLED and close
 * the device when blocked (unless it is still in the setup phase, which
 * may proceed without RF activity). User-channel devices are refused. */
2180 static int hci_rfkill_set_block(void *data, bool blocked)
2182 struct hci_dev *hdev = data;
2184 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2186 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2190 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2191 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2192 hci_dev_do_close(hdev);
2194 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table registered in hci_register_dev(). */
2200 static const struct rfkill_ops hci_rfkill_ops = {
2201 .set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the device, re-check error conditions
 * that were deliberately ignored during setup (rfkill, missing address)
 * and power back off if they still hold; otherwise schedule auto-off
 * and announce the new controller to mgmt. */
2204 static void hci_power_on(struct work_struct *work)
2206 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2209 BT_DBG("%s", hdev->name);
2211 err = hci_dev_do_open(hdev);
2213 mgmt_set_powered_failed(hdev, err);
2217 /* During the HCI setup phase, a few error conditions are
2218 * ignored and they need to be checked now. If they are still
2219 * valid, it is important to turn the device back off.
2221 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2222 (hdev->dev_type == HCI_BREDR &&
2223 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2224 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2225 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2226 hci_dev_do_close(hdev);
2227 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2228 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2229 HCI_AUTO_OFF_TIMEOUT);
2232 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2233 mgmt_index_added(hdev);
/* Deferred power-off work: simply close the device. */
2236 static void hci_power_off(struct work_struct *work)
2238 struct hci_dev *hdev = container_of(work, struct hci_dev,
2241 BT_DBG("%s", hdev->name);
2243 hci_dev_do_close(hdev);
/* Delayed work: the discoverable timeout expired — let mgmt turn
 * discoverable mode back off. */
2246 static void hci_discov_off(struct work_struct *work)
2248 struct hci_dev *hdev;
2250 hdev = container_of(work, struct hci_dev, discov_off.work);
2252 BT_DBG("%s", hdev->name);
2254 mgmt_discoverable_timeout(hdev);
/* Free every registered service UUID on this device.
 * NOTE(review): the kfree of each entry is elided in this view. */
2257 int hci_uuids_clear(struct hci_dev *hdev)
2259 struct bt_uuid *uuid, *tmp;
2261 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2262 list_del(&uuid->list);
/* Free every stored BR/EDR link key.
 * NOTE(review): the list_del/kfree of each key is elided in this view. */
2269 int hci_link_keys_clear(struct hci_dev *hdev)
2271 struct list_head *p, *n;
2273 list_for_each_safe(p, n, &hdev->link_keys) {
2274 struct link_key *key;
2276 key = list_entry(p, struct link_key, list);
/* Free every stored SMP long term key.
 * NOTE(review): the loop body freeing each key is elided in this view. */
2285 int hci_smp_ltks_clear(struct hci_dev *hdev)
2287 struct smp_ltk *k, *tmp;
2289 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Look up a stored BR/EDR link key by peer address. */
2297 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2301 list_for_each_entry(k, &hdev->link_keys, list)
2302 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and both sides' authentication requirements
 * (dedicated bonding, no-bonding, legacy/security-mode-3 cases).
 * NOTE(review): the individual return statements are elided here. */
2308 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2309 u8 key_type, u8 old_key_type)
/* Legacy key types below 0x03 (combination/unit keys). */
2312 if (key_type < 0x03)
2315 /* Debug keys are insecure so don't store them persistently */
2316 if (key_type == HCI_LK_DEBUG_COMBINATION)
2319 /* Changed combination key and there's no previous one */
2320 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2323 /* Security mode 3 case */
2327 /* Neither local nor remote side had no-bonding as requirement */
2328 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2331 /* Local side had dedicated bonding as requirement */
2332 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2335 /* Remote side had dedicated bonding as requirement */
2336 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2339 /* If none of the above criteria match, then don't store the key
/* Look up a stored LTK by its EDiv/Rand pair (received in an LE
 * encryption request). */
2344 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
2348 list_for_each_entry(k, &hdev->long_term_keys, list) {
2349 if (k->ediv != ediv ||
2350 memcmp(rand, k->rand, sizeof(k->rand)))
/* Look up a stored LTK by peer address and address type. */
2359 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2364 list_for_each_entry(k, &hdev->long_term_keys, list)
2365 if (addr_type == k->bdaddr_type &&
2366 bacmp(bdaddr, &k->bdaddr) == 0)
/* Store (or update) a BR/EDR link key for @bdaddr, work around buggy
 * controllers that report a changed-combination key during plain legacy
 * pairing, decide persistence, and notify mgmt for new keys.
 * NOTE(review): allocation-failure handling and the final return are
 * elided in this truncated view. */
2372 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2373 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2375 struct link_key *key, *old_key;
2379 old_key = hci_find_link_key(hdev, bdaddr);
2381 old_key_type = old_key->type;
/* No previous key: remember the connection's key type (0xff if none)
 * and allocate a fresh entry on the device's key list. */
2384 old_key_type = conn ? conn->key_type : 0xff;
2385 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2388 list_add(&key->list, &hdev->link_keys);
2391 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2393 /* Some buggy controller combinations generate a changed
2394 * combination key for legacy pairing even when there's no
2396 if (type == HCI_LK_CHANGED_COMBINATION &&
2397 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2398 type = HCI_LK_COMBINATION;
2400 conn->key_type = type;
2403 bacpy(&key->bdaddr, bdaddr);
2404 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2405 key->pin_len = pin_len;
/* A changed-combination key keeps the original key's type. */
2407 if (type == HCI_LK_CHANGED_COMBINATION)
2408 key->type = old_key_type;
2415 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2417 mgmt_new_link_key(hdev, key, persistent);
2420 conn->flush_key = !persistent;
/* Store (or update) an SMP key (STK or LTK) for @bdaddr/@addr_type and
 * notify mgmt for new LTKs. NOTE(review): ediv/type/new_key handling and
 * the return statement are partially elided in this view. */
2425 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2426 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2429 struct smp_ltk *key, *old_key;
2431 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2434 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2438 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2441 list_add(&key->list, &hdev->long_term_keys);
2444 bacpy(&key->bdaddr, bdaddr);
2445 key->bdaddr_type = addr_type;
2446 memcpy(key->val, tk, sizeof(key->val));
2447 key->authenticated = authenticated;
2449 key->enc_size = enc_size;
2451 memcpy(key->rand, rand, sizeof(key->rand));
/* Only proper LTKs (not short term keys) are announced to mgmt. */
2456 if (type & HCI_SMP_LTK)
2457 mgmt_new_ltk(hdev, key, 1);
/* Remove the stored link key for @bdaddr, if any.
 * NOTE(review): the kfree and return are elided in this view. */
2462 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2464 struct link_key *key;
2466 key = hci_find_link_key(hdev, bdaddr);
2470 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2472 list_del(&key->list);
/* Remove all stored SMP keys matching @bdaddr.
 * NOTE(review): the list_del/kfree of each key is elided in this view. */
2478 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2480 struct smp_ltk *k, *tmp;
2482 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2483 if (bacmp(bdaddr, &k->bdaddr))
2486 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2495 /* HCI command timer function */
/* Fires when the controller fails to respond to a command in time: log
 * the opcode of the stalled command (if still held), restore the command
 * credit and kick the command work queue to unwedge the TX path. */
2496 static void hci_cmd_timeout(unsigned long arg)
2498 struct hci_dev *hdev = (void *) arg;
2500 if (hdev->sent_cmd) {
2501 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2502 u16 opcode = __le16_to_cpu(sent->opcode);
2504 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2506 BT_ERR("%s command tx timeout", hdev->name);
2509 atomic_set(&hdev->cmd_cnt, 1);
2510 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored out-of-band pairing data by peer address. */
2513 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2516 struct oob_data *data;
2518 list_for_each_entry(data, &hdev->remote_oob_data, list)
2519 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove stored OOB data for @bdaddr, if any.
 * NOTE(review): the kfree and return are elided in this view. */
2525 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2527 struct oob_data *data;
2529 data = hci_find_remote_oob_data(hdev, bdaddr);
2533 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2535 list_del(&data->list);
/* Free every stored OOB data entry on this device. */
2541 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2543 struct oob_data *data, *n;
2545 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2546 list_del(&data->list);
/* Store (or replace) out-of-band pairing data (hash + randomizer) for a
 * peer. NOTE(review): allocation-failure handling and the return are
 * elided in this view; "randomizer" is presumably a parameter. */
2553 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2556 struct oob_data *data;
2558 data = hci_find_remote_oob_data(hdev, bdaddr);
2561 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2565 bacpy(&data->bdaddr, bdaddr);
2566 list_add(&data->list, &hdev->remote_oob_data);
2569 memcpy(data->hash, hash, sizeof(data->hash));
2570 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2572 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find a blacklist entry matching both address and address type. */
2577 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2578 bdaddr_t *bdaddr, u8 type)
2580 struct bdaddr_list *b;
2582 list_for_each_entry(b, &hdev->blacklist, list) {
2583 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Free the entire device blacklist.
 * NOTE(review): the per-entry free and return are elided in this view. */
2590 int hci_blacklist_clear(struct hci_dev *hdev)
2592 struct list_head *p, *n;
2594 list_for_each_safe(p, n, &hdev->blacklist) {
2595 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr/@type to the blacklist and emit the mgmt "device blocked"
 * event. BDADDR_ANY and duplicates are rejected. */
2604 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2606 struct bdaddr_list *entry;
2608 if (!bacmp(bdaddr, BDADDR_ANY))
2611 if (hci_blacklist_lookup(hdev, bdaddr, type))
2614 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2618 bacpy(&entry->bdaddr, bdaddr);
2619 entry->bdaddr_type = type;
2621 list_add(&entry->list, &hdev->blacklist);
2623 return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove @bdaddr/@type from the blacklist (BDADDR_ANY clears the whole
 * list) and emit the mgmt "device unblocked" event. */
2626 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2628 struct bdaddr_list *entry;
2630 if (!bacmp(bdaddr, BDADDR_ANY))
2631 return hci_blacklist_clear(hdev);
2633 entry = hci_blacklist_lookup(hdev, bdaddr, type);
2637 list_del(&entry->list);
2640 return mgmt_device_unblocked(hdev, bdaddr, type);
/* Request-complete callback for the interleaved inquiry: on failure,
 * log the status and return discovery to the STOPPED state. */
2643 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2646 BT_ERR("Failed to start inquiry: status %d", status);
2649 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2650 hci_dev_unlock(hdev);
/* Completion handler for disabling the LE scan: for LE-only discovery
 * we are done (STOPPED); for interleaved discovery, kick off the BR/EDR
 * inquiry phase with the GIAC access code. */
2655 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2657 /* General inquiry access code (GIAC) */
2658 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2659 struct hci_request req;
2660 struct hci_cp_inquiry cp;
2664 BT_ERR("Failed to disable LE scanning: status %d", status);
2668 switch (hdev->discovery.type) {
2669 case DISCOV_TYPE_LE:
2671 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2672 hci_dev_unlock(hdev);
2675 case DISCOV_TYPE_INTERLEAVED:
2676 hci_req_init(&req, hdev);
2678 memset(&cp, 0, sizeof(cp));
2679 memcpy(&cp.lap, lap, sizeof(cp.lap));
2680 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2681 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Start the inquiry phase with a fresh cache. */
2685 hci_inquiry_cache_flush(hdev);
2687 err = hci_req_run(&req, inquiry_complete);
2689 BT_ERR("Inquiry request failed: err %d", err);
2690 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2693 hci_dev_unlock(hdev);
/* Delayed work: the LE scan window elapsed — send Set Scan Enable
 * (disable) and continue in le_scan_disable_work_complete(). */
2698 static void le_scan_disable_work(struct work_struct *work)
2700 struct hci_dev *hdev = container_of(work, struct hci_dev,
2701 le_scan_disable.work);
2702 struct hci_cp_le_set_scan_enable cp;
2703 struct hci_request req;
2706 BT_DBG("%s", hdev->name);
2708 hci_req_init(&req, hdev);
2710 memset(&cp, 0, sizeof(cp));
2711 cp.enable = LE_SCAN_DISABLE;
2712 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2714 err = hci_req_run(&req, le_scan_disable_work_complete);
2716 BT_ERR("Disable LE scanning request failed: err %d", err);
2719 /* Alloc HCI device */
/* Allocate and initialise a struct hci_dev with spec-sane defaults
 * (packet types, IO capability, sniff/scan intervals), its lists,
 * locks, work items, queues and command timer. Returns NULL-checked
 * elsewhere; the return statement is elided in this view. */
2720 struct hci_dev *hci_alloc_dev(void)
2722 struct hci_dev *hdev;
2724 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2728 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2729 hdev->esco_type = (ESCO_HV1);
2730 hdev->link_mode = (HCI_LM_ACCEPT);
2731 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2732 hdev->io_capability = 0x03; /* No Input No Output */
2733 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2734 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
/* Sniff interval bounds in slots (0.625 ms units). */
2736 hdev->sniff_max_interval = 800;
2737 hdev->sniff_min_interval = 80;
/* LE scan interval/window in 0.625 ms units. */
2739 hdev->le_scan_interval = 0x0060;
2740 hdev->le_scan_window = 0x0030;
2742 mutex_init(&hdev->lock);
2743 mutex_init(&hdev->req_lock);
2745 INIT_LIST_HEAD(&hdev->mgmt_pending);
2746 INIT_LIST_HEAD(&hdev->blacklist);
2747 INIT_LIST_HEAD(&hdev->uuids);
2748 INIT_LIST_HEAD(&hdev->link_keys);
2749 INIT_LIST_HEAD(&hdev->long_term_keys);
2750 INIT_LIST_HEAD(&hdev->remote_oob_data);
2751 INIT_LIST_HEAD(&hdev->conn_hash.list);
2753 INIT_WORK(&hdev->rx_work, hci_rx_work);
2754 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2755 INIT_WORK(&hdev->tx_work, hci_tx_work);
2756 INIT_WORK(&hdev->power_on, hci_power_on);
2758 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2759 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2760 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2762 skb_queue_head_init(&hdev->rx_q);
2763 skb_queue_head_init(&hdev->cmd_q);
2764 skb_queue_head_init(&hdev->raw_q);
2766 init_waitqueue_head(&hdev->req_wait_q);
2768 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2770 hci_init_sysfs(hdev);
2771 discovery_init(hdev);
2775 EXPORT_SYMBOL(hci_alloc_dev);
2777 /* Free HCI device */
/* Drop the embedded device reference; the driver-model release callback
 * performs the actual free. */
2778 void hci_free_dev(struct hci_dev *hdev)
2780 /* will free via device release */
2781 put_device(&hdev->dev);
2783 EXPORT_SYMBOL(hci_free_dev);
2785 /* Register HCI device */
/* Allocate an index, create workqueues, debugfs dir, sysfs device and
 * rfkill switch, add the device to the global list, and queue the
 * initial power-on. NOTE(review): several error labels and the rfkill
 * allocation's NULL check are elided in this truncated view. */
2786 int hci_register_dev(struct hci_dev *hdev)
2790 if (!hdev->open || !hdev->close)
2793 /* Do not allow HCI_AMP devices to register at index 0,
2794 * so the index can be used as the AMP controller ID.
2796 switch (hdev->dev_type) {
2798 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2801 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2810 sprintf(hdev->name, "hci%d", id);
2813 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Single-threaded, high-priority queues: one for RX/TX/cmd work and a
 * separate one for synchronous request processing. */
2815 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2816 WQ_MEM_RECLAIM, 1, hdev->name);
2817 if (!hdev->workqueue) {
2822 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2823 WQ_MEM_RECLAIM, 1, hdev->name);
2824 if (!hdev->req_workqueue) {
2825 destroy_workqueue(hdev->workqueue);
2830 if (!IS_ERR_OR_NULL(bt_debugfs))
2831 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2833 dev_set_name(&hdev->dev, "%s", hdev->name);
2835 error = device_add(&hdev->dev);
2839 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2840 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* A failed rfkill registration is not fatal; just drop the switch. */
2843 if (rfkill_register(hdev->rfkill) < 0) {
2844 rfkill_destroy(hdev->rfkill);
2845 hdev->rfkill = NULL;
2849 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2850 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2852 set_bit(HCI_SETUP, &hdev->dev_flags);
2853 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2855 if (hdev->dev_type == HCI_BREDR) {
2856 /* Assume BR/EDR support until proven otherwise (such as
2857 * through reading supported features during init.
2859 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2862 write_lock(&hci_dev_list_lock);
2863 list_add(&hdev->list, &hci_dev_list);
2864 write_unlock(&hci_dev_list_lock);
2866 hci_notify(hdev, HCI_DEV_REG);
2869 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind path. */
2874 destroy_workqueue(hdev->workqueue);
2875 destroy_workqueue(hdev->req_workqueue);
2877 ida_simple_remove(&hci_index_ida, hdev->id);
2881 EXPORT_SYMBOL(hci_register_dev);
2883 /* Unregister HCI device */
/* Reverse of hci_register_dev(): remove from the global list, close the
 * device, free reassembly buffers, tear down mgmt/sysfs/debugfs/rfkill
 * state, clear all stored keys and release the index.
 * NOTE(review): the "id" local's assignment is elided in this view. */
2884 void hci_unregister_dev(struct hci_dev *hdev)
2888 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Mark unregistering first so concurrent opens fail early. */
2890 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2894 write_lock(&hci_dev_list_lock);
2895 list_del(&hdev->list);
2896 write_unlock(&hci_dev_list_lock);
2898 hci_dev_do_close(hdev);
2900 for (i = 0; i < NUM_REASSEMBLY; i++)
2901 kfree_skb(hdev->reassembly[i]);
2903 cancel_work_sync(&hdev->power_on);
2905 if (!test_bit(HCI_INIT, &hdev->flags) &&
2906 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2908 mgmt_index_removed(hdev);
2909 hci_dev_unlock(hdev);
2912 /* mgmt_index_removed should take care of emptying the
2914 BUG_ON(!list_empty(&hdev->mgmt_pending));
2916 hci_notify(hdev, HCI_DEV_UNREG);
2919 rfkill_unregister(hdev->rfkill);
2920 rfkill_destroy(hdev->rfkill);
2923 device_del(&hdev->dev);
2925 debugfs_remove_recursive(hdev->debugfs);
2927 destroy_workqueue(hdev->workqueue);
2928 destroy_workqueue(hdev->req_workqueue);
2931 hci_blacklist_clear(hdev);
2932 hci_uuids_clear(hdev);
2933 hci_link_keys_clear(hdev);
2934 hci_smp_ltks_clear(hdev);
2935 hci_remote_oob_data_clear(hdev);
2936 hci_dev_unlock(hdev);
2940 ida_simple_remove(&hci_index_ida, id);
2944 /* Suspend HCI device */
/* Driver hook: broadcast a suspend notification to HCI sockets. */
2945 int hci_suspend_dev(struct hci_dev *hdev)
2947 hci_notify(hdev, HCI_DEV_SUSPEND);
2950 EXPORT_SYMBOL(hci_suspend_dev);
2952 /* Resume HCI device */
/* Driver hook: broadcast a resume notification to HCI sockets. */
2953 int hci_resume_dev(struct hci_dev *hdev)
2955 hci_notify(hdev, HCI_DEV_RESUME);
2958 EXPORT_SYMBOL(hci_resume_dev);
2960 /* Receive frame from HCI drivers */
/* Entry point for complete frames from a driver: drop them unless the
 * device is UP or initialising, timestamp, queue on rx_q and schedule
 * the RX work. NOTE(review): the drop path's kfree_skb is elided. */
2961 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2963 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2964 && !test_bit(HCI_INIT, &hdev->flags))) {
2970 bt_cb(skb)->incoming = 1;
2973 __net_timestamp(skb);
2975 skb_queue_tail(&hdev->rx_q, skb);
2976 queue_work(hdev->workqueue, &hdev->rx_work);
2980 EXPORT_SYMBOL(hci_recv_frame);
/* Incrementally reassemble a fragmented HCI packet of @type into
 * hdev->reassembly[@index]: allocate the skb on the first fragment,
 * append up to scb->expect bytes, learn the payload length from the
 * packet header once it is complete, and hand finished frames to
 * hci_recv_frame(). NOTE(review): the switch heads, some locals and
 * return values are elided in this truncated view. */
2982 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2983 int count, __u8 index)
2988 struct sk_buff *skb;
2989 struct bt_skb_cb *scb;
2991 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2992 index >= NUM_REASSEMBLY)
2995 skb = hdev->reassembly[index];
/* No partial skb yet: size a new one for the worst case per type. */
2999 case HCI_ACLDATA_PKT:
3000 len = HCI_MAX_FRAME_SIZE;
3001 hlen = HCI_ACL_HDR_SIZE;
3004 len = HCI_MAX_EVENT_SIZE;
3005 hlen = HCI_EVENT_HDR_SIZE;
3007 case HCI_SCODATA_PKT:
3008 len = HCI_MAX_SCO_SIZE;
3009 hlen = HCI_SCO_HDR_SIZE;
3013 skb = bt_skb_alloc(len, GFP_ATOMIC);
3017 scb = (void *) skb->cb;
3019 scb->pkt_type = type;
3021 hdev->reassembly[index] = skb;
/* Copy as much of this fragment as the current frame still expects. */
3025 scb = (void *) skb->cb;
3026 len = min_t(uint, scb->expect, count);
3028 memcpy(skb_put(skb, len), data, len);
/* Once the full header has arrived, read the real payload length and
 * drop the frame if it would not fit the allocated skb. */
3037 if (skb->len == HCI_EVENT_HDR_SIZE) {
3038 struct hci_event_hdr *h = hci_event_hdr(skb);
3039 scb->expect = h->plen;
3041 if (skb_tailroom(skb) < scb->expect) {
3043 hdev->reassembly[index] = NULL;
3049 case HCI_ACLDATA_PKT:
3050 if (skb->len == HCI_ACL_HDR_SIZE) {
3051 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3052 scb->expect = __le16_to_cpu(h->dlen);
3054 if (skb_tailroom(skb) < scb->expect) {
3056 hdev->reassembly[index] = NULL;
3062 case HCI_SCODATA_PKT:
3063 if (skb->len == HCI_SCO_HDR_SIZE) {
3064 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3065 scb->expect = h->dlen;
3067 if (skb_tailroom(skb) < scb->expect) {
3069 hdev->reassembly[index] = NULL;
3076 if (scb->expect == 0) {
3077 /* Complete frame */
3079 bt_cb(skb)->pkt_type = type;
3080 hci_recv_frame(hdev, skb);
3082 hdev->reassembly[index] = NULL;
3090 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3094 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3098 rem = hci_reassembly(hdev, type, data, count, type - 1);
3102 data += (count - rem);
3108 EXPORT_SYMBOL(hci_recv_fragment);
3110 #define STREAM_REASSEMBLY 0
3112 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3118 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3121 struct { char type; } *pkt;
3123 /* Start of the frame */
3130 type = bt_cb(skb)->pkt_type;
3132 rem = hci_reassembly(hdev, type, data, count,
3137 data += (count - rem);
3143 EXPORT_SYMBOL(hci_recv_stream_fragment);
3145 /* ---- Interface to upper protocols ---- */
3147 int hci_register_cb(struct hci_cb *cb)
3149 BT_DBG("%p name %s", cb, cb->name);
3151 write_lock(&hci_cb_list_lock);
3152 list_add(&cb->list, &hci_cb_list);
3153 write_unlock(&hci_cb_list_lock);
3157 EXPORT_SYMBOL(hci_register_cb);
3159 int hci_unregister_cb(struct hci_cb *cb)
3161 BT_DBG("%p name %s", cb, cb->name);
3163 write_lock(&hci_cb_list_lock);
3164 list_del(&cb->list);
3165 write_unlock(&hci_cb_list_lock);
3169 EXPORT_SYMBOL(hci_unregister_cb);
3171 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3173 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3176 __net_timestamp(skb);
3178 /* Send copy to monitor */
3179 hci_send_to_monitor(hdev, skb);
3181 if (atomic_read(&hdev->promisc)) {
3182 /* Send copy to the sockets */
3183 hci_send_to_sock(hdev, skb);
3186 /* Get rid of skb owner, prior to sending to the driver. */
3189 if (hdev->send(hdev, skb) < 0)
3190 BT_ERR("%s sending frame failed", hdev->name);
3193 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3195 skb_queue_head_init(&req->cmd_q);
3200 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3202 struct hci_dev *hdev = req->hdev;
3203 struct sk_buff *skb;
3204 unsigned long flags;
3206 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3208 /* If an error occured during request building, remove all HCI
3209 * commands queued on the HCI request queue.
3212 skb_queue_purge(&req->cmd_q);
3216 /* Do not allow empty requests */
3217 if (skb_queue_empty(&req->cmd_q))
3220 skb = skb_peek_tail(&req->cmd_q);
3221 bt_cb(skb)->req.complete = complete;
3223 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3224 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3225 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3227 queue_work(hdev->workqueue, &hdev->cmd_work);
3232 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3233 u32 plen, const void *param)
3235 int len = HCI_COMMAND_HDR_SIZE + plen;
3236 struct hci_command_hdr *hdr;
3237 struct sk_buff *skb;
3239 skb = bt_skb_alloc(len, GFP_ATOMIC);
3243 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3244 hdr->opcode = cpu_to_le16(opcode);
3248 memcpy(skb_put(skb, plen), param, plen);
3250 BT_DBG("skb len %d", skb->len);
3252 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3257 /* Send HCI command */
3258 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3261 struct sk_buff *skb;
3263 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3265 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3267 BT_ERR("%s no memory for command", hdev->name);
3271 /* Stand-alone HCI commands must be flaged as
3272 * single-command requests.
3274 bt_cb(skb)->req.start = true;
3276 skb_queue_tail(&hdev->cmd_q, skb);
3277 queue_work(hdev->workqueue, &hdev->cmd_work);
3282 /* Queue a command to an asynchronous HCI request */
3283 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3284 const void *param, u8 event)
3286 struct hci_dev *hdev = req->hdev;
3287 struct sk_buff *skb;
3289 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3291 /* If an error occured during request building, there is no point in
3292 * queueing the HCI command. We can simply return.
3297 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3299 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3300 hdev->name, opcode);
3305 if (skb_queue_empty(&req->cmd_q))
3306 bt_cb(skb)->req.start = true;
3308 bt_cb(skb)->req.event = event;
3310 skb_queue_tail(&req->cmd_q, skb);
3313 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3316 hci_req_add_ev(req, opcode, plen, param, 0);
3319 /* Get data from the previously sent command */
3320 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3322 struct hci_command_hdr *hdr;
3324 if (!hdev->sent_cmd)
3327 hdr = (void *) hdev->sent_cmd->data;
3329 if (hdr->opcode != cpu_to_le16(opcode))
3332 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3334 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3338 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3340 struct hci_acl_hdr *hdr;
3343 skb_push(skb, HCI_ACL_HDR_SIZE);
3344 skb_reset_transport_header(skb);
3345 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3346 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3347 hdr->dlen = cpu_to_le16(len);
3350 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3351 struct sk_buff *skb, __u16 flags)
3353 struct hci_conn *conn = chan->conn;
3354 struct hci_dev *hdev = conn->hdev;
3355 struct sk_buff *list;
3357 skb->len = skb_headlen(skb);
3360 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3362 switch (hdev->dev_type) {
3364 hci_add_acl_hdr(skb, conn->handle, flags);
3367 hci_add_acl_hdr(skb, chan->handle, flags);
3370 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3374 list = skb_shinfo(skb)->frag_list;
3376 /* Non fragmented */
3377 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3379 skb_queue_tail(queue, skb);
3382 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3384 skb_shinfo(skb)->frag_list = NULL;
3386 /* Queue all fragments atomically */
3387 spin_lock(&queue->lock);
3389 __skb_queue_tail(queue, skb);
3391 flags &= ~ACL_START;
3394 skb = list; list = list->next;
3396 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3397 hci_add_acl_hdr(skb, conn->handle, flags);
3399 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3401 __skb_queue_tail(queue, skb);
3404 spin_unlock(&queue->lock);
3408 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3410 struct hci_dev *hdev = chan->conn->hdev;
3412 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3414 hci_queue_acl(chan, &chan->data_q, skb, flags);
3416 queue_work(hdev->workqueue, &hdev->tx_work);
3420 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3422 struct hci_dev *hdev = conn->hdev;
3423 struct hci_sco_hdr hdr;
3425 BT_DBG("%s len %d", hdev->name, skb->len);
3427 hdr.handle = cpu_to_le16(conn->handle);
3428 hdr.dlen = skb->len;
3430 skb_push(skb, HCI_SCO_HDR_SIZE);
3431 skb_reset_transport_header(skb);
3432 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3434 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3436 skb_queue_tail(&conn->data_q, skb);
3437 queue_work(hdev->workqueue, &hdev->tx_work);
3440 /* ---- HCI TX task (outgoing data) ---- */
3442 /* HCI Connection scheduler */
3443 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3446 struct hci_conn_hash *h = &hdev->conn_hash;
3447 struct hci_conn *conn = NULL, *c;
3448 unsigned int num = 0, min = ~0;
3450 /* We don't have to lock device here. Connections are always
3451 * added and removed with TX task disabled. */
3455 list_for_each_entry_rcu(c, &h->list, list) {
3456 if (c->type != type || skb_queue_empty(&c->data_q))
3459 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3464 if (c->sent < min) {
3469 if (hci_conn_num(hdev, type) == num)
3478 switch (conn->type) {
3480 cnt = hdev->acl_cnt;
3484 cnt = hdev->sco_cnt;
3487 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3491 BT_ERR("Unknown link type");
3499 BT_DBG("conn %p quote %d", conn, *quote);
3503 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3505 struct hci_conn_hash *h = &hdev->conn_hash;
3508 BT_ERR("%s link tx timeout", hdev->name);
3512 /* Kill stalled connections */
3513 list_for_each_entry_rcu(c, &h->list, list) {
3514 if (c->type == type && c->sent) {
3515 BT_ERR("%s killing stalled connection %pMR",
3516 hdev->name, &c->dst);
3517 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3524 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3527 struct hci_conn_hash *h = &hdev->conn_hash;
3528 struct hci_chan *chan = NULL;
3529 unsigned int num = 0, min = ~0, cur_prio = 0;
3530 struct hci_conn *conn;
3531 int cnt, q, conn_num = 0;
3533 BT_DBG("%s", hdev->name);
3537 list_for_each_entry_rcu(conn, &h->list, list) {
3538 struct hci_chan *tmp;
3540 if (conn->type != type)
3543 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3548 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3549 struct sk_buff *skb;
3551 if (skb_queue_empty(&tmp->data_q))
3554 skb = skb_peek(&tmp->data_q);
3555 if (skb->priority < cur_prio)
3558 if (skb->priority > cur_prio) {
3561 cur_prio = skb->priority;
3566 if (conn->sent < min) {
3572 if (hci_conn_num(hdev, type) == conn_num)
3581 switch (chan->conn->type) {
3583 cnt = hdev->acl_cnt;
3586 cnt = hdev->block_cnt;
3590 cnt = hdev->sco_cnt;
3593 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3597 BT_ERR("Unknown link type");
3602 BT_DBG("chan %p quote %d", chan, *quote);
3606 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3608 struct hci_conn_hash *h = &hdev->conn_hash;
3609 struct hci_conn *conn;
3612 BT_DBG("%s", hdev->name);
3616 list_for_each_entry_rcu(conn, &h->list, list) {
3617 struct hci_chan *chan;
3619 if (conn->type != type)
3622 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3627 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3628 struct sk_buff *skb;
3635 if (skb_queue_empty(&chan->data_q))
3638 skb = skb_peek(&chan->data_q);
3639 if (skb->priority >= HCI_PRIO_MAX - 1)
3642 skb->priority = HCI_PRIO_MAX - 1;
3644 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3648 if (hci_conn_num(hdev, type) == num)
3656 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3658 /* Calculate count of blocks used by this packet */
3659 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3662 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3664 if (!test_bit(HCI_RAW, &hdev->flags)) {
3665 /* ACL tx timeout must be longer than maximum
3666 * link supervision timeout (40.9 seconds) */
3667 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3668 HCI_ACL_TX_TIMEOUT))
3669 hci_link_tx_to(hdev, ACL_LINK);
3673 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3675 unsigned int cnt = hdev->acl_cnt;
3676 struct hci_chan *chan;
3677 struct sk_buff *skb;
3680 __check_timeout(hdev, cnt);
3682 while (hdev->acl_cnt &&
3683 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3684 u32 priority = (skb_peek(&chan->data_q))->priority;
3685 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3686 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3687 skb->len, skb->priority);
3689 /* Stop if priority has changed */
3690 if (skb->priority < priority)
3693 skb = skb_dequeue(&chan->data_q);
3695 hci_conn_enter_active_mode(chan->conn,
3696 bt_cb(skb)->force_active);
3698 hci_send_frame(hdev, skb);
3699 hdev->acl_last_tx = jiffies;
3707 if (cnt != hdev->acl_cnt)
3708 hci_prio_recalculate(hdev, ACL_LINK);
3711 static void hci_sched_acl_blk(struct hci_dev *hdev)
3713 unsigned int cnt = hdev->block_cnt;
3714 struct hci_chan *chan;
3715 struct sk_buff *skb;
3719 __check_timeout(hdev, cnt);
3721 BT_DBG("%s", hdev->name);
3723 if (hdev->dev_type == HCI_AMP)
3728 while (hdev->block_cnt > 0 &&
3729 (chan = hci_chan_sent(hdev, type, "e))) {
3730 u32 priority = (skb_peek(&chan->data_q))->priority;
3731 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3734 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3735 skb->len, skb->priority);
3737 /* Stop if priority has changed */
3738 if (skb->priority < priority)
3741 skb = skb_dequeue(&chan->data_q);
3743 blocks = __get_blocks(hdev, skb);
3744 if (blocks > hdev->block_cnt)
3747 hci_conn_enter_active_mode(chan->conn,
3748 bt_cb(skb)->force_active);
3750 hci_send_frame(hdev, skb);
3751 hdev->acl_last_tx = jiffies;
3753 hdev->block_cnt -= blocks;
3756 chan->sent += blocks;
3757 chan->conn->sent += blocks;
3761 if (cnt != hdev->block_cnt)
3762 hci_prio_recalculate(hdev, type);
3765 static void hci_sched_acl(struct hci_dev *hdev)
3767 BT_DBG("%s", hdev->name);
3769 /* No ACL link over BR/EDR controller */
3770 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3773 /* No AMP link over AMP controller */
3774 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3777 switch (hdev->flow_ctl_mode) {
3778 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3779 hci_sched_acl_pkt(hdev);
3782 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3783 hci_sched_acl_blk(hdev);
3789 static void hci_sched_sco(struct hci_dev *hdev)
3791 struct hci_conn *conn;
3792 struct sk_buff *skb;
3795 BT_DBG("%s", hdev->name);
3797 if (!hci_conn_num(hdev, SCO_LINK))
3800 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3801 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3802 BT_DBG("skb %p len %d", skb, skb->len);
3803 hci_send_frame(hdev, skb);
3806 if (conn->sent == ~0)
3812 static void hci_sched_esco(struct hci_dev *hdev)
3814 struct hci_conn *conn;
3815 struct sk_buff *skb;
3818 BT_DBG("%s", hdev->name);
3820 if (!hci_conn_num(hdev, ESCO_LINK))
3823 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3825 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3826 BT_DBG("skb %p len %d", skb, skb->len);
3827 hci_send_frame(hdev, skb);
3830 if (conn->sent == ~0)
3836 static void hci_sched_le(struct hci_dev *hdev)
3838 struct hci_chan *chan;
3839 struct sk_buff *skb;
3840 int quote, cnt, tmp;
3842 BT_DBG("%s", hdev->name);
3844 if (!hci_conn_num(hdev, LE_LINK))
3847 if (!test_bit(HCI_RAW, &hdev->flags)) {
3848 /* LE tx timeout must be longer than maximum
3849 * link supervision timeout (40.9 seconds) */
3850 if (!hdev->le_cnt && hdev->le_pkts &&
3851 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3852 hci_link_tx_to(hdev, LE_LINK);
3855 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3857 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3858 u32 priority = (skb_peek(&chan->data_q))->priority;
3859 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3860 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3861 skb->len, skb->priority);
3863 /* Stop if priority has changed */
3864 if (skb->priority < priority)
3867 skb = skb_dequeue(&chan->data_q);
3869 hci_send_frame(hdev, skb);
3870 hdev->le_last_tx = jiffies;
3881 hdev->acl_cnt = cnt;
3884 hci_prio_recalculate(hdev, LE_LINK);
3887 static void hci_tx_work(struct work_struct *work)
3889 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3890 struct sk_buff *skb;
3892 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3893 hdev->sco_cnt, hdev->le_cnt);
3895 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3896 /* Schedule queues and send stuff to HCI driver */
3897 hci_sched_acl(hdev);
3898 hci_sched_sco(hdev);
3899 hci_sched_esco(hdev);
3903 /* Send next queued raw (unknown type) packet */
3904 while ((skb = skb_dequeue(&hdev->raw_q)))
3905 hci_send_frame(hdev, skb);
3908 /* ----- HCI RX task (incoming data processing) ----- */
3910 /* ACL data packet */
3911 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3913 struct hci_acl_hdr *hdr = (void *) skb->data;
3914 struct hci_conn *conn;
3915 __u16 handle, flags;
3917 skb_pull(skb, HCI_ACL_HDR_SIZE);
3919 handle = __le16_to_cpu(hdr->handle);
3920 flags = hci_flags(handle);
3921 handle = hci_handle(handle);
3923 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3926 hdev->stat.acl_rx++;
3929 conn = hci_conn_hash_lookup_handle(hdev, handle);
3930 hci_dev_unlock(hdev);
3933 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3935 /* Send to upper protocol */
3936 l2cap_recv_acldata(conn, skb, flags);
3939 BT_ERR("%s ACL packet for unknown connection handle %d",
3940 hdev->name, handle);
3946 /* SCO data packet */
3947 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3949 struct hci_sco_hdr *hdr = (void *) skb->data;
3950 struct hci_conn *conn;
3953 skb_pull(skb, HCI_SCO_HDR_SIZE);
3955 handle = __le16_to_cpu(hdr->handle);
3957 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3959 hdev->stat.sco_rx++;
3962 conn = hci_conn_hash_lookup_handle(hdev, handle);
3963 hci_dev_unlock(hdev);
3966 /* Send to upper protocol */
3967 sco_recv_scodata(conn, skb);
3970 BT_ERR("%s SCO packet for unknown connection handle %d",
3971 hdev->name, handle);
3977 static bool hci_req_is_complete(struct hci_dev *hdev)
3979 struct sk_buff *skb;
3981 skb = skb_peek(&hdev->cmd_q);
3985 return bt_cb(skb)->req.start;
3988 static void hci_resend_last(struct hci_dev *hdev)
3990 struct hci_command_hdr *sent;
3991 struct sk_buff *skb;
3994 if (!hdev->sent_cmd)
3997 sent = (void *) hdev->sent_cmd->data;
3998 opcode = __le16_to_cpu(sent->opcode);
3999 if (opcode == HCI_OP_RESET)
4002 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4006 skb_queue_head(&hdev->cmd_q, skb);
4007 queue_work(hdev->workqueue, &hdev->cmd_work);
4010 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4012 hci_req_complete_t req_complete = NULL;
4013 struct sk_buff *skb;
4014 unsigned long flags;
4016 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4018 /* If the completed command doesn't match the last one that was
4019 * sent we need to do special handling of it.
4021 if (!hci_sent_cmd_data(hdev, opcode)) {
4022 /* Some CSR based controllers generate a spontaneous
4023 * reset complete event during init and any pending
4024 * command will never be completed. In such a case we
4025 * need to resend whatever was the last sent
4028 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4029 hci_resend_last(hdev);
4034 /* If the command succeeded and there's still more commands in
4035 * this request the request is not yet complete.
4037 if (!status && !hci_req_is_complete(hdev))
4040 /* If this was the last command in a request the complete
4041 * callback would be found in hdev->sent_cmd instead of the
4042 * command queue (hdev->cmd_q).
4044 if (hdev->sent_cmd) {
4045 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4048 /* We must set the complete callback to NULL to
4049 * avoid calling the callback more than once if
4050 * this function gets called again.
4052 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4058 /* Remove all pending commands belonging to this request */
4059 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4060 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4061 if (bt_cb(skb)->req.start) {
4062 __skb_queue_head(&hdev->cmd_q, skb);
4066 req_complete = bt_cb(skb)->req.complete;
4069 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4073 req_complete(hdev, status);
4076 static void hci_rx_work(struct work_struct *work)
4078 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4079 struct sk_buff *skb;
4081 BT_DBG("%s", hdev->name);
4083 while ((skb = skb_dequeue(&hdev->rx_q))) {
4084 /* Send copy to monitor */
4085 hci_send_to_monitor(hdev, skb);
4087 if (atomic_read(&hdev->promisc)) {
4088 /* Send copy to the sockets */
4089 hci_send_to_sock(hdev, skb);
4092 if (test_bit(HCI_RAW, &hdev->flags) ||
4093 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4098 if (test_bit(HCI_INIT, &hdev->flags)) {
4099 /* Don't process data packets in this states. */
4100 switch (bt_cb(skb)->pkt_type) {
4101 case HCI_ACLDATA_PKT:
4102 case HCI_SCODATA_PKT:
4109 switch (bt_cb(skb)->pkt_type) {
4111 BT_DBG("%s Event packet", hdev->name);
4112 hci_event_packet(hdev, skb);
4115 case HCI_ACLDATA_PKT:
4116 BT_DBG("%s ACL data packet", hdev->name);
4117 hci_acldata_packet(hdev, skb);
4120 case HCI_SCODATA_PKT:
4121 BT_DBG("%s SCO data packet", hdev->name);
4122 hci_scodata_packet(hdev, skb);
4132 static void hci_cmd_work(struct work_struct *work)
4134 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4135 struct sk_buff *skb;
4137 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4138 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4140 /* Send queued commands */
4141 if (atomic_read(&hdev->cmd_cnt)) {
4142 skb = skb_dequeue(&hdev->cmd_q);
4146 kfree_skb(hdev->sent_cmd);
4148 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4149 if (hdev->sent_cmd) {
4150 atomic_dec(&hdev->cmd_cnt);
4151 hci_send_frame(hdev, skb);
4152 if (test_bit(HCI_RESET, &hdev->flags))
4153 del_timer(&hdev->cmd_timer);
4155 mod_timer(&hdev->cmd_timer,
4156 jiffies + HCI_CMD_TIMEOUT);
4158 skb_queue_head(&hdev->cmd_q, skb);
4159 queue_work(hdev->workqueue, &hdev->cmd_work);