2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
/* Forward an HCI device event to the HCI socket layer so listening
 * sockets (e.g. monitors) are informed.
 * NOTE(review): this chunk is a partial extraction -- braces and some
 * statements are missing throughout the file; code left byte-identical.
 */
53 static void hci_notify(struct hci_dev *hdev, int event)
55 hci_sock_dev_event(hdev, event);
58 /* ---- HCI requests ---- */
/* Completion callback for synchronous HCI requests: if a request is
 * still pending, record its result, mark it done and wake the waiter
 * sleeping on req_wait_q.
 */
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request with @err: mark it canceled and
 * wake the waiter so it can return the error instead of timing out.
 */
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
/* Consume the last received event (hdev->recv_evt) and return it with
 * the event headers stripped, provided it is either the explicitly
 * requested @event or the Command Complete event matching @opcode.
 * Returns ERR_PTR(-ENODATA) when there is no (matching) event data.
 * NOTE(review): some intermediate lines (goto labels/failure paths) are
 * missing from this extraction.
 */
82 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
/* Take ownership of the stashed event so it is consumed exactly once. */
92 hdev->recv_evt = NULL;
97 return ERR_PTR(-ENODATA);
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* Caller asked for a specific event: only that event type matches. */
108 if (hdr->evt != event)
113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
/* Match against the opcode the caller issued (little-endian on wire). */
126 if (opcode == __le16_to_cpu(ev->opcode))
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
134 return ERR_PTR(-ENODATA);
/* Send a single HCI command and sleep (interruptibly, up to @timeout
 * jiffies) until its completion is signalled via hci_req_sync_complete().
 * On success the matching Command Complete skb is returned through
 * hci_get_cmd_complete(); a pending signal yields ERR_PTR(-EINTR).
 * Caller context: must be able to sleep.
 */
137 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
138 const void *param, u8 event, u32 timeout)
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
144 BT_DBG("%s", hdev->name);
146 hci_req_init(&req, hdev);
148 hci_req_add_ev(&req, opcode, plen, param, event);
150 hdev->req_status = HCI_REQ_PEND;
152 err = hci_req_run(&req, hci_req_sync_complete);
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
159 schedule_timeout(timeout);
161 remove_wait_queue(&hdev->req_wait_q, &wait);
/* Interrupted by a signal before the controller answered. */
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
166 switch (hdev->req_status) {
/* Controller returned a status; translate it to a negative errno. */
168 err = -bt_to_errno(hdev->req_result);
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
/* Reset request bookkeeping for the next synchronous caller. */
180 hdev->req_status = hdev->req_result = 0;
182 BT_DBG("%s end: err %d", hdev->name, err);
187 return hci_get_cmd_complete(hdev, opcode, event);
189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous HCI command with no special event
 * requested (event 0 means "expect the normal Command Complete").
 */
191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
192 const void *param, u32 timeout)
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
196 EXPORT_SYMBOL(__hci_cmd_sync);
198 /* Execute request and wait for completion. */
/* Build a request via the caller-supplied @func and run it synchronously,
 * sleeping until the controller answers or @timeout expires.  Callers are
 * expected to serialize -- presumably via the request lock taken in
 * hci_req_sync(); TODO confirm (the lock call is on a line missing from
 * this extraction).
 */
199 static int __hci_req_sync(struct hci_dev *hdev,
200 void (*func)(struct hci_request *req,
202 unsigned long opt, __u32 timeout)
204 struct hci_request req;
205 DECLARE_WAITQUEUE(wait, current);
208 BT_DBG("%s start", hdev->name);
210 hci_req_init(&req, hdev);
212 hdev->req_status = HCI_REQ_PEND;
216 err = hci_req_run(&req, hci_req_sync_complete);
218 hdev->req_status = 0;
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
234 schedule_timeout(timeout);
236 remove_wait_queue(&hdev->req_wait_q, &wait);
/* A pending signal aborts the wait (return value line not visible). */
238 if (signal_pending(current))
241 switch (hdev->req_status) {
243 err = -bt_to_errno(hdev->req_result);
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
255 hdev->req_status = hdev->req_result = 0;
257 BT_DBG("%s end: err %d", hdev->name, err);
/* Public synchronous-request entry point: refuses when the device is not
 * up, then runs __hci_req_sync() under the request lock so concurrent
 * synchronous requests are serialized.
 */
262 static int hci_req_sync(struct hci_dev *hdev,
263 void (*req)(struct hci_request *req,
265 unsigned long opt, __u32 timeout)
/* Device must be up; the error returned here is on a missing line. */
269 if (!test_bit(HCI_UP, &hdev->flags))
272 /* Serialize all requests */
274 ret = __hci_req_sync(hdev, req, opt, timeout);
275 hci_req_unlock(hdev);
/* Request builder: queue an HCI_Reset command and flag the device as
 * being reset so event handling can account for it.
 */
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
282 BT_DBG("%s %ld", req->hdev->name, opt);
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Stage-1 init for BR/EDR controllers: packet-based flow control, then
 * queue the basic identity reads (features, version, BD address).
 */
289 static void bredr_init(struct hci_request *req)
291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
293 /* Read Local Supported Features */
294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
296 /* Read Local Version */
297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
299 /* Read BD Address */
300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Stage-1 init for AMP controllers: block-based flow control, then queue
 * the AMP-specific identity reads.
 */
303 static void amp_init(struct hci_request *req)
305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
307 /* Read Local Version */
308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
310 /* Read Local AMP Info */
311 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
313 /* Read Data Blk size */
314 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
/* Init stage 1: reset the controller (unless the reset-on-close quirk
 * makes it redundant) and dispatch per device type.  The per-type
 * bredr_init()/amp_init() calls are on lines missing from this
 * extraction; only the unknown-type error branch is visible.
 */
317 static void hci_init1_req(struct hci_request *req, unsigned long opt)
319 struct hci_dev *hdev = req->hdev;
321 BT_DBG("%s %ld", hdev->name, opt);
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
325 hci_reset_req(req, 0);
327 switch (hdev->dev_type) {
337 BT_ERR("Unknown device type %d", hdev->dev_type);
/* Stage-2 BR/EDR setup: queue buffer/name/class/voice reads, clear all
 * event filters, set the connection accept timeout and, on controllers
 * newer than Bluetooth 1.1, read the page scan parameters.
 * NOTE(review): "¶m" on the CA-timeout line looks like a mis-encoded
 * "&param" (HTML entity corruption) -- needs fixing in the real source.
 */
342 static void bredr_setup(struct hci_request *req)
347 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
350 /* Read Class of Device */
351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
353 /* Read Local Name */
354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
356 /* Read Voice Setting */
357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
359 /* Clear Event Filters */
360 flt_type = HCI_FLT_CLEAR_ALL;
361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
363 /* Connection accept timeout ~20 secs */
364 param = __constant_cpu_to_le16(0x7d00);
365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
367 /* Read page scan parameters */
368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
370 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
/* Stage-2 LE setup: queue the LE capability reads; on single-mode LE
 * controllers (no BR/EDR) LE support is implicitly enabled.
 */
374 static void le_setup(struct hci_request *req)
376 struct hci_dev *hdev = req->hdev;
378 /* Read LE Buffer Size */
379 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
381 /* Read LE Local Supported Features */
382 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
384 /* Read LE Advertising Channel TX Power */
385 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
387 /* Read LE White List Size */
388 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
390 /* Read LE Supported States */
391 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
393 /* LE-only controllers have LE implicitly enabled */
394 if (!lmp_bredr_capable(hdev))
395 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Pick the best inquiry mode the controller supports (extended inquiry,
 * inquiry-with-RSSI, or standard), with quirks keyed off manufacturer /
 * hci_rev / lmp_subver for controllers with known-broken claims.
 * NOTE(review): the actual return-value lines are missing from this
 * extraction, so the exact mode constants cannot be confirmed here.
 */
398 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
400 if (lmp_ext_inq_capable(hdev))
403 if (lmp_inq_rssi_capable(hdev))
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
/* Queue a Write Inquiry Mode command using the best mode determined by
 * hci_get_inquiry_mode().
 */
426 static void hci_setup_inquiry_mode(struct hci_request *req)
430 mode = hci_get_inquiry_mode(req->hdev);
432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
/* Build and queue the HCI event mask based on the controller's LMP
 * capabilities: start from a BR/EDR default (or an LE-only minimal set),
 * then enable per-feature events.  Skipped entirely for pre-1.2
 * controllers.  Also sets the LE event mask when LE is supported
 * (the LE mask contents are on lines missing from this extraction).
 */
435 static void hci_setup_event_mask(struct hci_request *req)
437 struct hci_dev *hdev = req->hdev;
439 /* The second byte is 0xff instead of 0x9f (two reserved bits
440 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
443 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
445 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
446 * any event mask for pre 1.2 devices.
448 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
451 if (lmp_bredr_capable(hdev)) {
452 events[4] |= 0x01; /* Flow Specification Complete */
453 events[4] |= 0x02; /* Inquiry Result with RSSI */
454 events[4] |= 0x04; /* Read Remote Extended Features Complete */
455 events[5] |= 0x08; /* Synchronous Connection Complete */
456 events[5] |= 0x10; /* Synchronous Connection Changed */
458 /* Use a different default for LE-only devices */
459 memset(events, 0, sizeof(events));
460 events[0] |= 0x10; /* Disconnection Complete */
461 events[0] |= 0x80; /* Encryption Change */
462 events[1] |= 0x08; /* Read Remote Version Information Complete */
463 events[1] |= 0x20; /* Command Complete */
464 events[1] |= 0x40; /* Command Status */
465 events[1] |= 0x80; /* Hardware Error */
466 events[2] |= 0x04; /* Number of Completed Packets */
467 events[3] |= 0x02; /* Data Buffer Overflow */
468 events[5] |= 0x80; /* Encryption Key Refresh Complete */
471 if (lmp_inq_rssi_capable(hdev))
472 events[4] |= 0x02; /* Inquiry Result with RSSI */
474 if (lmp_sniffsubr_capable(hdev))
475 events[5] |= 0x20; /* Sniff Subrating */
477 if (lmp_pause_enc_capable(hdev))
478 events[5] |= 0x80; /* Encryption Key Refresh Complete */
480 if (lmp_ext_inq_capable(hdev))
481 events[5] |= 0x40; /* Extended Inquiry Result */
483 if (lmp_no_flush_capable(hdev))
484 events[7] |= 0x01; /* Enhanced Flush Complete */
486 if (lmp_lsto_capable(hdev))
487 events[6] |= 0x80; /* Link Supervision Timeout Changed */
489 if (lmp_ssp_capable(hdev)) {
490 events[6] |= 0x01; /* IO Capability Request */
491 events[6] |= 0x02; /* IO Capability Response */
492 events[6] |= 0x04; /* User Confirmation Request */
493 events[6] |= 0x08; /* User Passkey Request */
494 events[6] |= 0x10; /* Remote OOB Data Request */
495 events[6] |= 0x20; /* Simple Pairing Complete */
496 events[7] |= 0x04; /* User Passkey Notification */
497 events[7] |= 0x08; /* Keypress Notification */
498 events[7] |= 0x10; /* Remote Host Supported
499 * Features Notification
503 if (lmp_le_capable(hdev))
504 events[7] |= 0x20; /* LE Meta-Event */
506 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
508 if (lmp_le_capable(hdev)) {
509 memset(events, 0, sizeof(events));
511 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
512 sizeof(events), events);
/* Init stage 2: mark BR/EDR (dis)abled, run per-transport setup, set the
 * event mask, read the supported-commands list (skipped for AVM Berlin
 * and pre-1.2 controllers), configure SSP / EIR, inquiry mode, inquiry
 * TX power, extended features and authentication as capabilities allow.
 * NOTE(review): several branch bodies (bredr_setup/le_setup calls, mode
 * values) are on lines missing from this extraction.
 */
516 static void hci_init2_req(struct hci_request *req, unsigned long opt)
518 struct hci_dev *hdev = req->hdev;
520 if (lmp_bredr_capable(hdev))
523 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
525 if (lmp_le_capable(hdev))
528 hci_setup_event_mask(req);
530 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
531 * local supported commands HCI command.
533 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
534 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
536 if (lmp_ssp_capable(hdev)) {
537 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
539 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
540 sizeof(mode), &mode);
542 struct hci_cp_write_eir cp;
/* SSP disabled: clear the cached EIR and send an all-zero one. */
544 memset(hdev->eir, 0, sizeof(hdev->eir));
545 memset(&cp, 0, sizeof(cp));
547 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
551 if (lmp_inq_rssi_capable(hdev))
552 hci_setup_inquiry_mode(req);
554 if (lmp_inq_tx_pwr_capable(hdev))
555 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
557 if (lmp_ext_feat_capable(hdev)) {
558 struct hci_cp_read_local_ext_features cp;
561 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
565 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
567 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Compose the default link policy from the controller's LMP capabilities
 * (role switch, hold, sniff, park) and queue Write Default Link Policy.
 */
572 static void hci_setup_link_policy(struct hci_request *req)
574 struct hci_dev *hdev = req->hdev;
575 struct hci_cp_write_def_link_policy cp;
578 if (lmp_rswitch_capable(hdev))
579 link_policy |= HCI_LP_RSWITCH;
580 if (lmp_hold_capable(hdev))
581 link_policy |= HCI_LP_HOLD;
582 if (lmp_sniff_capable(hdev))
583 link_policy |= HCI_LP_SNIFF;
584 if (lmp_park_capable(hdev))
585 link_policy |= HCI_LP_PARK;
587 cp.policy = cpu_to_le16(link_policy);
588 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported reflecting the current HCI_LE_ENABLED
 * flag, but only when the host-side value would actually change and only
 * on dual-mode controllers (LE-only ones can't disable LE).
 */
591 static void hci_set_le_support(struct hci_request *req)
593 struct hci_dev *hdev = req->hdev;
594 struct hci_cp_write_le_host_supported cp;
596 /* LE-only devices do not support explicit enablement */
597 if (!lmp_bredr_capable(hdev))
600 memset(&cp, 0, sizeof(cp));
602 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
/* cp.le assignment is on a missing line; simul mirrors controller
 * support for simultaneous LE + BR/EDR. */
604 cp.simul = lmp_le_br_capable(hdev);
/* Only send the command when the host LE setting would change. */
607 if (cp.le != lmp_host_le_capable(hdev))
608 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and queue the second page of the event mask, enabling the
 * Connectionless Slave Broadcast events matching the master/slave roles
 * advertised in features page 2.
 */
612 static void hci_set_event_mask_page_2(struct hci_request *req)
614 struct hci_dev *hdev = req->hdev;
615 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
617 /* If Connectionless Slave Broadcast master role is supported
618 * enable all necessary events for it.
620 if (hdev->features[2][0] & 0x01) {
621 events[1] |= 0x40; /* Triggered Clock Capture */
622 events[1] |= 0x80; /* Synchronization Train Complete */
623 events[2] |= 0x10; /* Slave Page Response Timeout */
624 events[2] |= 0x20; /* CSB Channel Map Change */
627 /* If Connectionless Slave Broadcast slave role is supported
628 * enable all necessary events for it.
630 if (hdev->features[2][0] & 0x02) {
631 events[2] |= 0x01; /* Synchronization Train Received */
632 events[2] |= 0x02; /* CSB Receive */
633 events[2] |= 0x04; /* CSB Timeout */
634 events[2] |= 0x08; /* Truncated Page Complete */
637 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Init stage 3: delete stored link keys (only if the controller marks
 * the command supported), set the default link policy, configure LE host
 * support, and read any extended feature pages beyond page 1.
 */
640 static void hci_init3_req(struct hci_request *req, unsigned long opt)
642 struct hci_dev *hdev = req->hdev;
645 /* Some Broadcom based Bluetooth controllers do not support the
646 * Delete Stored Link Key command. They are clearly indicating its
647 * absence in the bit mask of supported commands.
649 * Check the supported commands and only if the the command is marked
650 * as supported send it. If not supported assume that the controller
651 * does not have actual support for stored link keys which makes this
652 * command redundant anyway.
654 if (hdev->commands[6] & 0x80) {
655 struct hci_cp_delete_stored_link_key cp;
657 bacpy(&cp.bdaddr, BDADDR_ANY);
658 cp.delete_all = 0x01;
659 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* Write Default Link Policy is octet 5 bit 4 of supported commands. */
663 if (hdev->commands[5] & 0x10)
664 hci_setup_link_policy(req);
666 if (lmp_le_capable(hdev)) {
667 hci_set_le_support(req);
671 /* Read features beyond page 1 if available */
672 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
673 struct hci_cp_read_local_ext_features cp;
676 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Init stage 4: set event mask page 2 (if the command is supported) and
 * read synchronization train parameters when CSB sync-train is present.
 */
681 static void hci_init4_req(struct hci_request *req, unsigned long opt)
683 struct hci_dev *hdev = req->hdev;
685 /* Set event mask page 2 if the HCI command for it is supported */
686 if (hdev->commands[22] & 0x04)
687 hci_set_event_mask_page_2(req);
689 /* Check for Synchronization Train support */
690 if (hdev->features[2][0] & 0x04)
691 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
/* Run the four-stage controller init sequence synchronously.  AMP
 * controllers stop after stage 1; BR/EDR(/LE) controllers run all four.
 */
694 static int __hci_init(struct hci_dev *hdev)
698 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
702 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
703 * BR/EDR/LE type controllers. AMP controllers only need the
706 if (hdev->dev_type != HCI_BREDR)
709 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
713 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
717 return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
/* Request builder for the HCISETSCAN ioctl: write the scan enable value
 * passed in @opt (inquiry/page scan bits).
 */
720 static void hci_scan_req(struct hci_request *req, unsigned long opt)
724 BT_DBG("%s %x", req->hdev->name, scan);
726 /* Inquiry and Page scans */
727 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder for HCISETAUTH: write the authentication enable value
 * passed in @opt.
 */
730 static void hci_auth_req(struct hci_request *req, unsigned long opt)
734 BT_DBG("%s %x", req->hdev->name, auth);
737 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder for HCISETENCRYPT: write the encryption mode passed in
 * @opt.
 */
740 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
744 BT_DBG("%s %x", req->hdev->name, encrypt);
747 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder for HCISETLINKPOL: write the default link policy
 * passed in @opt (converted to little-endian for the wire).
 */
750 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
752 __le16 policy = cpu_to_le16(opt);
754 BT_DBG("%s %x", req->hdev->name, policy);
756 /* Default link policy */
757 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
760 /* Get HCI device by index.
761 * Device is held on return. */
/* Walk hci_dev_list under the read lock looking for a matching id;
 * hci_dev_hold() takes a reference the caller must drop with
 * hci_dev_put().  Returns NULL when no device has that index.
 */
762 struct hci_dev *hci_dev_get(int index)
764 struct hci_dev *hdev = NULL, *d;
771 read_lock(&hci_dev_list_lock);
772 list_for_each_entry(d, &hci_dev_list, list) {
773 if (d->id == index) {
774 hdev = hci_dev_hold(d);
778 read_unlock(&hci_dev_list_lock);
782 /* ---- Inquiry support ---- */
/* True while discovery is in the FINDING or RESOLVING state (the
 * return statements themselves are on lines missing here).
 */
784 bool hci_discovery_active(struct hci_dev *hdev)
786 struct discovery_state *discov = &hdev->discovery;
788 switch (discov->state) {
789 case DISCOVERY_FINDING:
790 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine, emitting mgmt "discovering"
 * events on the STOPPED and FINDING edges.  No-op if already in @state.
 */
798 void hci_discovery_set_state(struct hci_dev *hdev, int state)
800 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
802 if (hdev->discovery.state == state)
806 case DISCOVERY_STOPPED:
/* STARTING -> STOPPED means discovery never began; don't report it. */
807 if (hdev->discovery.state != DISCOVERY_STARTING)
808 mgmt_discovering(hdev, 0);
810 case DISCOVERY_STARTING:
812 case DISCOVERY_FINDING:
813 mgmt_discovering(hdev, 1);
815 case DISCOVERY_RESOLVING:
817 case DISCOVERY_STOPPING:
821 hdev->discovery.state = state;
/* Drop every entry from the inquiry cache and reinitialize the unknown /
 * resolve sub-lists (the per-entry free is on a missing line).
 */
824 void hci_inquiry_cache_flush(struct hci_dev *hdev)
826 struct discovery_state *cache = &hdev->discovery;
827 struct inquiry_entry *p, *n;
829 list_for_each_entry_safe(p, n, &cache->all, all) {
834 INIT_LIST_HEAD(&cache->unknown);
835 INIT_LIST_HEAD(&cache->resolve);
/* Linear search of the full inquiry cache for an entry matching @bdaddr;
 * the return statements are on lines missing from this extraction.
 */
838 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
841 struct discovery_state *cache = &hdev->discovery;
842 struct inquiry_entry *e;
844 BT_DBG("cache %p, %pMR", cache, bdaddr);
846 list_for_each_entry(e, &cache->all, all) {
847 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Like hci_inquiry_cache_lookup() but restricted to the "unknown name"
 * sub-list.
 */
854 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
857 struct discovery_state *cache = &hdev->discovery;
858 struct inquiry_entry *e;
860 BT_DBG("cache %p, %pMR", cache, bdaddr);
862 list_for_each_entry(e, &cache->unknown, list) {
863 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Search the "resolve" sub-list; BDADDR_ANY matches any address whose
 * name_state equals @state, otherwise match by exact address.
 */
870 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
874 struct discovery_state *cache = &hdev->discovery;
875 struct inquiry_entry *e;
877 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
879 list_for_each_entry(e, &cache->resolve, list) {
880 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
882 if (!bacmp(&e->data.bdaddr, bdaddr))
/* (Re)insert @ie into the resolve list ordered by descending RSSI
 * magnitude, skipping entries whose name lookup is already pending, so
 * the strongest signals get resolved first.
 */
889 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
890 struct inquiry_entry *ie)
892 struct discovery_state *cache = &hdev->discovery;
893 struct list_head *pos = &cache->resolve;
894 struct inquiry_entry *p;
898 list_for_each_entry(p, &cache->resolve, list) {
899 if (p->name_state != NAME_PENDING &&
900 abs(p->data.rssi) >= abs(ie->data.rssi))
905 list_add(&ie->list, pos);
/* Insert or refresh an inquiry-cache entry for @data.  Updates the SSP
 * flag for the caller, re-sorts the resolve list when the RSSI of a
 * name-needed entry changed, and allocates a new entry (GFP_ATOMIC) when
 * the address is not cached yet.  The bool return presumably indicates
 * whether a name lookup is still needed -- the visible check is
 * name_state == NAME_NOT_KNOWN, but the return lines are missing; TODO
 * confirm against the full source.
 */
908 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
909 bool name_known, bool *ssp)
911 struct discovery_state *cache = &hdev->discovery;
912 struct inquiry_entry *ie;
914 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* A device answering inquiry is present; any stale OOB data is void. */
916 hci_remove_remote_oob_data(hdev, &data->bdaddr);
919 *ssp = data->ssp_mode;
921 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
923 if (ie->data.ssp_mode && ssp)
926 if (ie->name_state == NAME_NEEDED &&
927 data->rssi != ie->data.rssi) {
928 ie->data.rssi = data->rssi;
929 hci_inquiry_cache_update_resolve(hdev, ie);
935 /* Entry not in the cache. Add new one. */
936 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
940 list_add(&ie->all, &cache->all);
943 ie->name_state = NAME_KNOWN;
945 ie->name_state = NAME_NOT_KNOWN;
946 list_add(&ie->list, &cache->unknown);
950 if (name_known && ie->name_state != NAME_KNOWN &&
951 ie->name_state != NAME_PENDING) {
952 ie->name_state = NAME_KNOWN;
956 memcpy(&ie->data, data, sizeof(*data));
957 ie->timestamp = jiffies;
958 cache->timestamp = jiffies;
960 if (ie->name_state == NAME_NOT_KNOWN)
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info, returning the number copied (return line missing
 * from this extraction).  Caller must not let this sleep -- see the
 * GFP_KERNEL temp-buffer pattern in hci_inquiry().
 */
966 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
968 struct discovery_state *cache = &hdev->discovery;
969 struct inquiry_info *info = (struct inquiry_info *) buf;
970 struct inquiry_entry *e;
973 list_for_each_entry(e, &cache->all, all) {
974 struct inquiry_data *data = &e->data;
979 bacpy(&info->bdaddr, &data->bdaddr);
980 info->pscan_rep_mode = data->pscan_rep_mode;
981 info->pscan_period_mode = data->pscan_period_mode;
982 info->pscan_mode = data->pscan_mode;
983 memcpy(info->dev_class, data->dev_class, 3);
984 info->clock_offset = data->clock_offset;
990 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder for hci_inquiry(): queue an HCI_Inquiry command built
 * from the user-supplied hci_inquiry_req, unless an inquiry is already
 * running.
 */
994 static void hci_inq_req(struct hci_request *req, unsigned long opt)
996 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
997 struct hci_dev *hdev = req->hdev;
998 struct hci_cp_inquiry cp;
1000 BT_DBG("%s", hdev->name);
1002 if (test_bit(HCI_INQUIRY, &hdev->flags))
1006 memcpy(&cp.lap, &ir->lap, 3);
1007 cp.length = ir->length;
1008 cp.num_rsp = ir->num_rsp;
1009 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action: sleep (schedule call on a missing line) and let
 * a pending signal abort the wait.
 */
1012 static int wait_inquiry(void *word)
1015 return signal_pending(current);
/* HCIINQUIRY ioctl handler: validate the request, flush a stale cache,
 * run the inquiry synchronously, wait for the HCI_INQUIRY flag to clear,
 * then copy the cached results back to user space.  Rejects devices in
 * user-channel mode or without BR/EDR enabled.
 */
1018 int hci_inquiry(void __user *arg)
1020 __u8 __user *ptr = arg;
1021 struct hci_inquiry_req ir;
1022 struct hci_dev *hdev;
1023 int err = 0, do_inquiry = 0, max_rsp;
1027 if (copy_from_user(&ir, ptr, sizeof(ir)))
1030 hdev = hci_dev_get(ir.dev_id);
1034 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1039 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Re-run the inquiry when the cache is stale/empty or a flush was
 * explicitly requested. */
1045 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1046 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1047 hci_inquiry_cache_flush(hdev);
1050 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units; 2000ms per unit bounds the wait. */
1052 timeo = ir.length * msecs_to_jiffies(2000);
1055 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1060 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1061 * cleared). If it is interrupted by a signal, return -EINTR.
1063 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1064 TASK_INTERRUPTIBLE))
1068 /* for unlimited number of responses we will use buffer with
1071 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1073 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1074 * copy it to the user space.
1076 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1083 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1084 hci_dev_unlock(hdev);
1086 BT_DBG("num_rsp %d", ir.num_rsp);
1088 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1090 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Assemble LE advertising data into @ptr: flags (general discoverable /
 * BR-EDR simultaneity / no-BR-EDR), TX power if known, and the local
 * name (shortened when it won't fit).  Returns the total AD length
 * (return line missing from this extraction).
 */
1103 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1105 u8 ad_len = 0, flags = 0;
1108 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1109 flags |= LE_AD_GENERAL;
1111 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1112 if (lmp_le_br_capable(hdev))
1113 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1114 if (lmp_host_le_br_capable(hdev))
1115 flags |= LE_AD_SIM_LE_BREDR_HOST;
1117 flags |= LE_AD_NO_BREDR;
1121 BT_DBG("adv flags 0x%02x", flags);
1131 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1133 ptr[1] = EIR_TX_POWER;
1134 ptr[2] = (u8) hdev->adv_tx_power;
1140 name_len = strlen(hdev->dev_name);
/* 2 bytes of AD structure overhead: length octet + type octet. */
1142 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1144 if (name_len > max_len) {
1146 ptr[1] = EIR_NAME_SHORT;
1148 ptr[1] = EIR_NAME_COMPLETE;
1150 ptr[0] = name_len + 1;
1152 memcpy(ptr + 2, hdev->dev_name, name_len);
1154 ad_len += (name_len + 2);
1155 ptr += (name_len + 2);
/* Regenerate the LE advertising data and queue an LE Set Advertising
 * Data command -- but only if the data actually changed since the last
 * update (cached in hdev->adv_data).
 */
1161 void hci_update_ad(struct hci_request *req)
1163 struct hci_dev *hdev = req->hdev;
1164 struct hci_cp_le_set_adv_data cp;
1167 if (!lmp_le_capable(hdev))
1170 memset(&cp, 0, sizeof(cp));
1172 len = create_ad(hdev, cp.data);
/* Unchanged data: skip the redundant command. */
1174 if (hdev->adv_data_len == len &&
1175 memcmp(cp.data, hdev->adv_data, len) == 0)
1178 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1179 hdev->adv_data_len = len;
1183 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Bring a device up: refuse if unregistering / rf-killed (outside setup)
 * / already up, call the driver open and optional setup hooks, run the
 * full HCI init sequence (unless raw or user-channel), and on success
 * mark HCI_UP, notify and report powered to mgmt.  On init failure the
 * work items and queues are flushed and the driver is closed (the close
 * call is on a missing line).
 */
1186 static int hci_dev_do_open(struct hci_dev *hdev)
1190 BT_DBG("%s %p", hdev->name, hdev);
1194 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1199 /* Check for rfkill but allow the HCI setup stage to proceed
1200 * (which in itself doesn't cause any RF activity).
1202 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
1203 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1208 if (test_bit(HCI_UP, &hdev->flags)) {
1213 if (hdev->open(hdev)) {
/* cmd_cnt starts at 1: one command may be outstanding initially. */
1218 atomic_set(&hdev->cmd_cnt, 1);
1219 set_bit(HCI_INIT, &hdev->flags);
1221 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1222 ret = hdev->setup(hdev);
1225 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1226 set_bit(HCI_RAW, &hdev->flags);
1228 if (!test_bit(HCI_RAW, &hdev->flags) &&
1229 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1230 ret = __hci_init(hdev);
1233 clear_bit(HCI_INIT, &hdev->flags);
1237 set_bit(HCI_UP, &hdev->flags);
1238 hci_notify(hdev, HCI_DEV_UP);
1239 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1240 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1241 mgmt_valid_hdev(hdev)) {
1243 mgmt_powered(hdev, 1);
1244 hci_dev_unlock(hdev);
1247 /* Init failed, cleanup */
1248 flush_work(&hdev->tx_work);
1249 flush_work(&hdev->cmd_work);
1250 flush_work(&hdev->rx_work);
1252 skb_queue_purge(&hdev->cmd_q);
1253 skb_queue_purge(&hdev->rx_q);
1258 if (hdev->sent_cmd) {
1259 kfree_skb(hdev->sent_cmd);
1260 hdev->sent_cmd = NULL;
1268 hci_req_unlock(hdev);
1272 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl: cancel any pending auto-power-off, drain the request
 * workqueue, then open the device via hci_dev_do_open().
 */
1274 int hci_dev_open(__u16 dev)
1276 struct hci_dev *hdev;
1279 hdev = hci_dev_get(dev);
1283 /* We need to ensure that no other power on/off work is pending
1284 * before proceeding to call hci_dev_do_open. This is
1285 * particularly important if the setup procedure has not yet
1288 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1289 cancel_delayed_work(&hdev->power_off);
1291 flush_workqueue(hdev->req_workqueue);
1293 err = hci_dev_do_open(hdev);
/* Bring a device down: cancel pending work, flush workers and queues,
 * flush inquiry cache and connections, optionally reset the controller
 * (reset-on-close quirk, non-raw only), drop the last sent command and
 * stashed event, clear non-persistent flags and report powered-off to
 * mgmt.  Early-exits if HCI_UP was already clear.
 */
1300 static int hci_dev_do_close(struct hci_dev *hdev)
1302 BT_DBG("%s %p", hdev->name, hdev);
1304 cancel_delayed_work(&hdev->power_off);
1306 hci_req_cancel(hdev, ENODEV);
1309 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1310 del_timer_sync(&hdev->cmd_timer);
1311 hci_req_unlock(hdev);
1315 /* Flush RX and TX works */
1316 flush_work(&hdev->tx_work);
1317 flush_work(&hdev->rx_work);
1319 if (hdev->discov_timeout > 0) {
1320 cancel_delayed_work(&hdev->discov_off);
1321 hdev->discov_timeout = 0;
1322 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1325 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1326 cancel_delayed_work(&hdev->service_cache);
1328 cancel_delayed_work_sync(&hdev->le_scan_disable);
1331 hci_inquiry_cache_flush(hdev);
1332 hci_conn_hash_flush(hdev);
1333 hci_dev_unlock(hdev);
1335 hci_notify(hdev, HCI_DEV_DOWN);
1341 skb_queue_purge(&hdev->cmd_q);
1342 atomic_set(&hdev->cmd_cnt, 1);
/* Honor the reset-on-close quirk with a synchronous HCI_Reset. */
1343 if (!test_bit(HCI_RAW, &hdev->flags) &&
1344 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1345 set_bit(HCI_INIT, &hdev->flags);
1346 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1347 clear_bit(HCI_INIT, &hdev->flags);
1350 /* flush cmd work */
1351 flush_work(&hdev->cmd_work);
1354 skb_queue_purge(&hdev->rx_q);
1355 skb_queue_purge(&hdev->cmd_q);
1356 skb_queue_purge(&hdev->raw_q);
1358 /* Drop last sent command */
1359 if (hdev->sent_cmd) {
1360 del_timer_sync(&hdev->cmd_timer);
1361 kfree_skb(hdev->sent_cmd);
1362 hdev->sent_cmd = NULL;
1365 kfree_skb(hdev->recv_evt);
1366 hdev->recv_evt = NULL;
1368 /* After this point our queues are empty
1369 * and no tasks are scheduled. */
1374 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1376 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1377 mgmt_valid_hdev(hdev)) {
1379 mgmt_powered(hdev, 0);
1380 hci_dev_unlock(hdev);
1383 /* Controller radio is available but is currently powered down */
1384 hdev->amp_status = 0;
1386 memset(hdev->eir, 0, sizeof(hdev->eir));
1387 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1389 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl: reject user-channel devices, cancel pending
 * auto-power-off, then close via hci_dev_do_close().
 */
1395 int hci_dev_close(__u16 dev)
1397 struct hci_dev *hdev;
1400 hdev = hci_dev_get(dev);
1404 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1409 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1410 cancel_delayed_work(&hdev->power_off);
1412 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: drop queued traffic, flush the inquiry cache and
 * connections, reset packet counters, and (for non-raw devices) issue a
 * synchronous HCI_Reset.  Requires the device to be up and not in
 * user-channel mode.
 */
1419 int hci_dev_reset(__u16 dev)
1421 struct hci_dev *hdev;
1424 hdev = hci_dev_get(dev);
1430 if (!test_bit(HCI_UP, &hdev->flags)) {
1435 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1441 skb_queue_purge(&hdev->rx_q);
1442 skb_queue_purge(&hdev->cmd_q);
1445 hci_inquiry_cache_flush(hdev);
1446 hci_conn_hash_flush(hdev);
1447 hci_dev_unlock(hdev);
1452 atomic_set(&hdev->cmd_cnt, 1);
1453 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1455 if (!test_bit(HCI_RAW, &hdev->flags))
1456 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1459 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device's statistics counters (rejected
 * for user-channel devices).
 */
1464 int hci_dev_reset_stat(__u16 dev)
1466 struct hci_dev *hdev;
1469 hdev = hci_dev_get(dev);
1473 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1478 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* ioctls: auth, encrypt (auth is forced on
 * first), scan, link policy, link mode, packet type, and ACL/SCO MTU
 * settings.  Rejects user-channel devices and, where relevant, devices
 * without BR/EDR enabled.  Several case labels are on lines missing from
 * this extraction.
 */
1485 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1487 struct hci_dev *hdev;
1488 struct hci_dev_req dr;
1491 if (copy_from_user(&dr, arg, sizeof(dr)))
1494 hdev = hci_dev_get(dr.dev_id);
1498 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1503 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1510 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1515 if (!lmp_encrypt_capable(hdev)) {
1520 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1521 /* Auth must be enabled first */
1522 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1528 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1533 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1538 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1542 case HCISETLINKMODE:
1543 hdev->link_mode = ((__u16) dr.dev_opt) &
1544 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1548 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs two __u16 values: [0] = packet count, [1] = MTU. */
1552 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1553 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1557 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1558 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: allocate a bounded list (capped at 2 pages of
 * hci_dev_req entries), fill it with each registered device's id and
 * flags under the list read lock, and copy it to user space.  Pending
 * auto-power-off is cancelled and the pairable flag set for non-mgmt
 * devices as a side effect of listing.
 */
1571 int hci_get_dev_list(void __user *arg)
1573 struct hci_dev *hdev;
1574 struct hci_dev_list_req *dl;
1575 struct hci_dev_req *dr;
1576 int n = 0, size, err;
1579 if (get_user(dev_num, (__u16 __user *) arg))
1582 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1585 size = sizeof(*dl) + dev_num * sizeof(*dr);
1587 dl = kzalloc(size, GFP_KERNEL)
1593 read_lock(&hci_dev_list_lock);
1594 list_for_each_entry(hdev, &hci_dev_list, list) {
1595 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1596 cancel_delayed_work(&hdev->power_off);
1598 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1599 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1601 (dr + n)->dev_id = hdev->id;
1602 (dr + n)->dev_opt = hdev->flags;
1607 read_unlock(&hci_dev_list_lock);
/* Shrink the copy size to the number of devices actually found. */
1610 size = sizeof(*dl) + n * sizeof(*dr);
1612 err = copy_to_user(arg, dl, size);
1615 return err ? -EFAULT : 0;
1618 int hci_get_dev_info(void __user *arg)
1620 struct hci_dev *hdev;
1621 struct hci_dev_info di;
1624 if (copy_from_user(&di, arg, sizeof(di)))
1627 hdev = hci_dev_get(di.dev_id);
1631 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1632 cancel_delayed_work_sync(&hdev->power_off);
1634 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1635 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1637 strcpy(di.name, hdev->name);
1638 di.bdaddr = hdev->bdaddr;
1639 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1640 di.flags = hdev->flags;
1641 di.pkt_type = hdev->pkt_type;
1642 if (lmp_bredr_capable(hdev)) {
1643 di.acl_mtu = hdev->acl_mtu;
1644 di.acl_pkts = hdev->acl_pkts;
1645 di.sco_mtu = hdev->sco_mtu;
1646 di.sco_pkts = hdev->sco_pkts;
1648 di.acl_mtu = hdev->le_mtu;
1649 di.acl_pkts = hdev->le_pkts;
1653 di.link_policy = hdev->link_policy;
1654 di.link_mode = hdev->link_mode;
1656 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1657 memcpy(&di.features, &hdev->features, sizeof(di.features));
1659 if (copy_to_user(arg, &di, sizeof(di)))
1667 /* ---- Interface to HCI drivers ---- */
/* hci_rfkill_set_block() - rfkill callback: on block, mark the device
 * RFKILLED and power it down (unless still in setup); on unblock,
 * clear the flag.  User-channel devices are left alone.
 * NOTE(review): sampled listing — the return statements are not shown.
 */
1669 static int hci_rfkill_set_block(void *data, bool blocked)
1671 struct hci_dev *hdev = data;
1673 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1675 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1679 set_bit(HCI_RFKILLED, &hdev->dev_flags);
1680 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1681 hci_dev_do_close(hdev);
/* else branch: radio unblocked again. */
1683 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1689 static const struct rfkill_ops hci_rfkill_ops = {
1690 .set_block = hci_rfkill_set_block,
/* hci_power_on() - workqueue handler that brings the device up.  If
 * rfkill blocked the radio meanwhile, the device is closed again; if
 * it was auto-powered (HCI_AUTO_OFF), a delayed power-off is armed.
 * Leaving HCI_SETUP announces the controller to mgmt.
 */
1693 static void hci_power_on(struct work_struct *work)
1695 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1698 BT_DBG("%s", hdev->name);
1700 err = hci_dev_do_open(hdev);
/* Open failure is reported to the management interface — the guard
 * around this call is not visible in the sample. */
1702 mgmt_set_powered_failed(hdev, err);
1706 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1707 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1708 hci_dev_do_close(hdev);
1709 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1710 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1711 HCI_AUTO_OFF_TIMEOUT);
1714 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1715 mgmt_index_added(hdev);
/* hci_power_off() - delayed-work handler: simply closes the device. */
1718 static void hci_power_off(struct work_struct *work)
1720 struct hci_dev *hdev = container_of(work, struct hci_dev,
1723 BT_DBG("%s", hdev->name);
1725 hci_dev_do_close(hdev);
/* hci_discov_off() - delayed-work handler ending limited discoverable
 * mode: rewrites the scan enable to page-scan only and clears the
 * stored discoverable timeout.
 */
1728 static void hci_discov_off(struct work_struct *work)
1730 struct hci_dev *hdev;
1731 u8 scan = SCAN_PAGE;
1733 hdev = container_of(work, struct hci_dev, discov_off.work);
1735 BT_DBG("%s", hdev->name);
1739 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1741 hdev->discov_timeout = 0;
1743 hci_dev_unlock(hdev);
/* hci_uuids_clear() - free every registered service UUID on hdev. */
1746 int hci_uuids_clear(struct hci_dev *hdev)
1748 struct bt_uuid *uuid, *tmp;
1750 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1751 list_del(&uuid->list);
/* hci_link_keys_clear() - free every stored BR/EDR link key. */
1758 int hci_link_keys_clear(struct hci_dev *hdev)
1760 struct list_head *p, *n;
1762 list_for_each_safe(p, n, &hdev->link_keys) {
1763 struct link_key *key;
1765 key = list_entry(p, struct link_key, list);
/* hci_smp_ltks_clear() - free every stored SMP long term key. */
1774 int hci_smp_ltks_clear(struct hci_dev *hdev)
1776 struct smp_ltk *k, *tmp;
1778 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* hci_find_link_key() - linear lookup of a link key by remote bdaddr;
 * presumably returns NULL when no entry matches (tail not visible).
 */
1786 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1790 list_for_each_entry(k, &hdev->link_keys, list)
1791 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* hci_persistent_key() - decide whether a newly-received link key
 * should be stored persistently, based on key type and the local and
 * remote authentication requirements of the connection.
 */
1797 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1798 u8 key_type, u8 old_key_type)
/* Legacy (pre-SSP) key types 0x00-0x02 — verdict line not visible. */
1801 if (key_type < 0x03)
1804 /* Debug keys are insecure so don't store them persistently */
1805 if (key_type == HCI_LK_DEBUG_COMBINATION)
1808 /* Changed combination key and there's no previous one */
1809 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1812 /* Security mode 3 case */
1816 /* Neither local nor remote side had no-bonding as requirement */
1817 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1820 /* Local side had dedicated bonding as requirement */
1821 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1824 /* Remote side had dedicated bonding as requirement */
1825 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1828 /* If none of the above criteria match, then don't store the key
/* hci_find_ltk() - look up an LTK by its EDiv/Rand pair. */
1833 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1837 list_for_each_entry(k, &hdev->long_term_keys, list) {
1838 if (k->ediv != ediv ||
1839 memcmp(rand, k->rand, sizeof(k->rand)))
/* hci_find_ltk_by_addr() - look up an LTK by address and address type. */
1848 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1853 list_for_each_entry(k, &hdev->long_term_keys, list)
1854 if (addr_type == k->bdaddr_type &&
1855 bacmp(bdaddr, &k->bdaddr) == 0)
/* hci_add_link_key() - store (or update) a BR/EDR link key.  A fresh
 * entry is allocated with GFP_ATOMIC when no key exists for bdaddr.
 * Decides persistence via hci_persistent_key() and notifies mgmt.
 */
1861 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1862 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1864 struct link_key *key, *old_key;
1868 old_key = hci_find_link_key(hdev, bdaddr);
1870 old_key_type = old_key->type;
/* No previous key: remember the connection's key type (or 0xff). */
1873 old_key_type = conn ? conn->key_type : 0xff;
1874 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1877 list_add(&key->list, &hdev->link_keys);
1880 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1882 /* Some buggy controller combinations generate a changed
1883 * combination key for legacy pairing even when there's no
/* Work around that case by downgrading to a plain combination key. */
1885 if (type == HCI_LK_CHANGED_COMBINATION &&
1886 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1887 type = HCI_LK_COMBINATION;
1889 conn->key_type = type;
1892 bacpy(&key->bdaddr, bdaddr);
1893 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1894 key->pin_len = pin_len;
/* A "changed combination" event keeps the original key's type. */
1896 if (type == HCI_LK_CHANGED_COMBINATION)
1897 key->type = old_key_type;
1904 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1906 mgmt_new_link_key(hdev, key, persistent);
/* Non-persistent keys are flushed when the connection drops. */
1909 conn->flush_key = !persistent;
/* hci_add_ltk() - store (or update) an SMP key; only STK and LTK types
 * are accepted.  Reuses an existing entry for the same address when
 * present, otherwise allocates a new one.  mgmt is notified only for
 * long term keys (STKs are transient).
 * NOTE(review): sampled listing — the old_key reuse branch and return
 * lines are not visible.
 */
1914 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1915 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1918 struct smp_ltk *key, *old_key;
1920 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1923 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1927 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1930 list_add(&key->list, &hdev->long_term_keys);
1933 bacpy(&key->bdaddr, bdaddr);
1934 key->bdaddr_type = addr_type;
1935 memcpy(key->val, tk, sizeof(key->val));
1936 key->authenticated = authenticated;
1938 key->enc_size = enc_size;
1940 memcpy(key->rand, rand, sizeof(key->rand));
1945 if (type & HCI_SMP_LTK)
1946 mgmt_new_ltk(hdev, key, 1);
/* hci_remove_link_key() - delete the stored link key for bdaddr. */
1951 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1953 struct link_key *key;
1955 key = hci_find_link_key(hdev, bdaddr);
1959 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1961 list_del(&key->list);
/* hci_remove_ltk() - delete every stored SMP key for bdaddr. */
1967 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1969 struct smp_ltk *k, *tmp;
1971 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1972 if (bacmp(bdaddr, &k->bdaddr))
1975 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1984 /* HCI command timer function */
/* hci_cmd_timeout() - fires when the controller did not answer the
 * last command in time: logs the stuck opcode (if any), restores the
 * command credit and kicks the command work so the queue can drain.
 */
1985 static void hci_cmd_timeout(unsigned long arg)
1987 struct hci_dev *hdev = (void *) arg;
1989 if (hdev->sent_cmd) {
1990 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1991 u16 opcode = __le16_to_cpu(sent->opcode);
1993 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
/* else branch: no record of the command that timed out. */
1995 BT_ERR("%s command tx timeout", hdev->name);
1998 atomic_set(&hdev->cmd_cnt, 1);
1999 queue_work(hdev->workqueue, &hdev->cmd_work);
/* hci_find_remote_oob_data() - linear lookup of stored out-of-band
 * pairing data by remote bdaddr.
 */
2002 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2005 struct oob_data *data;
2007 list_for_each_entry(data, &hdev->remote_oob_data, list)
2008 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* hci_remove_remote_oob_data() - delete the OOB entry for bdaddr. */
2014 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2016 struct oob_data *data;
2018 data = hci_find_remote_oob_data(hdev, bdaddr);
2022 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2024 list_del(&data->list);
/* hci_remote_oob_data_clear() - free all stored OOB entries. */
2030 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2032 struct oob_data *data, *n;
2034 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2035 list_del(&data->list);
/* hci_add_remote_oob_data() - store (or refresh) the OOB hash and
 * randomizer for bdaddr; allocates a new entry only when none exists.
 */
2042 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2045 struct oob_data *data;
2047 data = hci_find_remote_oob_data(hdev, bdaddr);
2050 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2054 bacpy(&data->bdaddr, bdaddr);
2055 list_add(&data->list, &hdev->remote_oob_data);
2058 memcpy(data->hash, hash, sizeof(data->hash));
2059 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2061 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* hci_blacklist_lookup() - find a blacklist entry for bdaddr. */
2066 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
2068 struct bdaddr_list *b;
2070 list_for_each_entry(b, &hdev->blacklist, list)
2071 if (bacmp(bdaddr, &b->bdaddr) == 0)
/* hci_blacklist_clear() - free every blacklist entry. */
2077 int hci_blacklist_clear(struct hci_dev *hdev)
2079 struct list_head *p, *n;
2081 list_for_each_safe(p, n, &hdev->blacklist) {
2082 struct bdaddr_list *b;
2084 b = list_entry(p, struct bdaddr_list, list);
/* hci_blacklist_add() - block a remote address.  BDADDR_ANY and
 * duplicates are rejected; mgmt is notified on success.
 */
2093 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2095 struct bdaddr_list *entry;
2097 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2100 if (hci_blacklist_lookup(hdev, bdaddr))
2103 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2107 bacpy(&entry->bdaddr, bdaddr);
2109 list_add(&entry->list, &hdev->blacklist);
2111 return mgmt_device_blocked(hdev, bdaddr, type);
/* hci_blacklist_del() - unblock one address, or clear the whole
 * blacklist when BDADDR_ANY is passed; mgmt is notified.
 */
2114 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2116 struct bdaddr_list *entry;
2118 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2119 return hci_blacklist_clear(hdev);
2121 entry = hci_blacklist_lookup(hdev, bdaddr);
2125 list_del(&entry->list);
2128 return mgmt_device_unblocked(hdev, bdaddr, type);
/* inquiry_complete() - request-completion callback: on failure, log
 * and mark discovery as stopped.
 */
2131 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2134 BT_ERR("Failed to start inquiry: status %d", status);
2137 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2138 hci_dev_unlock(hdev);
/* le_scan_disable_work_complete() - runs after LE scanning was
 * disabled.  For LE-only discovery the session simply ends; for
 * interleaved discovery a BR/EDR inquiry (GIAC) is started next.
 */
2143 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2145 /* General inquiry access code (GIAC) */
2146 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2147 struct hci_request req;
2148 struct hci_cp_inquiry cp;
2152 BT_ERR("Failed to disable LE scanning: status %d", status);
2156 switch (hdev->discovery.type) {
2157 case DISCOV_TYPE_LE:
2159 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2160 hci_dev_unlock(hdev);
2163 case DISCOV_TYPE_INTERLEAVED:
2164 hci_req_init(&req, hdev);
2166 memset(&cp, 0, sizeof(cp));
2167 memcpy(&cp.lap, lap, sizeof(cp.lap));
2168 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2169 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Stale inquiry results are dropped before the new inquiry runs. */
2173 hci_inquiry_cache_flush(hdev);
2175 err = hci_req_run(&req, inquiry_complete);
2177 BT_ERR("Inquiry request failed: err %d", err);
2178 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2181 hci_dev_unlock(hdev);
/* le_scan_disable_work() - delayed-work handler that sends
 * LE_Set_Scan_Enable(disable) and chains into the completion above.
 */
2186 static void le_scan_disable_work(struct work_struct *work)
2188 struct hci_dev *hdev = container_of(work, struct hci_dev,
2189 le_scan_disable.work);
2190 struct hci_cp_le_set_scan_enable cp;
2191 struct hci_request req;
2194 BT_DBG("%s", hdev->name);
2196 hci_req_init(&req, hdev);
2198 memset(&cp, 0, sizeof(cp));
2199 cp.enable = LE_SCAN_DISABLE;
2200 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2202 err = hci_req_run(&req, le_scan_disable_work_complete);
2204 BT_ERR("Disable LE scanning request failed: err %d", err);
2207 /* Alloc HCI device */
/* hci_alloc_dev() - allocate and initialize a struct hci_dev with
 * default packet types, locks, lists, work items, queues and the
 * command timeout timer.  The caller registers it with
 * hci_register_dev() and releases it with hci_free_dev().
 */
2208 struct hci_dev *hci_alloc_dev(void)
2210 struct hci_dev *hdev;
2212 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Conservative BR/EDR defaults until the controller reports its
 * real capabilities during init. */
2216 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2217 hdev->esco_type = (ESCO_HV1);
2218 hdev->link_mode = (HCI_LM_ACCEPT);
2219 hdev->io_capability = 0x03; /* No Input No Output */
2220 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2221 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
/* Sniff interval bounds in baseband slots (0.625 ms units). */
2223 hdev->sniff_max_interval = 800;
2224 hdev->sniff_min_interval = 80;
2226 mutex_init(&hdev->lock);
2227 mutex_init(&hdev->req_lock);
2229 INIT_LIST_HEAD(&hdev->mgmt_pending);
2230 INIT_LIST_HEAD(&hdev->blacklist);
2231 INIT_LIST_HEAD(&hdev->uuids);
2232 INIT_LIST_HEAD(&hdev->link_keys);
2233 INIT_LIST_HEAD(&hdev->long_term_keys);
2234 INIT_LIST_HEAD(&hdev->remote_oob_data);
2235 INIT_LIST_HEAD(&hdev->conn_hash.list);
2237 INIT_WORK(&hdev->rx_work, hci_rx_work);
2238 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2239 INIT_WORK(&hdev->tx_work, hci_tx_work);
2240 INIT_WORK(&hdev->power_on, hci_power_on);
2242 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2243 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2244 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2246 skb_queue_head_init(&hdev->rx_q);
2247 skb_queue_head_init(&hdev->cmd_q);
2248 skb_queue_head_init(&hdev->raw_q);
2250 init_waitqueue_head(&hdev->req_wait_q);
2252 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2254 hci_init_sysfs(hdev);
2255 discovery_init(hdev);
2259 EXPORT_SYMBOL(hci_alloc_dev);
2261 /* Free HCI device */
/* hci_free_dev() - drop the device reference; the memory is released
 * through the embedded struct device's release callback.
 */
2262 void hci_free_dev(struct hci_dev *hdev)
2264 /* will free via device release */
2265 put_device(&hdev->dev);
2267 EXPORT_SYMBOL(hci_free_dev);
2269 /* Register HCI device */
/* hci_register_dev() - make an allocated hdev live: allocate an index
 * (AMP devices never get index 0), create the per-device workqueues,
 * register sysfs and rfkill, add it to the global device list, notify
 * listeners and schedule the initial power-on.
 * NOTE(review): sampled listing — several error-path labels/returns
 * are not visible; the tail lines form the unwind sequence.
 */
2270 int hci_register_dev(struct hci_dev *hdev)
2274 if (!hdev->open || !hdev->close)
2277 /* Do not allow HCI_AMP devices to register at index 0,
2278 * so the index can be used as the AMP controller ID.
2280 switch (hdev->dev_type) {
2282 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2285 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2294 sprintf(hdev->name, "hci%d", id);
2297 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2299 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2300 WQ_MEM_RECLAIM, 1, hdev->name);
2301 if (!hdev->workqueue) {
2306 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2307 WQ_MEM_RECLAIM, 1, hdev->name);
2308 if (!hdev->req_workqueue) {
2309 destroy_workqueue(hdev->workqueue);
2314 error = hci_add_sysfs(hdev);
2318 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2319 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill registration failure is non-fatal: drop the rfkill object
 * and continue without it. */
2322 if (rfkill_register(hdev->rfkill) < 0) {
2323 rfkill_destroy(hdev->rfkill);
2324 hdev->rfkill = NULL;
2328 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2329 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2331 set_bit(HCI_SETUP, &hdev->dev_flags);
2333 if (hdev->dev_type != HCI_AMP) {
2334 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2335 /* Assume BR/EDR support until proven otherwise (such as
2336 * through reading supported features during init.
2338 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2341 write_lock(&hci_dev_list_lock);
2342 list_add(&hdev->list, &hci_dev_list);
2343 write_unlock(&hci_dev_list_lock);
2345 hci_notify(hdev, HCI_DEV_REG);
2348 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind: tear down workqueues and give the index back. */
2353 destroy_workqueue(hdev->workqueue);
2354 destroy_workqueue(hdev->req_workqueue);
2356 ida_simple_remove(&hci_index_ida, hdev->id);
2360 EXPORT_SYMBOL(hci_register_dev);
2362 /* Unregister HCI device */
/* hci_unregister_dev() - reverse of hci_register_dev(): mark the
 * device as unregistering, unlink it, close it, cancel pending work,
 * tell mgmt/notifiers, tear down rfkill/sysfs/workqueues, free all
 * stored keys and lists, and release the index.
 */
2363 void hci_unregister_dev(struct hci_dev *hdev)
2367 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Set before unlinking so concurrent paths see the device dying. */
2369 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2373 write_lock(&hci_dev_list_lock);
2374 list_del(&hdev->list);
2375 write_unlock(&hci_dev_list_lock);
2377 hci_dev_do_close(hdev);
2379 for (i = 0; i < NUM_REASSEMBLY; i++)
2380 kfree_skb(hdev->reassembly[i]);
2382 cancel_work_sync(&hdev->power_on);
/* Only announce removal to mgmt for devices that finished setup. */
2384 if (!test_bit(HCI_INIT, &hdev->flags) &&
2385 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2387 mgmt_index_removed(hdev);
2388 hci_dev_unlock(hdev);
2391 /* mgmt_index_removed should take care of emptying the
2393 BUG_ON(!list_empty(&hdev->mgmt_pending));
2395 hci_notify(hdev, HCI_DEV_UNREG);
2398 rfkill_unregister(hdev->rfkill);
2399 rfkill_destroy(hdev->rfkill);
2402 hci_del_sysfs(hdev);
2404 destroy_workqueue(hdev->workqueue);
2405 destroy_workqueue(hdev->req_workqueue);
2408 hci_blacklist_clear(hdev);
2409 hci_uuids_clear(hdev);
2410 hci_link_keys_clear(hdev);
2411 hci_smp_ltks_clear(hdev);
2412 hci_remote_oob_data_clear(hdev);
2413 hci_dev_unlock(hdev);
2417 ida_simple_remove(&hci_index_ida, id);
2419 EXPORT_SYMBOL(hci_unregister_dev);
2421 /* Suspend HCI device */
/* hci_suspend_dev() - driver hook: only broadcasts the suspend event. */
2422 int hci_suspend_dev(struct hci_dev *hdev)
2424 hci_notify(hdev, HCI_DEV_SUSPEND);
2427 EXPORT_SYMBOL(hci_suspend_dev);
2429 /* Resume HCI device */
/* hci_resume_dev() - driver hook: only broadcasts the resume event. */
2430 int hci_resume_dev(struct hci_dev *hdev)
2432 hci_notify(hdev, HCI_DEV_RESUME);
2435 EXPORT_SYMBOL(hci_resume_dev);
2437 /* Receive frame from HCI drivers */
/* hci_recv_frame() - entry point for drivers handing a complete frame
 * to the stack: frames are dropped unless the device is UP or in INIT,
 * then timestamped, queued on rx_q and the RX work is scheduled.
 */
2438 int hci_recv_frame(struct sk_buff *skb)
2440 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2441 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2442 && !test_bit(HCI_INIT, &hdev->flags))) {
2448 bt_cb(skb)->incoming = 1;
2451 __net_timestamp(skb);
2453 skb_queue_tail(&hdev->rx_q, skb);
2454 queue_work(hdev->workqueue, &hdev->rx_work);
2458 EXPORT_SYMBOL(hci_recv_frame);
/* hci_reassembly() - accumulate partial packet data into the per-slot
 * reassembly skb.  A new skb is allocated (sized by packet type) when
 * the slot is empty; once the header is complete, scb->expect is set
 * from the header's length field, and a finished frame is passed to
 * hci_recv_frame().  Presumably returns the count of bytes left
 * unconsumed — return lines not visible in this sample.
 */
2460 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2461 int count, __u8 index)
2466 struct sk_buff *skb;
2467 struct bt_skb_cb *scb;
2469 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2470 index >= NUM_REASSEMBLY)
2473 skb = hdev->reassembly[index];
/* Per-type maximum frame size and header length. */
2477 case HCI_ACLDATA_PKT:
2478 len = HCI_MAX_FRAME_SIZE;
2479 hlen = HCI_ACL_HDR_SIZE;
2482 len = HCI_MAX_EVENT_SIZE;
2483 hlen = HCI_EVENT_HDR_SIZE;
2485 case HCI_SCODATA_PKT:
2486 len = HCI_MAX_SCO_SIZE;
2487 hlen = HCI_SCO_HDR_SIZE;
2491 skb = bt_skb_alloc(len, GFP_ATOMIC);
2495 scb = (void *) skb->cb;
2497 scb->pkt_type = type;
2499 skb->dev = (void *) hdev;
2500 hdev->reassembly[index] = skb;
2504 scb = (void *) skb->cb;
/* Copy no more than the bytes still expected for this frame. */
2505 len = min_t(uint, scb->expect, count);
2507 memcpy(skb_put(skb, len), data, len);
/* Header complete: learn the payload length from the header.  If the
 * allocated skb cannot hold it, the slot is abandoned. */
2516 if (skb->len == HCI_EVENT_HDR_SIZE) {
2517 struct hci_event_hdr *h = hci_event_hdr(skb);
2518 scb->expect = h->plen;
2520 if (skb_tailroom(skb) < scb->expect) {
2522 hdev->reassembly[index] = NULL;
2528 case HCI_ACLDATA_PKT:
2529 if (skb->len == HCI_ACL_HDR_SIZE) {
2530 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2531 scb->expect = __le16_to_cpu(h->dlen);
2533 if (skb_tailroom(skb) < scb->expect) {
2535 hdev->reassembly[index] = NULL;
2541 case HCI_SCODATA_PKT:
2542 if (skb->len == HCI_SCO_HDR_SIZE) {
2543 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2544 scb->expect = h->dlen;
2546 if (skb_tailroom(skb) < scb->expect) {
2548 hdev->reassembly[index] = NULL;
2555 if (scb->expect == 0) {
2556 /* Complete frame */
2558 bt_cb(skb)->pkt_type = type;
2559 hci_recv_frame(skb);
2561 hdev->reassembly[index] = NULL;
/* hci_recv_fragment() - feed fragment data for a known packet type
 * into its reassembly slot (slot index = type - 1) until consumed.
 */
2569 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2573 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2577 rem = hci_reassembly(hdev, type, data, count, type - 1);
2581 data += (count - rem);
2587 EXPORT_SYMBOL(hci_recv_fragment);
2589 #define STREAM_REASSEMBLY 0
/* hci_recv_stream_fragment() - like hci_recv_fragment() but for a raw
 * byte stream: the packet type is taken from the first byte of each
 * frame and a single shared reassembly slot (STREAM_REASSEMBLY) is
 * used.
 */
2591 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2597 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2600 struct { char type; } *pkt;
2602 /* Start of the frame */
2609 type = bt_cb(skb)->pkt_type;
2611 rem = hci_reassembly(hdev, type, data, count,
2616 data += (count - rem);
2622 EXPORT_SYMBOL(hci_recv_stream_fragment);
2624 /* ---- Interface to upper protocols ---- */
/* hci_register_cb() - add an upper-protocol callback set to the
 * global hci_cb_list under the writer lock.
 */
2626 int hci_register_cb(struct hci_cb *cb)
2628 BT_DBG("%p name %s", cb, cb->name);
2630 write_lock(&hci_cb_list_lock);
2631 list_add(&cb->list, &hci_cb_list);
2632 write_unlock(&hci_cb_list_lock);
2636 EXPORT_SYMBOL(hci_register_cb);
/* hci_unregister_cb() - remove a callback set from hci_cb_list. */
2638 int hci_unregister_cb(struct hci_cb *cb)
2640 BT_DBG("%p name %s", cb, cb->name);
2642 write_lock(&hci_cb_list_lock);
2643 list_del(&cb->list);
2644 write_unlock(&hci_cb_list_lock);
2648 EXPORT_SYMBOL(hci_unregister_cb);
/* hci_send_frame() - hand one frame to the driver: timestamp it, copy
 * it to the monitor socket (and to raw sockets when promiscuous),
 * strip skb ownership and call the driver's send hook.
 */
2650 static int hci_send_frame(struct sk_buff *skb)
2652 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2659 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2662 __net_timestamp(skb);
2664 /* Send copy to monitor */
2665 hci_send_to_monitor(hdev, skb);
2667 if (atomic_read(&hdev->promisc)) {
2668 /* Send copy to the sockets */
2669 hci_send_to_sock(hdev, skb);
2672 /* Get rid of skb owner, prior to sending to the driver. */
2675 return hdev->send(skb);
/* hci_req_init() - prepare an asynchronous HCI request builder. */
2678 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2680 skb_queue_head_init(&req->cmd_q);
/* hci_req_run() - submit a built request: reject empty/errored
 * requests, tag the last command with the completion callback, splice
 * the commands onto hdev->cmd_q atomically and kick the command work.
 */
2685 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2687 struct hci_dev *hdev = req->hdev;
2688 struct sk_buff *skb;
2689 unsigned long flags;
2691 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2693 /* If an error occured during request building, remove all HCI
2694 * commands queued on the HCI request queue.
2697 skb_queue_purge(&req->cmd_q);
2701 /* Do not allow empty requests */
2702 if (skb_queue_empty(&req->cmd_q))
/* Completion is attached to the final command of the request. */
2705 skb = skb_peek_tail(&req->cmd_q);
2706 bt_cb(skb)->req.complete = complete;
2708 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2709 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2710 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2712 queue_work(hdev->workqueue, &hdev->cmd_work);
/* hci_prepare_cmd() - build an skb containing an HCI command header
 * (little-endian opcode) followed by plen bytes of parameters, tagged
 * as HCI_COMMAND_PKT and owned by hdev.
 */
2717 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2718 u32 plen, const void *param)
2720 int len = HCI_COMMAND_HDR_SIZE + plen;
2721 struct hci_command_hdr *hdr;
2722 struct sk_buff *skb;
2724 skb = bt_skb_alloc(len, GFP_ATOMIC);
2728 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2729 hdr->opcode = cpu_to_le16(opcode);
2733 memcpy(skb_put(skb, plen), param, plen);
2735 BT_DBG("skb len %d", skb->len);
2737 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2738 skb->dev = (void *) hdev;
2743 /* Send HCI command */
/* hci_send_cmd() - queue a stand-alone command (marked req.start so it
 * forms its own one-command request) and schedule the command work.
 */
2744 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2747 struct sk_buff *skb;
2749 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2751 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2753 BT_ERR("%s no memory for command", hdev->name);
2757 /* Stand-alone HCI commands must be flaged as
2758 * single-command requests.
2760 bt_cb(skb)->req.start = true;
2762 skb_queue_tail(&hdev->cmd_q, skb);
2763 queue_work(hdev->workqueue, &hdev->cmd_work);
2768 /* Queue a command to an asynchronous HCI request */
/* hci_req_add_ev() - append one command to a request under
 * construction; the first command of a request is marked req.start,
 * and event records which HCI event completes this command.
 */
2769 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2770 const void *param, u8 event)
2772 struct hci_dev *hdev = req->hdev;
2773 struct sk_buff *skb;
2775 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2777 /* If an error occured during request building, there is no point in
2778 * queueing the HCI command. We can simply return.
2783 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2785 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2786 hdev->name, opcode);
2791 if (skb_queue_empty(&req->cmd_q))
2792 bt_cb(skb)->req.start = true;
2794 bt_cb(skb)->req.event = event;
2796 skb_queue_tail(&req->cmd_q, skb);
/* hci_req_add() - convenience wrapper: command completed by the
 * default Command Complete/Status event (event = 0).
 */
2799 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2802 hci_req_add_ev(req, opcode, plen, param, 0);
2805 /* Get data from the previously sent command */
/* hci_sent_cmd_data() - return a pointer to the parameter bytes of the
 * last sent command, but only if its opcode matches; NULL-ish
 * otherwise (return lines for the mismatch cases not visible).
 */
2806 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2808 struct hci_command_hdr *hdr;
2810 if (!hdev->sent_cmd)
2813 hdr = (void *) hdev->sent_cmd->data;
2815 if (hdr->opcode != cpu_to_le16(opcode))
2818 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2820 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* hci_add_acl_hdr() - prepend an ACL data header (handle+flags packed
 * little-endian, plus payload length) to an outgoing skb.
 */
2824 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2826 struct hci_acl_hdr *hdr;
2829 skb_push(skb, HCI_ACL_HDR_SIZE);
2830 skb_reset_transport_header(skb);
2831 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2832 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2833 hdr->dlen = cpu_to_le16(len);
/* hci_queue_acl() - add ACL headers and queue an outgoing ACL packet.
 * The connection handle is used on BR/EDR controllers and the channel
 * handle on AMP controllers.  A fragmented skb (frag_list) is split:
 * all fragments are queued atomically, with ACL_START cleared on
 * continuation fragments.
 */
2836 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2837 struct sk_buff *skb, __u16 flags)
2839 struct hci_conn *conn = chan->conn;
2840 struct hci_dev *hdev = conn->hdev;
2841 struct sk_buff *list;
2843 skb->len = skb_headlen(skb);
2846 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2848 switch (hdev->dev_type) {
2850 hci_add_acl_hdr(skb, conn->handle, flags);
2853 hci_add_acl_hdr(skb, chan->handle, flags);
2856 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2860 list = skb_shinfo(skb)->frag_list;
2862 /* Non fragmented */
2863 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2865 skb_queue_tail(queue, skb);
/* else branch: fragmented skb. */
2868 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2870 skb_shinfo(skb)->frag_list = NULL;
2872 /* Queue all fragments atomically */
2873 spin_lock(&queue->lock);
2875 __skb_queue_tail(queue, skb);
/* Only the first fragment carries ACL_START. */
2877 flags &= ~ACL_START;
2880 skb = list; list = list->next;
2882 skb->dev = (void *) hdev;
2883 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2884 hci_add_acl_hdr(skb, conn->handle, flags);
2886 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2888 __skb_queue_tail(queue, skb);
2891 spin_unlock(&queue->lock);
/* hci_send_acl() - public ACL send: queue on the channel's data queue
 * and schedule the TX work.
 */
2895 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2897 struct hci_dev *hdev = chan->conn->hdev;
2899 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2901 skb->dev = (void *) hdev;
2903 hci_queue_acl(chan, &chan->data_q, skb, flags);
2905 queue_work(hdev->workqueue, &hdev->tx_work);
/* hci_send_sco() - prepend a SCO header (handle + 8-bit length) and
 * queue the packet on the connection's data queue for TX.
 */
2909 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2911 struct hci_dev *hdev = conn->hdev;
2912 struct hci_sco_hdr hdr;
2914 BT_DBG("%s len %d", hdev->name, skb->len);
2916 hdr.handle = cpu_to_le16(conn->handle);
2917 hdr.dlen = skb->len;
2919 skb_push(skb, HCI_SCO_HDR_SIZE);
2920 skb_reset_transport_header(skb);
2921 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2923 skb->dev = (void *) hdev;
2924 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2926 skb_queue_tail(&conn->data_q, skb);
2927 queue_work(hdev->workqueue, &hdev->tx_work);
2930 /* ---- HCI TX task (outgoing data) ---- */
2932 /* HCI Connection scheduler */
/* hci_low_sent() - pick the connection of the given link type with
 * pending data and the fewest in-flight packets (fair scheduling),
 * and compute its quota from the matching flow-control counter.
 */
2933 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2936 struct hci_conn_hash *h = &hdev->conn_hash;
2937 struct hci_conn *conn = NULL, *c;
2938 unsigned int num = 0, min = ~0;
2940 /* We don't have to lock device here. Connections are always
2941 * added and removed with TX task disabled. */
2945 list_for_each_entry_rcu(c, &h->list, list) {
2946 if (c->type != type || skb_queue_empty(&c->data_q))
2949 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the least outstanding packets. */
2954 if (c->sent < min) {
/* Early exit once all connections of this type were inspected. */
2959 if (hci_conn_num(hdev, type) == num)
2968 switch (conn->type) {
2970 cnt = hdev->acl_cnt;
2974 cnt = hdev->sco_cnt;
/* LE shares the ACL counter when the controller reports no
 * dedicated LE buffers. */
2977 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2981 BT_ERR("Unknown link type");
2989 BT_DBG("conn %p quote %d", conn, *quote);
/* hci_link_tx_to() - TX watchdog: disconnect every connection of the
 * given type that still has un-acked packets outstanding.
 */
2993 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2995 struct hci_conn_hash *h = &hdev->conn_hash;
2998 BT_ERR("%s link tx timeout", hdev->name);
3002 /* Kill stalled connections */
3003 list_for_each_entry_rcu(c, &h->list, list) {
3004 if (c->type == type && c->sent) {
3005 BT_ERR("%s killing stalled connection %pMR",
3006 hdev->name, &c->dst);
3007 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* hci_chan_sent() - channel-level scheduler: among channels of the
 * given link type with queued data, prefer the highest skb priority,
 * breaking ties by fewest packets in flight on the owning connection;
 * then derive the quota from the matching flow-control counter.
 */
3014 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3017 struct hci_conn_hash *h = &hdev->conn_hash;
3018 struct hci_chan *chan = NULL;
3019 unsigned int num = 0, min = ~0, cur_prio = 0;
3020 struct hci_conn *conn;
3021 int cnt, q, conn_num = 0;
3023 BT_DBG("%s", hdev->name);
3027 list_for_each_entry_rcu(conn, &h->list, list) {
3028 struct hci_chan *tmp;
3030 if (conn->type != type)
3033 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3038 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3039 struct sk_buff *skb;
3041 if (skb_queue_empty(&tmp->data_q))
/* Lower-priority channels are skipped; a strictly higher
 * priority resets the fairness comparison. */
3044 skb = skb_peek(&tmp->data_q);
3045 if (skb->priority < cur_prio)
3048 if (skb->priority > cur_prio) {
3051 cur_prio = skb->priority;
3056 if (conn->sent < min) {
3062 if (hci_conn_num(hdev, type) == conn_num)
3071 switch (chan->conn->type) {
3073 cnt = hdev->acl_cnt;
3076 cnt = hdev->block_cnt;
3080 cnt = hdev->sco_cnt;
3083 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3087 BT_ERR("Unknown link type");
3092 BT_DBG("chan %p quote %d", chan, *quote);
/* hci_prio_recalculate() - anti-starvation pass: after a TX round,
 * promote queued skbs on channels of this link type toward
 * HCI_PRIO_MAX - 1 so lower-priority traffic eventually gets served.
 */
3096 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3098 struct hci_conn_hash *h = &hdev->conn_hash;
3099 struct hci_conn *conn;
3102 BT_DBG("%s", hdev->name);
3106 list_for_each_entry_rcu(conn, &h->list, list) {
3107 struct hci_chan *chan;
3109 if (conn->type != type)
3112 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3117 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3118 struct sk_buff *skb;
3125 if (skb_queue_empty(&chan->data_q))
3128 skb = skb_peek(&chan->data_q);
3129 if (skb->priority >= HCI_PRIO_MAX - 1)
3132 skb->priority = HCI_PRIO_MAX - 1;
3134 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3138 if (hci_conn_num(hdev, type) == num)
/* __get_blocks() - number of controller buffer blocks an ACL packet
 * occupies under block-based flow control (payload only, rounded up).
 */
3146 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3148 /* Calculate count of blocks used by this packet */
3149 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* __check_timeout() - trigger the ACL TX watchdog when credits are
 * exhausted for longer than HCI_ACL_TX_TIMEOUT (skipped in raw mode).
 */
3152 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3154 if (!test_bit(HCI_RAW, &hdev->flags)) {
3155 /* ACL tx timeout must be longer than maximum
3156 * link supervision timeout (40.9 seconds) */
3157 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3158 HCI_ACL_TX_TIMEOUT))
3159 hci_link_tx_to(hdev, ACL_LINK);
/* hci_sched_acl_pkt() - packet-based ACL scheduler: while credits
 * remain, drain each selected channel up to its quota, stopping when
 * the head skb's priority drops below the round's starting priority.
 * A fairness recalculation runs if anything was sent.
 */
3163 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3165 unsigned int cnt = hdev->acl_cnt;
3166 struct hci_chan *chan;
3167 struct sk_buff *skb;
3170 __check_timeout(hdev, cnt);
3172 while (hdev->acl_cnt &&
3173 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3174 u32 priority = (skb_peek(&chan->data_q))->priority;
3175 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3176 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3177 skb->len, skb->priority);
3179 /* Stop if priority has changed */
3180 if (skb->priority < priority)
3183 skb = skb_dequeue(&chan->data_q);
3185 hci_conn_enter_active_mode(chan->conn,
3186 bt_cb(skb)->force_active);
3188 hci_send_frame(skb);
3189 hdev->acl_last_tx = jiffies;
3197 if (cnt != hdev->acl_cnt)
3198 hci_prio_recalculate(hdev, ACL_LINK);
3201 static void hci_sched_acl_blk(struct hci_dev *hdev)
3203 unsigned int cnt = hdev->block_cnt;
3204 struct hci_chan *chan;
3205 struct sk_buff *skb;
3209 __check_timeout(hdev, cnt);
3211 BT_DBG("%s", hdev->name);
3213 if (hdev->dev_type == HCI_AMP)
3218 while (hdev->block_cnt > 0 &&
3219 (chan = hci_chan_sent(hdev, type, "e))) {
3220 u32 priority = (skb_peek(&chan->data_q))->priority;
3221 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3224 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3225 skb->len, skb->priority);
3227 /* Stop if priority has changed */
3228 if (skb->priority < priority)
3231 skb = skb_dequeue(&chan->data_q);
3233 blocks = __get_blocks(hdev, skb);
3234 if (blocks > hdev->block_cnt)
3237 hci_conn_enter_active_mode(chan->conn,
3238 bt_cb(skb)->force_active);
3240 hci_send_frame(skb);
3241 hdev->acl_last_tx = jiffies;
3243 hdev->block_cnt -= blocks;
3246 chan->sent += blocks;
3247 chan->conn->sent += blocks;
3251 if (cnt != hdev->block_cnt)
3252 hci_prio_recalculate(hdev, type);
/* Top-level ACL TX scheduler: bail out early when this controller type
 * has no matching links queued, then dispatch on the negotiated flow
 * control mode (per-packet credits vs. data-block credits).
 * NOTE(review): the bodies of the early-return branches and the
 * break/closing lines are missing from this extract; code untouched.
 */
3255 static void hci_sched_acl(struct hci_dev *hdev)
3257 BT_DBG("%s", hdev->name);
3259 /* No ACL link over BR/EDR controller */
3260 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3263 /* No AMP link over AMP controller */
3264 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3267 switch (hdev->flow_ctl_mode) {
3268 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3269 hci_sched_acl_pkt(hdev);
3272 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3273 hci_sched_acl_blk(hdev);
3279 static void hci_sched_sco(struct hci_dev *hdev)
3281 struct hci_conn *conn;
3282 struct sk_buff *skb;
3285 BT_DBG("%s", hdev->name);
3287 if (!hci_conn_num(hdev, SCO_LINK))
3290 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3291 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3292 BT_DBG("skb %p len %d", skb, skb->len);
3293 hci_send_frame(skb);
3296 if (conn->sent == ~0)
/* eSCO TX scheduler: same structure as hci_sched_sco() but walks
 * ESCO_LINK connections; eSCO shares the SCO credit pool (sco_cnt).
 * NOTE(review): the continuation line carrying the quote argument of
 * hci_low_sent() and the sent-counter update/wrap lines are missing
 * from this extract; code untouched.
 */
3302 static void hci_sched_esco(struct hci_dev *hdev)
3304 struct hci_conn *conn;
3305 struct sk_buff *skb;
3308 BT_DBG("%s", hdev->name);
3310 if (!hci_conn_num(hdev, ESCO_LINK))
3313 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3315 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3316 BT_DBG("skb %p len %d", skb, skb->len);
3317 hci_send_frame(skb);
3320 if (conn->sent == ~0)
3326 static void hci_sched_le(struct hci_dev *hdev)
3328 struct hci_chan *chan;
3329 struct sk_buff *skb;
3330 int quote, cnt, tmp;
3332 BT_DBG("%s", hdev->name);
3334 if (!hci_conn_num(hdev, LE_LINK))
3337 if (!test_bit(HCI_RAW, &hdev->flags)) {
3338 /* LE tx timeout must be longer than maximum
3339 * link supervision timeout (40.9 seconds) */
3340 if (!hdev->le_cnt && hdev->le_pkts &&
3341 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3342 hci_link_tx_to(hdev, LE_LINK);
3345 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3347 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3348 u32 priority = (skb_peek(&chan->data_q))->priority;
3349 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3350 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3351 skb->len, skb->priority);
3353 /* Stop if priority has changed */
3354 if (skb->priority < priority)
3357 skb = skb_dequeue(&chan->data_q);
3359 hci_send_frame(skb);
3360 hdev->le_last_tx = jiffies;
3371 hdev->acl_cnt = cnt;
3374 hci_prio_recalculate(hdev, LE_LINK);
/* TX work handler, queued on hdev->workqueue whenever outgoing data or
 * controller credits arrive.  Unless the device is in user-channel mode
 * (a userspace process owns raw access), run each link-type scheduler,
 * then flush raw/unknown-type packets straight to the driver.
 */
3377 static void hci_tx_work(struct work_struct *work)
3379 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3380 struct sk_buff *skb;
3382 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3383 hdev->sco_cnt, hdev->le_cnt);
3385 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3386 /* Schedule queues and send stuff to HCI driver */
3387 hci_sched_acl(hdev);
3388 hci_sched_sco(hdev);
3389 hci_sched_esco(hdev);
/* NOTE(review): a further scheduler call (original line 3390, presumably
 * hci_sched_le) is missing from this extract -- confirm against upstream. */
3393 /* Send next queued raw (unknown type) packet */
3394 while ((skb = skb_dequeue(&hdev->raw_q)))
3395 hci_send_frame(skb);
3398 /* ----- HCI RX task (incoming data processing) ----- */
3400 /* ACL data packet */
/* RX path for ACL data: strip the HCI ACL header, split the 16-bit
 * handle field into connection handle plus packet-boundary/broadcast
 * flags, look up the connection and hand the payload to L2CAP.
 * NOTE(review): the hci_dev_lock() paired with the unlock below, and
 * the error-path kfree_skb(), are among the lines missing from this
 * extract; code untouched.
 */
3401 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3403 struct hci_acl_hdr *hdr = (void *) skb->data;
3404 struct hci_conn *conn;
3405 __u16 handle, flags;
3407 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* Handle field carries both the 12-bit handle and the PB/BC flags */
3409 handle = __le16_to_cpu(hdr->handle);
3410 flags = hci_flags(handle);
3411 handle = hci_handle(handle);
3413 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3416 hdev->stat.acl_rx++;
3419 conn = hci_conn_hash_lookup_handle(hdev, handle);
3420 hci_dev_unlock(hdev);
/* Data activity: leave sniff mode if the connection allows it */
3423 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3425 /* Send to upper protocol */
3426 l2cap_recv_acldata(conn, skb, flags);
3429 BT_ERR("%s ACL packet for unknown connection handle %d",
3430 hdev->name, handle);
3436 /* SCO data packet */
/* RX path for SCO data: strip the HCI SCO header, look up the
 * connection by handle and hand the payload to the SCO layer.
 * NOTE(review): the "__u16 handle;" declaration, the hci_dev_lock()
 * paired with the unlock below, and the error-path kfree_skb() are
 * among the lines missing from this extract; code untouched.
 */
3437 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3439 struct hci_sco_hdr *hdr = (void *) skb->data;
3440 struct hci_conn *conn;
3443 skb_pull(skb, HCI_SCO_HDR_SIZE);
3445 handle = __le16_to_cpu(hdr->handle);
3447 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3449 hdev->stat.sco_rx++;
3452 conn = hci_conn_hash_lookup_handle(hdev, handle);
3453 hci_dev_unlock(hdev);
3456 /* Send to upper protocol */
3457 sco_recv_scodata(conn, skb);
3460 BT_ERR("%s SCO packet for unknown connection handle %d",
3461 hdev->name, handle);
/* A request is complete when the head of the command queue starts a
 * new request (req.start set), i.e. no more commands belonging to the
 * current request remain queued.
 * NOTE(review): the empty-queue early return between the peek and the
 * final return is missing from this extract; code untouched.
 */
3467 static bool hci_req_is_complete(struct hci_dev *hdev)
3469 struct sk_buff *skb;
3471 skb = skb_peek(&hdev->cmd_q);
3475 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q and
 * kick the command worker.  Used by hci_req_cmd_complete() to recover
 * from CSR controllers that emit a spontaneous reset-complete during
 * init.  HCI_Reset itself is never resent.
 */
3478 static void hci_resend_last(struct hci_dev *hdev)
3480 struct hci_command_hdr *sent;
3481 struct sk_buff *skb;
/* NOTE(review): the "u16 opcode;" declaration and the early returns
 * after the opcode check and the clone are missing from this extract. */
3484 if (!hdev->sent_cmd)
3487 sent = (void *) hdev->sent_cmd->data;
3488 opcode = __le16_to_cpu(sent->opcode);
3489 if (opcode == HCI_OP_RESET)
/* Clone, not requeue: hdev->sent_cmd keeps owning the original skb */
3492 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3496 skb_queue_head(&hdev->cmd_q, skb);
3497 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called from event processing when a command completes (or fails with
 * @status).  Decides whether the whole synchronous request this command
 * belonged to is finished, finds the request's completion callback --
 * either on hdev->sent_cmd or on a queued command skb -- and invokes it
 * once.  Also removes the remaining queued commands of an aborted
 * request from cmd_q.
 * NOTE(review): several closing braces/early returns are missing from
 * this extract; code untouched.
 */
3500 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3502 hci_req_complete_t req_complete = NULL;
3503 struct sk_buff *skb;
3504 unsigned long flags;
3506 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3508 /* If the completed command doesn't match the last one that was
3509 * sent we need to do special handling of it.
3511 if (!hci_sent_cmd_data(hdev, opcode)) {
3512 /* Some CSR based controllers generate a spontaneous
3513 * reset complete event during init and any pending
3514 * command will never be completed. In such a case we
3515 * need to resend whatever was the last sent
3518 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3519 hci_resend_last(hdev);
3524 /* If the command succeeded and there's still more commands in
3525 * this request the request is not yet complete.
3527 if (!status && !hci_req_is_complete(hdev))
3530 /* If this was the last command in a request the complete
3531 * callback would be found in hdev->sent_cmd instead of the
3532 * command queue (hdev->cmd_q).
3534 if (hdev->sent_cmd) {
3535 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3538 /* We must set the complete callback to NULL to
3539 * avoid calling the callback more than once if
3540 * this function gets called again.
3542 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3548 /* Remove all pending commands belonging to this request */
3549 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3550 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* A req.start marker means the next request begins here: put the
 * command back and stop draining. */
3551 if (bt_cb(skb)->req.start) {
3552 __skb_queue_head(&hdev->cmd_q, skb);
3556 req_complete = bt_cb(skb)->req.complete;
3559 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
/* Invoke the callback outside the cmd_q lock */
3563 req_complete(hdev, status);
/* RX work handler: drain hdev->rx_q.  Each packet is mirrored to the
 * monitor socket, optionally copied to promiscuous sockets, skipped in
 * raw/user-channel mode, filtered during init (data packets dropped),
 * and finally dispatched by packet type to the event/ACL/SCO handlers.
 * NOTE(review): kfree_skb()/continue/break lines and some case labels
 * (e.g. HCI_EVENT_PKT, the default case) are missing from this
 * extract; code untouched.
 */
3566 static void hci_rx_work(struct work_struct *work)
3568 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3569 struct sk_buff *skb;
3571 BT_DBG("%s", hdev->name);
3573 while ((skb = skb_dequeue(&hdev->rx_q))) {
3574 /* Send copy to monitor */
3575 hci_send_to_monitor(hdev, skb);
3577 if (atomic_read(&hdev->promisc)) {
3578 /* Send copy to the sockets */
3579 hci_send_to_sock(hdev, skb);
/* Raw or user-channel mode: the stack must not process the packet */
3582 if (test_bit(HCI_RAW, &hdev->flags) ||
3583 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3588 if (test_bit(HCI_INIT, &hdev->flags)) {
3589 /* Don't process data packets in this states. */
3590 switch (bt_cb(skb)->pkt_type) {
3591 case HCI_ACLDATA_PKT:
3592 case HCI_SCODATA_PKT:
/* Normal dispatch by packet type */
3599 switch (bt_cb(skb)->pkt_type) {
3601 BT_DBG("%s Event packet", hdev->name);
3602 hci_event_packet(hdev, skb);
3605 case HCI_ACLDATA_PKT:
3606 BT_DBG("%s ACL data packet", hdev->name);
3607 hci_acldata_packet(hdev, skb);
3610 case HCI_SCODATA_PKT:
3611 BT_DBG("%s SCO data packet", hdev->name);
3612 hci_scodata_packet(hdev, skb);
/* Command work handler: if the controller has command credits
 * (cmd_cnt), take the next queued command, remember a clone in
 * hdev->sent_cmd (needed later to match the completion event) and send
 * the original.  The command timer is deleted while a reset is in
 * flight, otherwise (re)armed; if cloning fails the command is
 * requeued and the work rescheduled.
 */
3622 static void hci_cmd_work(struct work_struct *work)
3624 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3625 struct sk_buff *skb;
3627 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3628 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3630 /* Send queued commands */
3631 if (atomic_read(&hdev->cmd_cnt)) {
3632 skb = skb_dequeue(&hdev->cmd_q);
/* Drop the previously saved command before saving the new one */
3636 kfree_skb(hdev->sent_cmd);
3638 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3639 if (hdev->sent_cmd) {
3640 atomic_dec(&hdev->cmd_cnt);
3641 hci_send_frame(skb);
3642 if (test_bit(HCI_RESET, &hdev->flags))
3643 del_timer(&hdev->cmd_timer);
3645 mod_timer(&hdev->cmd_timer,
3646 jiffies + HCI_CMD_TIMEOUT);
/* Clone failed: put the command back and try again later */
3648 skb_queue_head(&hdev->cmd_q, skb);
3649 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Map a userspace/mgmt BDADDR_LE_* address type to the internal
 * ADDR_LE_DEV_* constant; anything unrecognized falls back to the LE
 * random address type.
 */
3654 u8 bdaddr_to_le(u8 bdaddr_type)
3656 switch (bdaddr_type) {
3657 case BDADDR_LE_PUBLIC:
3658 return ADDR_LE_DEV_PUBLIC;
3661 /* Fallback to LE Random address type */
3662 return ADDR_LE_DEV_RANDOM;