2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2014 Intel Corporation
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
24 #include <asm/unaligned.h>
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
31 #include "hci_request.h"
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
39 skb_queue_head_init(&req->cmd_q);
44 static int req_run(struct hci_request *req, hci_req_complete_t complete,
45 hci_req_complete_skb_t complete_skb)
47 struct hci_dev *hdev = req->hdev;
51 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
53 /* If an error occurred during request building, remove all HCI
54 * commands queued on the HCI request queue.
57 skb_queue_purge(&req->cmd_q);
61 /* Do not allow empty requests */
62 if (skb_queue_empty(&req->cmd_q))
65 skb = skb_peek_tail(&req->cmd_q);
67 bt_cb(skb)->hci.req_complete = complete;
68 } else if (complete_skb) {
69 bt_cb(skb)->hci.req_complete_skb = complete_skb;
70 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
73 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
74 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
75 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
77 queue_work(hdev->workqueue, &hdev->cmd_work);
82 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
84 return req_run(req, complete, NULL);
87 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
89 return req_run(req, NULL, complete);
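/* Illustrative usage sketch (hypothetical caller, not part of this file):
 * an asynchronous request is built and submitted roughly like this,
 * assuming read_ver_complete is a hci_req_complete_t callback:
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, read_ver_complete);
 *
 * A negative return (e.g. -ENODATA for an empty request) means nothing
 * was queued and the completion callback will not be invoked.
 */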
92 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
95 BT_DBG("%s result 0x%2.2x", hdev->name, result);
97 if (hdev->req_status == HCI_REQ_PEND) {
98 hdev->req_result = result;
99 hdev->req_status = HCI_REQ_DONE;
101 hdev->req_skb = skb_get(skb);
102 wake_up_interruptible(&hdev->req_wait_q);
106 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
108 BT_DBG("%s err 0x%2.2x", hdev->name, err);
110 if (hdev->req_status == HCI_REQ_PEND) {
111 hdev->req_result = err;
112 hdev->req_status = HCI_REQ_CANCELED;
113 wake_up_interruptible(&hdev->req_wait_q);
117 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
118 const void *param, u8 event, u32 timeout)
120 DECLARE_WAITQUEUE(wait, current);
121 struct hci_request req;
125 BT_DBG("%s", hdev->name);
127 hci_req_init(&req, hdev);
129 hci_req_add_ev(&req, opcode, plen, param, event);
131 hdev->req_status = HCI_REQ_PEND;
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
136 err = hci_req_run_skb(&req, hci_req_sync_complete);
138 remove_wait_queue(&hdev->req_wait_q, &wait);
139 set_current_state(TASK_RUNNING);
143 schedule_timeout(timeout);
145 remove_wait_queue(&hdev->req_wait_q, &wait);
147 if (signal_pending(current))
148 return ERR_PTR(-EINTR);
150 switch (hdev->req_status) {
152 err = -bt_to_errno(hdev->req_result);
155 case HCI_REQ_CANCELED:
156 err = -hdev->req_result;
164 hdev->req_status = hdev->req_result = 0;
166 hdev->req_skb = NULL;
168 BT_DBG("%s end: err %d", hdev->name, err);
176 return ERR_PTR(-ENODATA);
180 EXPORT_SYMBOL(__hci_cmd_sync_ev);
182 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
183 const void *param, u32 timeout)
185 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
187 EXPORT_SYMBOL(__hci_cmd_sync);
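/* Illustrative usage sketch (hypothetical caller, not part of this file):
 * __hci_cmd_sync() sends a single command and waits for its completion
 * event, returning the response parameters as an skb:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// skb->data then holds the response, e.g. struct hci_rp_read_local_version
 *	kfree_skb(skb);
 *
 * These helpers use the shared hdev->req_* fields, so callers are
 * expected to be serialized (see hci_req_sync_lock() below).
 */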
189 /* Execute request and wait for completion. */
190 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
192 unsigned long opt, u32 timeout, u8 *hci_status)
194 struct hci_request req;
195 DECLARE_WAITQUEUE(wait, current);
198 BT_DBG("%s start", hdev->name);
200 hci_req_init(&req, hdev);
202 hdev->req_status = HCI_REQ_PEND;
204 err = func(&req, opt);
207 *hci_status = HCI_ERROR_UNSPECIFIED;
211 add_wait_queue(&hdev->req_wait_q, &wait);
212 set_current_state(TASK_INTERRUPTIBLE);
214 err = hci_req_run_skb(&req, hci_req_sync_complete);
216 hdev->req_status = 0;
218 remove_wait_queue(&hdev->req_wait_q, &wait);
219 set_current_state(TASK_RUNNING);
221 /* ENODATA means the HCI request command queue is empty.
222 * This can happen when a request with conditionals doesn't
223 * trigger any commands to be sent. This is normal behavior
224 * and should not trigger an error return.
226 if (err == -ENODATA) {
233 *hci_status = HCI_ERROR_UNSPECIFIED;
238 schedule_timeout(timeout);
240 remove_wait_queue(&hdev->req_wait_q, &wait);
242 if (signal_pending(current))
245 switch (hdev->req_status) {
247 err = -bt_to_errno(hdev->req_result);
249 *hci_status = hdev->req_result;
252 case HCI_REQ_CANCELED:
253 err = -hdev->req_result;
255 *hci_status = HCI_ERROR_UNSPECIFIED;
261 *hci_status = HCI_ERROR_UNSPECIFIED;
265 hdev->req_status = hdev->req_result = 0;
267 BT_DBG("%s end: err %d", hdev->name, err);
272 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
274 unsigned long opt, u32 timeout, u8 *hci_status)
278 if (!test_bit(HCI_UP, &hdev->flags))
281 /* Serialize all requests */
282 hci_req_sync_lock(hdev);
283 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
284 hci_req_sync_unlock(hdev);
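/* Illustrative usage sketch (hypothetical caller and names, not part of
 * this file): hci_req_sync() takes a request-builder callback, runs the
 * resulting request and blocks until it completes or times out:
 *
 *	static int my_update(struct hci_request *req, unsigned long opt)
 *	{
 *		// scan is assumed to be a u8 holding the desired setting
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err = hci_req_sync(hdev, my_update, 0, HCI_CMD_TIMEOUT, &status);
 *
 * The workqueue handlers further down (e.g. scan_update_work) follow
 * exactly this pattern.
 */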
289 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
292 int len = HCI_COMMAND_HDR_SIZE + plen;
293 struct hci_command_hdr *hdr;
296 skb = bt_skb_alloc(len, GFP_ATOMIC);
300 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
301 hdr->opcode = cpu_to_le16(opcode);
305 memcpy(skb_put(skb, plen), param, plen);
307 BT_DBG("skb len %d", skb->len);
309 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 hci_skb_opcode(skb) = opcode;
315 /* Queue a command to an asynchronous HCI request */
316 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317 const void *param, u8 event)
319 struct hci_dev *hdev = req->hdev;
322 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
324 /* If an error occurred during request building, there is no point in
325 * queueing the HCI command. We can simply return.
330 skb = hci_prepare_cmd(hdev, opcode, plen, param);
332 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
338 if (skb_queue_empty(&req->cmd_q))
339 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
341 bt_cb(skb)->hci.req_event = event;
343 skb_queue_tail(&req->cmd_q, skb);
346 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
349 hci_req_add_ev(req, opcode, plen, param, 0);
352 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_write_page_scan_activity acp;
358 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
361 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
365 type = PAGE_SCAN_TYPE_INTERLACED;
367 /* 160 msec page scan interval */
368 acp.interval = cpu_to_le16(0x0100);
370 type = PAGE_SCAN_TYPE_STANDARD; /* default */
372 /* default 1.28 sec page scan */
373 acp.interval = cpu_to_le16(0x0800);
376 acp.window = cpu_to_le16(0x0012);
378 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
379 __cpu_to_le16(hdev->page_scan_window) != acp.window)
380 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
383 if (hdev->page_scan_type != type)
384 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
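/* The Page_Scan_Interval and Page_Scan_Window values used above are
 * expressed in baseband slots of 0.625 ms, so as a worked example:
 *
 *	0x0100 * 0.625 ms = 160 ms    (fast connectable interval)
 *	0x0800 * 0.625 ms = 1.28 s    (default interval)
 *	0x0012 * 0.625 ms = 11.25 ms  (scan window in both cases)
 */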
387 /* This function controls the background scanning based on hdev->pend_le_conns
388 * list. If there are pending LE connections we start the background scanning,
389 * otherwise we stop it.
391 * This function requires the caller holds hdev->lock.
393 static void __hci_update_background_scan(struct hci_request *req)
395 struct hci_dev *hdev = req->hdev;
397 if (!test_bit(HCI_UP, &hdev->flags) ||
398 test_bit(HCI_INIT, &hdev->flags) ||
399 hci_dev_test_flag(hdev, HCI_SETUP) ||
400 hci_dev_test_flag(hdev, HCI_CONFIG) ||
401 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
402 hci_dev_test_flag(hdev, HCI_UNREGISTER))
405 /* No point in doing scanning if LE support hasn't been enabled */
406 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
409 /* If discovery is active don't interfere with it */
410 if (hdev->discovery.state != DISCOVERY_STOPPED)
413 /* Reset RSSI and UUID filters when starting background scanning
414 * since these filters are meant for service discovery only.
416 * The Start Discovery and Start Service Discovery operations
417 * take care of setting proper values for the RSSI threshold and UUID
418 * filter list. So it is safe to just reset them here.
420 hci_discovery_filter_clear(hdev);
422 if (list_empty(&hdev->pend_le_conns) &&
423 list_empty(&hdev->pend_le_reports)) {
424 /* If there are no pending LE connections or devices
425 * to be scanned for, we should stop the background
429 /* If controller is not scanning we are done. */
430 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
433 hci_req_add_le_scan_disable(req);
435 BT_DBG("%s stopping background scanning", hdev->name);
437 /* If there is at least one pending LE connection, we should
438 * keep the background scan running.
441 /* If controller is connecting, we should not start scanning
442 * since some controllers are not able to scan and connect at
445 if (hci_lookup_le_connect(hdev))
448 /* If controller is currently scanning, we stop it to ensure we
449 * don't miss any advertising (due to duplicates filter).
451 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
452 hci_req_add_le_scan_disable(req);
454 hci_req_add_le_passive_scan(req);
456 BT_DBG("%s starting background scanning", hdev->name);
460 void __hci_req_update_name(struct hci_request *req)
462 struct hci_dev *hdev = req->hdev;
463 struct hci_cp_write_local_name cp;
465 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
467 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
470 #define PNP_INFO_SVCLASS_ID 0x1200
472 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
474 u8 *ptr = data, *uuids_start = NULL;
475 struct bt_uuid *uuid;
480 list_for_each_entry(uuid, &hdev->uuids, list) {
483 if (uuid->size != 16)
486 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
490 if (uuid16 == PNP_INFO_SVCLASS_ID)
496 uuids_start[1] = EIR_UUID16_ALL;
500 /* Stop if not enough space to put next UUID */
501 if ((ptr - data) + sizeof(u16) > len) {
502 uuids_start[1] = EIR_UUID16_SOME;
506 *ptr++ = (uuid16 & 0x00ff);
507 *ptr++ = (uuid16 & 0xff00) >> 8;
508 uuids_start[0] += sizeof(uuid16);
514 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
516 u8 *ptr = data, *uuids_start = NULL;
517 struct bt_uuid *uuid;
522 list_for_each_entry(uuid, &hdev->uuids, list) {
523 if (uuid->size != 32)
529 uuids_start[1] = EIR_UUID32_ALL;
533 /* Stop if not enough space to put next UUID */
534 if ((ptr - data) + sizeof(u32) > len) {
535 uuids_start[1] = EIR_UUID32_SOME;
539 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
541 uuids_start[0] += sizeof(u32);
547 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
549 u8 *ptr = data, *uuids_start = NULL;
550 struct bt_uuid *uuid;
555 list_for_each_entry(uuid, &hdev->uuids, list) {
556 if (uuid->size != 128)
562 uuids_start[1] = EIR_UUID128_ALL;
566 /* Stop if not enough space to put next UUID */
567 if ((ptr - data) + 16 > len) {
568 uuids_start[1] = EIR_UUID128_SOME;
572 memcpy(ptr, uuid->uuid, 16);
574 uuids_start[0] += 16;
580 static void create_eir(struct hci_dev *hdev, u8 *data)
585 name_len = strlen(hdev->dev_name);
591 ptr[1] = EIR_NAME_SHORT;
593 ptr[1] = EIR_NAME_COMPLETE;
595 /* EIR Data length */
596 ptr[0] = name_len + 1;
598 memcpy(ptr + 2, hdev->dev_name, name_len);
600 ptr += (name_len + 2);
603 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
605 ptr[1] = EIR_TX_POWER;
606 ptr[2] = (u8) hdev->inq_tx_power;
611 if (hdev->devid_source > 0) {
613 ptr[1] = EIR_DEVICE_ID;
615 put_unaligned_le16(hdev->devid_source, ptr + 2);
616 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
617 put_unaligned_le16(hdev->devid_product, ptr + 6);
618 put_unaligned_le16(hdev->devid_version, ptr + 8);
623 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
624 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
625 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
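/* Each EIR field written above follows the same layout: one length octet
 * (covering the type octet plus the data), one type octet and the data
 * itself. As a hypothetical example, a complete local name of "BlueZ"
 * is encoded as:
 *
 *	0x06 0x09 'B' 'l' 'u' 'e' 'Z'
 *	 |    |
 *	 |    +-- EIR_NAME_COMPLETE
 *	 +------- length = 1 (type octet) + 5 (data octets)
 */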
628 void __hci_req_update_eir(struct hci_request *req)
630 struct hci_dev *hdev = req->hdev;
631 struct hci_cp_write_eir cp;
633 if (!hdev_is_powered(hdev))
636 if (!lmp_ext_inq_capable(hdev))
639 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
642 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
645 memset(&cp, 0, sizeof(cp));
647 create_eir(hdev, cp.data);
649 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
652 memcpy(hdev->eir, cp.data, sizeof(cp.data));
654 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
657 void hci_req_add_le_scan_disable(struct hci_request *req)
659 struct hci_cp_le_set_scan_enable cp;
661 memset(&cp, 0, sizeof(cp));
662 cp.enable = LE_SCAN_DISABLE;
663 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
666 static void add_to_white_list(struct hci_request *req,
667 struct hci_conn_params *params)
669 struct hci_cp_le_add_to_white_list cp;
671 cp.bdaddr_type = params->addr_type;
672 bacpy(&cp.bdaddr, &params->addr);
674 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
677 static u8 update_white_list(struct hci_request *req)
679 struct hci_dev *hdev = req->hdev;
680 struct hci_conn_params *params;
681 struct bdaddr_list *b;
682 u8 white_list_entries = 0;
684 /* Go through the current white list programmed into the
685 * controller one by one and check if that address is still
686 * in the list of pending connections or list of devices to
687 * report. If not present in either list, then queue the
688 * command to remove it from the controller.
690 list_for_each_entry(b, &hdev->le_white_list, list) {
691 struct hci_cp_le_del_from_white_list cp;
693 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
694 &b->bdaddr, b->bdaddr_type) ||
695 hci_pend_le_action_lookup(&hdev->pend_le_reports,
696 &b->bdaddr, b->bdaddr_type)) {
697 white_list_entries++;
701 cp.bdaddr_type = b->bdaddr_type;
702 bacpy(&cp.bdaddr, &b->bdaddr);
704 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
708 /* Since all white list entries that are no longer valid have
709 * been removed, walk through the list of pending connections
710 * and ensure that any new device gets programmed into
713 * If the list of devices is larger than the number of
714 * available white list entries in the controller, then
715 * just abort and return the filter policy value to not use the
718 list_for_each_entry(params, &hdev->pend_le_conns, action) {
719 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
720 &params->addr, params->addr_type))
723 if (white_list_entries >= hdev->le_white_list_size) {
724 /* Select filter policy to accept all advertising */
728 if (hci_find_irk_by_addr(hdev, &params->addr,
729 params->addr_type)) {
730 /* White list can not be used with RPAs */
734 white_list_entries++;
735 add_to_white_list(req, params);
738 /* After adding all new pending connections, walk through
739 * the list of pending reports and also add these to the
740 * white list if there is still space.
742 list_for_each_entry(params, &hdev->pend_le_reports, action) {
743 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
744 &params->addr, params->addr_type))
747 if (white_list_entries >= hdev->le_white_list_size) {
748 /* Select filter policy to accept all advertising */
752 if (hci_find_irk_by_addr(hdev, &params->addr,
753 params->addr_type)) {
754 /* White list can not be used with RPAs */
758 white_list_entries++;
759 add_to_white_list(req, params);
762 /* Select filter policy to use white list */
766 void hci_req_add_le_passive_scan(struct hci_request *req)
768 struct hci_cp_le_set_scan_param param_cp;
769 struct hci_cp_le_set_scan_enable enable_cp;
770 struct hci_dev *hdev = req->hdev;
774 /* Set require_privacy to false since no SCAN_REQ PDUs are sent
775 * during passive scanning. Not using a non-resolvable address
776 * here is important so that peer devices using direct
777 * advertising with our address will be correctly reported
780 if (hci_update_random_address(req, false, &own_addr_type))
783 /* Adding or removing entries from the white list must
784 * happen before enabling scanning. The controller does
785 * not allow white list modification while scanning.
787 filter_policy = update_white_list(req);
789 /* When the controller is using resolvable random addresses, i.e.
790 * LE privacy is enabled, controllers that support the Extended
791 * Scanner Filter Policies feature can additionally handle directed
792 * advertising.
794 * So instead of using filter policies 0x00 (no whitelist)
795 * and 0x01 (whitelist enabled) use the new filter policies
796 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
798 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
799 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
800 filter_policy |= 0x02;
802 memset(¶m_cp, 0, sizeof(param_cp));
803 param_cp.type = LE_SCAN_PASSIVE;
804 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
805 param_cp.window = cpu_to_le16(hdev->le_scan_window);
806 param_cp.own_address_type = own_addr_type;
807 param_cp.filter_policy = filter_policy;
808 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
811 memset(&enable_cp, 0, sizeof(enable_cp));
812 enable_cp.enable = LE_SCAN_ENABLE;
813 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
814 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
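/* The LE scan interval and window programmed above are in units of
 * 0.625 ms. Assuming the usual hdev defaults of 0x0060 and 0x0030 (set
 * when the controller is allocated), passive scanning listens for
 * 48 * 0.625 ms = 30 ms out of every 96 * 0.625 ms = 60 ms.
 */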
818 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
820 u8 instance = hdev->cur_adv_instance;
821 struct adv_info *adv_instance;
823 /* Ignore instance 0 */
824 if (instance == 0x00)
827 adv_instance = hci_find_adv_instance(hdev, instance);
831 /* TODO: Take into account the "appearance" and "local-name" flags here.
832 * These are currently being ignored as they are not supported.
834 return adv_instance->scan_rsp_len;
837 void __hci_req_disable_advertising(struct hci_request *req)
841 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
844 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
847 struct adv_info *adv_instance;
849 if (instance == 0x00) {
850 /* Instance 0 always manages the "Tx Power" and "Flags"
853 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
855 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
856 * corresponds to the "connectable" instance flag.
858 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
859 flags |= MGMT_ADV_FLAG_CONNECTABLE;
864 adv_instance = hci_find_adv_instance(hdev, instance);
866 /* Return 0 when we got an invalid instance identifier. */
870 return adv_instance->flags;
873 void __hci_req_enable_advertising(struct hci_request *req)
875 struct hci_dev *hdev = req->hdev;
876 struct hci_cp_le_set_adv_param cp;
877 u8 own_addr_type, enable = 0x01;
881 if (hci_conn_num(hdev, LE_LINK) > 0)
884 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
885 __hci_req_disable_advertising(req);
887 /* Clear the HCI_LE_ADV bit temporarily so that the
888 * hci_update_random_address knows that it's safe to go ahead
889 * and write a new random address. The flag will be set back on
890 * as soon as the SET_ADV_ENABLE HCI command completes.
892 hci_dev_clear_flag(hdev, HCI_LE_ADV);
894 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
896 /* If the "connectable" instance flag was not set, then choose between
897 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
899 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
900 mgmt_get_connectable(hdev);
902 /* Set require_privacy to true only when non-connectable
903 * advertising is used. In that case it is fine to use a
904 * non-resolvable private address.
906 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
909 memset(&cp, 0, sizeof(cp));
910 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
911 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
914 cp.type = LE_ADV_IND;
915 else if (get_cur_adv_instance_scan_rsp_len(hdev))
916 cp.type = LE_ADV_SCAN_IND;
918 cp.type = LE_ADV_NONCONN_IND;
920 cp.own_address_type = own_addr_type;
921 cp.channel_map = hdev->le_adv_channel_map;
923 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
925 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
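/* le_adv_min_interval and le_adv_max_interval are also in units of
 * 0.625 ms. Assuming the usual default of 0x0800 for both, the
 * controller advertises roughly every 2048 * 0.625 ms = 1.28 s.
 */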
928 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
933 name_len = strlen(hdev->dev_name);
935 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
937 if (name_len > max_len) {
939 ptr[1] = EIR_NAME_SHORT;
941 ptr[1] = EIR_NAME_COMPLETE;
943 ptr[0] = name_len + 1;
945 memcpy(ptr + 2, hdev->dev_name, name_len);
947 ad_len += (name_len + 2);
948 ptr += (name_len + 2);
954 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
957 struct adv_info *adv_instance;
959 adv_instance = hci_find_adv_instance(hdev, instance);
963 /* TODO: Set the appropriate entries based on advertising instance flags
964 * here once flags other than 0 are supported.
966 memcpy(ptr, adv_instance->scan_rsp_data,
967 adv_instance->scan_rsp_len);
969 return adv_instance->scan_rsp_len;
972 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
974 struct hci_dev *hdev = req->hdev;
975 struct hci_cp_le_set_scan_rsp_data cp;
978 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
981 memset(&cp, 0, sizeof(cp));
984 len = create_instance_scan_rsp_data(hdev, instance, cp.data);
986 len = create_default_scan_rsp_data(hdev, cp.data);
988 if (hdev->scan_rsp_data_len == len &&
989 !memcmp(cp.data, hdev->scan_rsp_data, len))
992 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
993 hdev->scan_rsp_data_len = len;
997 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1000 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1002 struct adv_info *adv_instance = NULL;
1003 u8 ad_len = 0, flags = 0;
1006 /* Return 0 when the current instance identifier is invalid. */
1008 adv_instance = hci_find_adv_instance(hdev, instance);
1013 instance_flags = get_adv_instance_flags(hdev, instance);
1015 /* The Add Advertising command allows userspace to set both the general
1016 * and limited discoverable flags.
1018 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1019 flags |= LE_AD_GENERAL;
1021 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1022 flags |= LE_AD_LIMITED;
1024 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1025 /* If a discovery flag wasn't provided, simply use the global
1029 flags |= mgmt_get_adv_discov_flags(hdev);
1031 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1032 flags |= LE_AD_NO_BREDR;
1034 /* If flags would still be empty, then there is no need to
1035 * include the "Flags" AD field.
1048 memcpy(ptr, adv_instance->adv_data,
1049 adv_instance->adv_data_len);
1050 ad_len += adv_instance->adv_data_len;
1051 ptr += adv_instance->adv_data_len;
1054 /* Provide Tx Power only if we can provide a valid value for it */
1055 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1056 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1058 ptr[1] = EIR_TX_POWER;
1059 ptr[2] = (u8)hdev->adv_tx_power;
1068 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1070 struct hci_dev *hdev = req->hdev;
1071 struct hci_cp_le_set_adv_data cp;
1074 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1077 memset(&cp, 0, sizeof(cp));
1079 len = create_instance_adv_data(hdev, instance, cp.data);
1081 /* There's nothing to do if the data hasn't changed */
1082 if (hdev->adv_data_len == len &&
1083 memcmp(cp.data, hdev->adv_data, len) == 0)
1086 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1087 hdev->adv_data_len = len;
1091 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1094 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1096 struct hci_request req;
1098 hci_req_init(&req, hdev);
1099 __hci_req_update_adv_data(&req, instance);
1101 return hci_req_run(&req, NULL);
1104 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1106 BT_DBG("%s status %u", hdev->name, status);
1109 void hci_req_reenable_advertising(struct hci_dev *hdev)
1111 struct hci_request req;
1113 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1114 list_empty(&hdev->adv_instances))
1117 hci_req_init(&req, hdev);
1119 if (hdev->cur_adv_instance) {
1120 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1123 __hci_req_update_adv_data(&req, 0x00);
1124 __hci_req_update_scan_rsp_data(&req, 0x00);
1125 __hci_req_enable_advertising(&req);
1128 hci_req_run(&req, adv_enable_complete);
1131 static void adv_timeout_expire(struct work_struct *work)
1133 struct hci_dev *hdev = container_of(work, struct hci_dev,
1134 adv_instance_expire.work);
1136 struct hci_request req;
1139 BT_DBG("%s", hdev->name);
1143 hdev->adv_instance_timeout = 0;
1145 instance = hdev->cur_adv_instance;
1146 if (instance == 0x00)
1149 hci_req_init(&req, hdev);
1151 hci_req_clear_adv_instance(hdev, &req, instance, false);
1153 if (list_empty(&hdev->adv_instances))
1154 __hci_req_disable_advertising(&req);
1156 hci_req_run(&req, NULL);
1159 hci_dev_unlock(hdev);
1162 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1165 struct hci_dev *hdev = req->hdev;
1166 struct adv_info *adv_instance = NULL;
1169 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1170 list_empty(&hdev->adv_instances))
1173 if (hdev->adv_instance_timeout)
1176 adv_instance = hci_find_adv_instance(hdev, instance);
1180 /* A zero timeout means unlimited advertising. As long as there is
1181 * only one instance, duration should be ignored. We still set a timeout
1182 * in case further instances are being added later on.
1184 * If the remaining lifetime of the instance is more than the duration
1185 * then the timeout corresponds to the duration, otherwise it will be
1186 * reduced to the remaining instance lifetime.
1188 if (adv_instance->timeout == 0 ||
1189 adv_instance->duration <= adv_instance->remaining_time)
1190 timeout = adv_instance->duration;
1192 timeout = adv_instance->remaining_time;
1194 /* The remaining time is being reduced unless the instance is being
1195 * advertised without time limit.
1197 if (adv_instance->timeout)
1198 adv_instance->remaining_time =
1199 adv_instance->remaining_time - timeout;
1201 hdev->adv_instance_timeout = timeout;
1202 queue_delayed_work(hdev->req_workqueue,
1203 &hdev->adv_instance_expire,
1204 msecs_to_jiffies(timeout * 1000));
1206 /* If we're just re-scheduling the same instance again then do not
1207 * execute any HCI commands. This happens when a single instance is
1210 if (!force && hdev->cur_adv_instance == instance &&
1211 hci_dev_test_flag(hdev, HCI_LE_ADV))
1214 hdev->cur_adv_instance = instance;
1215 __hci_req_update_adv_data(req, instance);
1216 __hci_req_update_scan_rsp_data(req, instance);
1217 __hci_req_enable_advertising(req);
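/* Worked example of the timeout logic above (hypothetical numbers, all
 * in seconds since the timeout is multiplied by 1000 before
 * msecs_to_jiffies()): an instance with a non-zero timeout, duration of
 * 2 and remaining_time of 5 is scheduled with timeout = 2 and its
 * remaining_time drops to 3; once remaining_time falls below the
 * duration, the next timeout is shortened to whatever lifetime is left.
 */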
1222 static void cancel_adv_timeout(struct hci_dev *hdev)
1224 if (hdev->adv_instance_timeout) {
1225 hdev->adv_instance_timeout = 0;
1226 cancel_delayed_work(&hdev->adv_instance_expire);
1230 /* For a single instance:
1231 * - force == true: The instance will be removed even when its remaining
1232 * lifetime is not zero.
1233 * - force == false: The instance will be deactivated but kept stored unless
1234 * the remaining lifetime is zero.
1236 * For instance == 0x00:
1237 * - force == true: All instances will be removed regardless of their timeout
1239 * - force == false: Only instances that have a timeout will be removed.
1241 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
1242 u8 instance, bool force)
1244 struct adv_info *adv_instance, *n, *next_instance = NULL;
1248 /* Cancel any timeout concerning the removed instance(s). */
1249 if (!instance || hdev->cur_adv_instance == instance)
1250 cancel_adv_timeout(hdev);
1252 /* Get the next instance to advertise BEFORE we remove
1253 * the current one. This can be the same instance again
1254 * if there is only one instance.
1256 if (instance && hdev->cur_adv_instance == instance)
1257 next_instance = hci_get_next_instance(hdev, instance);
1259 if (instance == 0x00) {
1260 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1262 if (!(force || adv_instance->timeout))
1265 rem_inst = adv_instance->instance;
1266 err = hci_remove_adv_instance(hdev, rem_inst);
1268 mgmt_advertising_removed(NULL, hdev, rem_inst);
1271 adv_instance = hci_find_adv_instance(hdev, instance);
1273 if (force || (adv_instance && adv_instance->timeout &&
1274 !adv_instance->remaining_time)) {
1275 /* Don't advertise a removed instance. */
1276 if (next_instance &&
1277 next_instance->instance == instance)
1278 next_instance = NULL;
1280 err = hci_remove_adv_instance(hdev, instance);
1282 mgmt_advertising_removed(NULL, hdev, instance);
1286 if (!req || !hdev_is_powered(hdev) ||
1287 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1291 __hci_req_schedule_adv_instance(req, next_instance->instance,
1295 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1297 struct hci_dev *hdev = req->hdev;
1299 /* If we're advertising or initiating an LE connection we can't
1300 * go ahead and change the random address at this time. This is
1301 * because the eventual initiator address used for the
1302 * subsequently created connection will be undefined (some
1303 * controllers use the new address and others the one we had
1304 * when the operation started).
1306 * In this kind of scenario skip the update and let the random
1307 * address be updated at the next cycle.
1309 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1310 hci_lookup_le_connect(hdev)) {
1311 BT_DBG("Deferring random address update");
1312 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1316 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1319 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1322 struct hci_dev *hdev = req->hdev;
1325 /* If privacy is enabled use a resolvable private address. If
1326 * the current RPA has expired or there is something other than
1327 * the current RPA in use, then generate a new one.
1329 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
1332 *own_addr_type = ADDR_LE_DEV_RANDOM;
1334 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1335 !bacmp(&hdev->random_addr, &hdev->rpa))
1338 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1340 BT_ERR("%s failed to generate new RPA", hdev->name);
1344 set_random_addr(req, &hdev->rpa);
1346 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1347 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1352 /* In case of required privacy without resolvable private address,
1353 * use a non-resolvable private address. This is useful for active
1354 * scanning and non-connectable advertising.
1356 if (require_privacy) {
1360 /* The non-resolvable private address is generated
1361 * from six random bytes with the two most significant
1364 get_random_bytes(&nrpa, 6);
1367 /* The non-resolvable private address shall not be
1368 * equal to the public address.
1370 if (bacmp(&hdev->bdaddr, &nrpa))
1374 *own_addr_type = ADDR_LE_DEV_RANDOM;
1375 set_random_addr(req, &nrpa);
1379 /* If forcing static address is in use or there is no public
1380 * address use the static address as random address (but skip
1381 * the HCI command if the current random address is already the
1384 * In case BR/EDR has been disabled on a dual-mode controller
1385 * and a static address has been configured, then use that
1386 * address instead of the public BR/EDR address.
1388 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1389 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1390 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1391 bacmp(&hdev->static_addr, BDADDR_ANY))) {
1392 *own_addr_type = ADDR_LE_DEV_RANDOM;
1393 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1394 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1395 &hdev->static_addr);
1399 /* Neither privacy nor static address is being used so use a
1402 *own_addr_type = ADDR_LE_DEV_PUBLIC;
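/* Summary of the address selection above: privacy enabled -> resolvable
 * private address; require_privacy without the privacy flag ->
 * non-resolvable private address; forced static address, missing public
 * address, or BR/EDR disabled with a static address configured -> static
 * random address; otherwise the public address. For the non-resolvable
 * case, a random address is marked as non-resolvable by having its two
 * most significant bits cleared, i.e. a mask like nrpa.b[5] &= 0x3f
 * applied to the top octet after generating the random bytes.
 */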
1407 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1409 struct bdaddr_list *b;
1411 list_for_each_entry(b, &hdev->whitelist, list) {
1412 struct hci_conn *conn;
1414 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1418 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1425 void __hci_req_update_scan(struct hci_request *req)
1427 struct hci_dev *hdev = req->hdev;
1430 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1433 if (!hdev_is_powered(hdev))
1436 if (mgmt_powering_down(hdev))
1439 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1440 disconnected_whitelist_entries(hdev))
1443 scan = SCAN_DISABLED;
1445 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1446 scan |= SCAN_INQUIRY;
1448 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1449 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1452 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1455 static int update_scan(struct hci_request *req, unsigned long opt)
1457 hci_dev_lock(req->hdev);
1458 __hci_req_update_scan(req);
1459 hci_dev_unlock(req->hdev);
1463 static void scan_update_work(struct work_struct *work)
1465 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1467 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1470 static int connectable_update(struct hci_request *req, unsigned long opt)
1472 struct hci_dev *hdev = req->hdev;
1476 __hci_req_update_scan(req);
1478 /* If BR/EDR is not enabled and we disable advertising as a
1479 * by-product of disabling connectable, we need to update the
1480 * advertising flags.
1482 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1483 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1485 /* Update the advertising parameters if necessary */
1486 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1487 !list_empty(&hdev->adv_instances))
1488 __hci_req_enable_advertising(req);
1490 __hci_update_background_scan(req);
1492 hci_dev_unlock(hdev);
1497 static void connectable_update_work(struct work_struct *work)
1499 struct hci_dev *hdev = container_of(work, struct hci_dev,
1500 connectable_update);
1503 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1504 mgmt_set_connectable_complete(hdev, status);
1507 static u8 get_service_classes(struct hci_dev *hdev)
1509 struct bt_uuid *uuid;
1512 list_for_each_entry(uuid, &hdev->uuids, list)
1513 val |= uuid->svc_hint;
1518 void __hci_req_update_class(struct hci_request *req)
1520 struct hci_dev *hdev = req->hdev;
1523 BT_DBG("%s", hdev->name);
1525 if (!hdev_is_powered(hdev))
1528 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1531 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1534 cod[0] = hdev->minor_class;
1535 cod[1] = hdev->major_class;
1536 cod[2] = get_service_classes(hdev);
1538 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1541 if (memcmp(cod, hdev->dev_class, 3) == 0)
1544 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1547 static void write_iac(struct hci_request *req)
1549 struct hci_dev *hdev = req->hdev;
1550 struct hci_cp_write_current_iac_lap cp;
1552 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1555 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1556 /* Limited discoverable mode */
1557 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1558 cp.iac_lap[0] = 0x00; /* LIAC */
1559 cp.iac_lap[1] = 0x8b;
1560 cp.iac_lap[2] = 0x9e;
1561 cp.iac_lap[3] = 0x33; /* GIAC */
1562 cp.iac_lap[4] = 0x8b;
1563 cp.iac_lap[5] = 0x9e;
1565 /* General discoverable mode */
1567 cp.iac_lap[0] = 0x33; /* GIAC */
1568 cp.iac_lap[1] = 0x8b;
1569 cp.iac_lap[2] = 0x9e;
1572 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1573 (cp.num_iac * 3) + 1, &cp);
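/* The IAC LAPs above are written least-significant octet first: the
 * General Inquiry Access Code 0x9E8B33 becomes 0x33 0x8b 0x9e and the
 * Limited Inquiry Access Code 0x9E8B00 becomes 0x00 0x8b 0x9e, which is
 * why the limited-discoverable case lists the LIAC before the GIAC.
 */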
1576 static int discoverable_update(struct hci_request *req, unsigned long opt)
1578 struct hci_dev *hdev = req->hdev;
1582 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1584 __hci_req_update_scan(req);
1585 __hci_req_update_class(req);
1588 /* Advertising instances don't use the global discoverable setting, so
1589 * only update AD if advertising was enabled using Set Advertising.
1591 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
1592 __hci_req_update_adv_data(req, 0x00);
1594 hci_dev_unlock(hdev);
1599 static void discoverable_update_work(struct work_struct *work)
1601 struct hci_dev *hdev = container_of(work, struct hci_dev,
1602 discoverable_update);
1605 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1606 mgmt_set_discoverable_complete(hdev, status);
1609 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1612 switch (conn->state) {
1615 if (conn->type == AMP_LINK) {
1616 struct hci_cp_disconn_phy_link cp;
1618 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1620 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1623 struct hci_cp_disconnect dc;
1625 dc.handle = cpu_to_le16(conn->handle);
1627 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1630 conn->state = BT_DISCONN;
1634 if (conn->type == LE_LINK) {
1635 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1637 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1639 } else if (conn->type == ACL_LINK) {
1640 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1642 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1647 if (conn->type == ACL_LINK) {
1648 struct hci_cp_reject_conn_req rej;
1650 bacpy(&rej.bdaddr, &conn->dst);
1651 rej.reason = reason;
1653 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1655 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1656 struct hci_cp_reject_sync_conn_req rej;
1658 bacpy(&rej.bdaddr, &conn->dst);
1660 /* SCO rejection has its own limited set of
1661 * allowed error values (0x0D-0x0F) which isn't
1662 * compatible with most values passed to this
1663 * function. To be safe, hard-code one of the
1664 * values that's suitable for SCO.
1666 rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;
1668 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1673 conn->state = BT_CLOSED;
1678 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1681 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1684 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1686 struct hci_request req;
1689 hci_req_init(&req, conn->hdev);
1691 __hci_abort_conn(&req, conn, reason);
1693 err = hci_req_run(&req, abort_conn_complete);
1694 if (err && err != -ENODATA) {
1695 BT_ERR("Failed to run HCI request: err %d", err);
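/* Illustrative usage sketch (hypothetical caller, not part of this
 * file): a disconnect path would typically call
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * letting __hci_abort_conn() pick the appropriate HCI command for the
 * connection type and state.
 */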
1702 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1704 hci_dev_lock(req->hdev);
1705 __hci_update_background_scan(req);
1706 hci_dev_unlock(req->hdev);
1710 static void bg_scan_update(struct work_struct *work)
1712 struct hci_dev *hdev = container_of(work, struct hci_dev,
1714 struct hci_conn *conn;
1718 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1724 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1726 hci_le_conn_failed(conn, status);
1728 hci_dev_unlock(hdev);
1731 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1733 hci_req_add_le_scan_disable(req);
1737 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1740 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1741 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1742 struct hci_cp_inquiry cp;
1744 BT_DBG("%s", req->hdev->name);
1746 hci_dev_lock(req->hdev);
1747 hci_inquiry_cache_flush(req->hdev);
1748 hci_dev_unlock(req->hdev);
1750 memset(&cp, 0, sizeof(cp));
1752 if (req->hdev->discovery.limited)
1753 memcpy(&cp.lap, liac, sizeof(cp.lap));
1755 memcpy(&cp.lap, giac, sizeof(cp.lap));
1759 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1764 static void le_scan_disable_work(struct work_struct *work)
1766 struct hci_dev *hdev = container_of(work, struct hci_dev,
1767 le_scan_disable.work);
1770 BT_DBG("%s", hdev->name);
1772 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1775 cancel_delayed_work(&hdev->le_scan_restart);
1777 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1779 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1783 hdev->discovery.scan_start = 0;
1785 /* If we were running LE only scan, change discovery state. If
1786 * we were running both LE and BR/EDR inquiry simultaneously,
1787 * and BR/EDR inquiry is already finished, stop discovery,
1788 * otherwise BR/EDR inquiry will stop discovery when finished.
1789 * If remote device name resolution is in progress, do not change
1793 if (hdev->discovery.type == DISCOV_TYPE_LE)
1794 goto discov_stopped;
1796 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1799 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1800 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1801 hdev->discovery.state != DISCOVERY_RESOLVING)
1802 goto discov_stopped;
1807 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1808 HCI_CMD_TIMEOUT, &status);
1810 BT_ERR("Inquiry failed: status 0x%02x", status);
1811 goto discov_stopped;
1818 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1819 hci_dev_unlock(hdev);
1822 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1824 struct hci_dev *hdev = req->hdev;
1825 struct hci_cp_le_set_scan_enable cp;
1827 /* If controller is not scanning we are done. */
1828 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1831 hci_req_add_le_scan_disable(req);
1833 memset(&cp, 0, sizeof(cp));
1834 cp.enable = LE_SCAN_ENABLE;
1835 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1836 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1841 static void le_scan_restart_work(struct work_struct *work)
1843 struct hci_dev *hdev = container_of(work, struct hci_dev,
1844 le_scan_restart.work);
1845 unsigned long timeout, duration, scan_start, now;
1848 BT_DBG("%s", hdev->name);
1850 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1852 BT_ERR("Failed to restart LE scan: status %d", status);
1858 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1859 !hdev->discovery.scan_start)
1862 /* When the scan was started, hdev->le_scan_disable has been queued
1863 * to run 'duration' after scan_start. During the scan restart this
1864 * work has been canceled, so we need to queue it again with the
1865 * proper timeout to make sure that the scan does not run indefinitely.
1867 duration = hdev->discovery.scan_duration;
1868 scan_start = hdev->discovery.scan_start;
1870 if (now - scan_start <= duration) {
1873 if (now >= scan_start)
1874 elapsed = now - scan_start;
1876 elapsed = ULONG_MAX - scan_start + now;
1878 timeout = duration - elapsed;
1883 queue_delayed_work(hdev->req_workqueue,
1884 &hdev->le_scan_disable, timeout);
1887 hci_dev_unlock(hdev);
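/* Worked example of the re-queue above (hypothetical numbers): with a
 * stored scan_duration equivalent to 10 s and 4 s worth of jiffies
 * elapsed since scan_start, le_scan_disable is queued again to fire
 * after the remaining 6 s, so the total scan time still adds up to the
 * original duration. The ULONG_MAX branch only handles the case where
 * jiffies wrapped around between scan_start and now.
 */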
1890 static void disable_advertising(struct hci_request *req)
1894 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1897 static int active_scan(struct hci_request *req, unsigned long opt)
1899 u16 interval = opt;
1900 struct hci_dev *hdev = req->hdev;
1901 struct hci_cp_le_set_scan_param param_cp;
1902 struct hci_cp_le_set_scan_enable enable_cp;
1906 BT_DBG("%s", hdev->name);
1908 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1911 /* Don't let discovery abort an outgoing connection attempt
1912 * that's using directed advertising.
1914 if (hci_lookup_le_connect(hdev)) {
1915 hci_dev_unlock(hdev);
1919 cancel_adv_timeout(hdev);
1920 hci_dev_unlock(hdev);
1922 disable_advertising(req);
1925 /* If controller is scanning, it means the background scanning is
1926 * running. Thus, we should temporarily stop it in order to set the
1927 * discovery scanning parameters.
1929 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1930 hci_req_add_le_scan_disable(req);
1932 /* All active scans will be done with either a resolvable private
1933 * address (when privacy feature has been enabled) or non-resolvable
1936 err = hci_update_random_address(req, true, &own_addr_type);
1938 own_addr_type = ADDR_LE_DEV_PUBLIC;
1940 memset(¶m_cp, 0, sizeof(param_cp));
1941 param_cp.type = LE_SCAN_ACTIVE;
1942 param_cp.interval = cpu_to_le16(interval);
1943 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
1944 param_cp.own_address_type = own_addr_type;
1946 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1949 memset(&enable_cp, 0, sizeof(enable_cp));
1950 enable_cp.enable = LE_SCAN_ENABLE;
1951 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1953 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1959 static int interleaved_discov(struct hci_request *req, unsigned long opt)
1963 BT_DBG("%s", req->hdev->name);
1965 err = active_scan(req, opt);
1969 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
1972 static void start_discovery(struct hci_dev *hdev, u8 *status)
1974 unsigned long timeout;
1976 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
1978 switch (hdev->discovery.type) {
1979 case DISCOV_TYPE_BREDR:
1980 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
1981 hci_req_sync(hdev, bredr_inquiry,
1982 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
1985 case DISCOV_TYPE_INTERLEAVED:
1986 /* When running simultaneous discovery, the LE scanning time
1987 * should occupy the whole discovery time since BR/EDR inquiry
1988 * and LE scanning are scheduled by the controller.
1990 * For interleaved discovery, in comparison, BR/EDR inquiry
1991 * and LE scanning are done sequentially with separate
1994 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
1996 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
1997 /* During simultaneous discovery, we double LE scan
1998 * interval. We must leave some time for the controller
1999 * to do BR/EDR inquiry.
2001 hci_req_sync(hdev, interleaved_discov,
2002 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2007 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2008 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2009 HCI_CMD_TIMEOUT, status);
2011 case DISCOV_TYPE_LE:
2012 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2013 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2014 HCI_CMD_TIMEOUT, status);
2017 *status = HCI_ERROR_UNSPECIFIED;
2024 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2026 /* When service discovery is used and the controller has a
2027 * strict duplicate filter, it is important to remember the
2028 * start and duration of the scan. This is required for
2029 * restarting scanning during the discovery phase.
2031 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2032 hdev->discovery.result_filtering) {
2033 hdev->discovery.scan_start = jiffies;
2034 hdev->discovery.scan_duration = timeout;
2037 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
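/* The timeouts chosen above come from the mgmt defaults: DISCOV_LE_TIMEOUT
 * (assumed here to be 10.24 s) for LE-only and simultaneous discovery,
 * and hdev->discov_interleaved_timeout for the sequential interleaved
 * case, after which le_scan_disable stops the LE portion of the scan.
 */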
2041 bool hci_req_stop_discovery(struct hci_request *req)
2043 struct hci_dev *hdev = req->hdev;
2044 struct discovery_state *d = &hdev->discovery;
2045 struct hci_cp_remote_name_req_cancel cp;
2046 struct inquiry_entry *e;
2049 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2051 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2052 if (test_bit(HCI_INQUIRY, &hdev->flags))
2053 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2055 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2056 cancel_delayed_work(&hdev->le_scan_disable);
2057 hci_req_add_le_scan_disable(req);
2062 /* Passive scanning */
2063 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2064 hci_req_add_le_scan_disable(req);
2069 /* No further actions needed for LE-only discovery */
2070 if (d->type == DISCOV_TYPE_LE)
2073 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2074 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2079 bacpy(&cp.bdaddr, &e->data.bdaddr);
2080 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2088 static int stop_discovery(struct hci_request *req, unsigned long opt)
2090 hci_dev_lock(req->hdev);
2091 hci_req_stop_discovery(req);
2092 hci_dev_unlock(req->hdev);
2097 static void discov_update(struct work_struct *work)
2099 struct hci_dev *hdev = container_of(work, struct hci_dev,
2103 switch (hdev->discovery.state) {
2104 case DISCOVERY_STARTING:
2105 start_discovery(hdev, &status);
2106 mgmt_start_discovery_complete(hdev, status);
2108 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2110 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2112 case DISCOVERY_STOPPING:
2113 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2114 mgmt_stop_discovery_complete(hdev, status);
2116 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2118 case DISCOVERY_STOPPED:
2124 static void discov_off(struct work_struct *work)
2126 struct hci_dev *hdev = container_of(work, struct hci_dev,
2129 BT_DBG("%s", hdev->name);
2133 /* When the discoverable timeout triggers, just make sure
2134 * the limited discoverable flag is cleared. Even in the case
2135 * of a timeout triggered from general discoverable, it is
2136 * safe to unconditionally clear the flag.
2138 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2139 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2140 hdev->discov_timeout = 0;
2142 hci_dev_unlock(hdev);
2144 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2145 mgmt_new_settings(hdev);
2148 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2150 struct hci_dev *hdev = req->hdev;
2155 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2156 !lmp_host_ssp_capable(hdev)) {
2159 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2161 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2164 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2165 sizeof(support), &support);
2169 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2170 lmp_bredr_capable(hdev)) {
2171 struct hci_cp_write_le_host_supported cp;
2176 /* Check first if we already have the right
2177 * host state (host features set)
2179 if (cp.le != lmp_host_le_capable(hdev) ||
2180 cp.simul != lmp_host_le_br_capable(hdev))
2181 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2185 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2186 /* Make sure the controller has a good default for
2187 * advertising data. This also applies to the case
2188 * where BR/EDR was toggled during the AUTO_OFF phase.
2190 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2191 list_empty(&hdev->adv_instances)) {
2192 __hci_req_update_adv_data(req, 0x00);
2193 __hci_req_update_scan_rsp_data(req, 0x00);
2195 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2196 __hci_req_enable_advertising(req);
2197 } else if (!list_empty(&hdev->adv_instances)) {
2198 struct adv_info *adv_instance;
2200 adv_instance = list_first_entry(&hdev->adv_instances,
2201 struct adv_info, list);
2202 __hci_req_schedule_adv_instance(req,
2203 adv_instance->instance,
2208 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2209 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2210 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2211 sizeof(link_sec), &link_sec);
2213 if (lmp_bredr_capable(hdev)) {
2214 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2215 __hci_req_write_fast_connectable(req, true);
2217 __hci_req_write_fast_connectable(req, false);
2218 __hci_req_update_scan(req);
2219 __hci_req_update_class(req);
2220 __hci_req_update_name(req);
2221 __hci_req_update_eir(req);
2224 hci_dev_unlock(hdev);
2228 int __hci_req_hci_power_on(struct hci_dev *hdev)
2230 /* Register the available SMP channels (BR/EDR and LE) only when
2231 * successfully powering on the controller. This late
2232 * registration is required so that LE SMP can clearly decide if
2233 * the public address or static address is used.
2237 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2241 void hci_request_setup(struct hci_dev *hdev)
2243 INIT_WORK(&hdev->discov_update, discov_update);
2244 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2245 INIT_WORK(&hdev->scan_update, scan_update_work);
2246 INIT_WORK(&hdev->connectable_update, connectable_update_work);
2247 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2248 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2249 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2250 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2251 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2254 void hci_request_cancel_all(struct hci_dev *hdev)
2256 hci_req_sync_cancel(hdev, ENODEV);
2258 cancel_work_sync(&hdev->discov_update);
2259 cancel_work_sync(&hdev->bg_scan_update);
2260 cancel_work_sync(&hdev->scan_update);
2261 cancel_work_sync(&hdev->connectable_update);
2262 cancel_work_sync(&hdev->discoverable_update);
2263 cancel_delayed_work_sync(&hdev->discov_off);
2264 cancel_delayed_work_sync(&hdev->le_scan_disable);
2265 cancel_delayed_work_sync(&hdev->le_scan_restart);
2267 if (hdev->adv_instance_timeout) {
2268 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2269 hdev->adv_instance_timeout = 0;
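/* These two entry points are presumably paired by hci_core:
 * hci_request_setup() when the hci_dev is allocated and
 * hci_request_cancel_all() on power-down or unregister, so that none of
 * the work items above can run against a controller that is going away.
 */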