2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "hci_request.h"
38 #define MGMT_VERSION 1
39 #define MGMT_REVISION 8
/* Table of all mgmt command opcodes supported by this kernel; reported to
 * user space via the Read Management Supported Commands reply.
 * NOTE(review): several entries appear missing from this extraction
 * (e.g. MGMT_OP_READ_VERSION) — verify against the upstream file.
 */
41 static const u16 mgmt_commands[] = {
42 MGMT_OP_READ_INDEX_LIST,
45 MGMT_OP_SET_DISCOVERABLE,
46 MGMT_OP_SET_CONNECTABLE,
47 MGMT_OP_SET_FAST_CONNECTABLE,
49 MGMT_OP_SET_LINK_SECURITY,
53 MGMT_OP_SET_DEV_CLASS,
54 MGMT_OP_SET_LOCAL_NAME,
57 MGMT_OP_LOAD_LINK_KEYS,
58 MGMT_OP_LOAD_LONG_TERM_KEYS,
60 MGMT_OP_GET_CONNECTIONS,
61 MGMT_OP_PIN_CODE_REPLY,
62 MGMT_OP_PIN_CODE_NEG_REPLY,
63 MGMT_OP_SET_IO_CAPABILITY,
65 MGMT_OP_CANCEL_PAIR_DEVICE,
66 MGMT_OP_UNPAIR_DEVICE,
67 MGMT_OP_USER_CONFIRM_REPLY,
68 MGMT_OP_USER_CONFIRM_NEG_REPLY,
69 MGMT_OP_USER_PASSKEY_REPLY,
70 MGMT_OP_USER_PASSKEY_NEG_REPLY,
71 MGMT_OP_READ_LOCAL_OOB_DATA,
72 MGMT_OP_ADD_REMOTE_OOB_DATA,
73 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
74 MGMT_OP_START_DISCOVERY,
75 MGMT_OP_STOP_DISCOVERY,
78 MGMT_OP_UNBLOCK_DEVICE,
79 MGMT_OP_SET_DEVICE_ID,
80 MGMT_OP_SET_ADVERTISING,
82 MGMT_OP_SET_STATIC_ADDRESS,
83 MGMT_OP_SET_SCAN_PARAMS,
84 MGMT_OP_SET_SECURE_CONN,
85 MGMT_OP_SET_DEBUG_KEYS,
88 MGMT_OP_GET_CONN_INFO,
89 MGMT_OP_GET_CLOCK_INFO,
91 MGMT_OP_REMOVE_DEVICE,
92 MGMT_OP_LOAD_CONN_PARAM,
93 MGMT_OP_READ_UNCONF_INDEX_LIST,
94 MGMT_OP_READ_CONFIG_INFO,
95 MGMT_OP_SET_EXTERNAL_CONFIG,
96 MGMT_OP_SET_PUBLIC_ADDRESS,
97 MGMT_OP_START_SERVICE_DISCOVERY,
/* Table of all mgmt event opcodes this kernel can emit; reported to user
 * space via the Read Management Supported Commands reply.
 * NOTE(review): some entries appear missing from this extraction — verify.
 */
100 static const u16 mgmt_events[] = {
101 MGMT_EV_CONTROLLER_ERROR,
103 MGMT_EV_INDEX_REMOVED,
104 MGMT_EV_NEW_SETTINGS,
105 MGMT_EV_CLASS_OF_DEV_CHANGED,
106 MGMT_EV_LOCAL_NAME_CHANGED,
107 MGMT_EV_NEW_LINK_KEY,
108 MGMT_EV_NEW_LONG_TERM_KEY,
109 MGMT_EV_DEVICE_CONNECTED,
110 MGMT_EV_DEVICE_DISCONNECTED,
111 MGMT_EV_CONNECT_FAILED,
112 MGMT_EV_PIN_CODE_REQUEST,
113 MGMT_EV_USER_CONFIRM_REQUEST,
114 MGMT_EV_USER_PASSKEY_REQUEST,
116 MGMT_EV_DEVICE_FOUND,
118 MGMT_EV_DEVICE_BLOCKED,
119 MGMT_EV_DEVICE_UNBLOCKED,
120 MGMT_EV_DEVICE_UNPAIRED,
121 MGMT_EV_PASSKEY_NOTIFY,
124 MGMT_EV_DEVICE_ADDED,
125 MGMT_EV_DEVICE_REMOVED,
126 MGMT_EV_NEW_CONN_PARAM,
127 MGMT_EV_UNCONF_INDEX_ADDED,
128 MGMT_EV_UNCONF_INDEX_REMOVED,
129 MGMT_EV_NEW_CONFIG_OPTIONS,
132 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* NOTE(review): fragment of struct pending_cmd — the "struct pending_cmd {"
 * header line and most members (opcode, index, param, param_len, sk,
 * user_data) appear missing from this extraction; verify against upstream.
 */
135 struct list_head list;
/* Per-command completion callback; returns a mgmt error/status code. */
142 int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
145 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte (see mgmt_status() below);
 * NOTE(review): the first entry (HCI success -> MGMT_STATUS_SUCCESS)
 * appears missing from this extraction.
 */
146 static u8 mgmt_status_table[] = {
148 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
149 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
150 MGMT_STATUS_FAILED, /* Hardware Failure */
151 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
152 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
153 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
154 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
155 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
157 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
158 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
159 MGMT_STATUS_BUSY, /* Command Disallowed */
160 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
161 MGMT_STATUS_REJECTED, /* Rejected Security */
162 MGMT_STATUS_REJECTED, /* Rejected Personal */
163 MGMT_STATUS_TIMEOUT, /* Host Timeout */
164 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
165 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
166 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
167 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
168 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
169 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
170 MGMT_STATUS_BUSY, /* Repeated Attempts */
171 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
172 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
173 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
174 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
175 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
176 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
177 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
178 MGMT_STATUS_FAILED, /* Unspecified Error */
179 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
180 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
181 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
182 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
183 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
184 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
185 MGMT_STATUS_FAILED, /* Unit Link Key Used */
186 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
187 MGMT_STATUS_TIMEOUT, /* Instant Passed */
188 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
189 MGMT_STATUS_FAILED, /* Transaction Collision */
190 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
191 MGMT_STATUS_REJECTED, /* QoS Rejected */
192 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
193 MGMT_STATUS_REJECTED, /* Insufficient Security */
194 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
195 MGMT_STATUS_BUSY, /* Role Switch Pending */
196 MGMT_STATUS_FAILED, /* Slot Violation */
197 MGMT_STATUS_FAILED, /* Role Switch Failed */
198 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
199 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
200 MGMT_STATUS_BUSY, /* Host Busy Pairing */
201 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
202 MGMT_STATUS_BUSY, /* Controller Busy */
203 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
204 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
205 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
206 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
207 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
210 static u8 mgmt_status(u8 hci_status)
212 if (hci_status < ARRAY_SIZE(mgmt_status_table))
213 return mgmt_status_table[hci_status];
215 return MGMT_STATUS_FAILED;
/* Build an mgmt event packet (header + payload) and broadcast it to all
 * management control sockets except @skip_sk.
 * NOTE(review): the alloc_skb() failure check (return -ENOMEM) and the
 * hdev NULL branch around the index assignment appear missing from this
 * extraction — verify against the upstream file.
 */
218 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
219 struct sock *skip_sk)
222 struct mgmt_hdr *hdr;
224 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
228 hdr = (void *) skb_put(skb, sizeof(*hdr));
229 hdr->opcode = cpu_to_le16(event);
/* hdev present: report its controller index ... */
231 hdr->index = cpu_to_le16(hdev->id);
/* ... otherwise the event is not tied to a controller. */
233 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
234 hdr->len = cpu_to_le16(data_len);
237 memcpy(skb_put(skb, data_len), data, data_len);
/* Time stamp the packet so monitoring tools see when it was sent. */
240 __net_timestamp(skb);
242 hci_send_to_control(skb, skip_sk);
/* Send a Command Status event (cmd + failure status) to a single mgmt
 * socket. Used to reply to a command without response parameters.
 * NOTE(review): alloc_skb() failure check and ev->status assignment appear
 * missing from this extraction — verify against upstream.
 */
248 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
251 struct mgmt_hdr *hdr;
252 struct mgmt_ev_cmd_status *ev;
255 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
257 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
261 hdr = (void *) skb_put(skb, sizeof(*hdr));
263 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
264 hdr->index = cpu_to_le16(index);
265 hdr->len = cpu_to_le16(sizeof(*ev));
267 ev = (void *) skb_put(skb, sizeof(*ev));
269 ev->opcode = cpu_to_le16(cmd);
/* Queue directly on the requesting socket (not a broadcast). */
271 err = sock_queue_rcv_skb(sk, skb);
/* Send a Command Complete event carrying @rp_len bytes of response
 * parameters (@rp) back to the requesting mgmt socket.
 * NOTE(review): alloc_skb() failure check, ev->status assignment and the
 * rp NULL guard around the memcpy appear missing from this extraction.
 */
278 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
279 void *rp, size_t rp_len)
282 struct mgmt_hdr *hdr;
283 struct mgmt_ev_cmd_complete *ev;
286 BT_DBG("sock %p", sk);
288 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
292 hdr = (void *) skb_put(skb, sizeof(*hdr));
294 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
295 hdr->index = cpu_to_le16(index);
296 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
/* Event struct has a flexible data[] tail for the response params. */
298 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
299 ev->opcode = cpu_to_le16(cmd);
303 memcpy(ev->data, rp, rp_len);
305 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the mgmt interface version
 * and revision. hdev is unused — this is a controller-independent command.
 */
312 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
315 struct mgmt_rp_read_version rp;
317 BT_DBG("sock %p", sk);
319 rp.version = MGMT_VERSION;
320 rp.revision = cpu_to_le16(MGMT_REVISION);
322 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: reply with the lists of supported
 * command and event opcodes (little-endian u16 each).
 * NOTE(review): kmalloc failure check, kfree(rp) and the final return of
 * err appear missing from this extraction — verify against upstream.
 */
326 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
329 struct mgmt_rp_read_commands *rp;
330 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
331 const u16 num_events = ARRAY_SIZE(mgmt_events);
336 BT_DBG("sock %p", sk);
338 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
340 rp = kmalloc(rp_size, GFP_KERNEL);
344 rp->num_commands = cpu_to_le16(num_commands);
345 rp->num_events = cpu_to_le16(num_events);
/* Commands first, then events, in one contiguous opcode array;
 * put_unaligned_le16 because the array need not be aligned.
 */
347 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
348 put_unaligned_le16(mgmt_commands[i], opcode);
350 for (i = 0; i < num_events; i++, opcode++)
351 put_unaligned_le16(mgmt_events[i], opcode);
353 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured BR/EDR controllers. Two passes over hci_dev_list under the
 * read lock: first to count, then to fill (skipping devices still in
 * SETUP/CONFIG, in use by a user channel, or raw-only).
 * NOTE(review): kmalloc failure return, count++ in the first pass and the
 * trailing kfree(rp)/return appear missing from this extraction.
 */
360 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
363 struct mgmt_rp_read_index_list *rp;
369 BT_DBG("sock %p", sk);
371 read_lock(&hci_dev_list_lock);
374 list_for_each_entry(d, &hci_dev_list, list) {
375 if (d->dev_type == HCI_BREDR &&
376 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
/* 2 bytes per controller index. */
380 rp_len = sizeof(*rp) + (2 * count);
/* GFP_ATOMIC: we are still holding the dev-list read lock. */
381 rp = kmalloc(rp_len, GFP_ATOMIC);
383 read_unlock(&hci_dev_list_lock);
388 list_for_each_entry(d, &hci_dev_list, list) {
389 if (test_bit(HCI_SETUP, &d->dev_flags) ||
390 test_bit(HCI_CONFIG, &d->dev_flags) ||
391 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
394 /* Devices marked as raw-only are neither configured
395 * nor unconfigured controllers.
397 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
400 if (d->dev_type == HCI_BREDR &&
401 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
402 rp->index[count++] = cpu_to_le16(d->id);
403 BT_DBG("Added hci%u", d->id);
/* Recompute length — the second pass may have skipped devices. */
407 rp->num_controllers = cpu_to_le16(count);
408 rp_len = sizeof(*rp) + (2 * count);
410 read_unlock(&hci_dev_list_lock);
412 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: mirror of read_index_list()
 * but selecting controllers that DO have HCI_UNCONFIGURED set.
 * NOTE(review): same lines missing from this extraction as in
 * read_index_list (alloc check, first-pass count++, kfree/return).
 */
420 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
421 void *data, u16 data_len)
423 struct mgmt_rp_read_unconf_index_list *rp;
429 BT_DBG("sock %p", sk);
431 read_lock(&hci_dev_list_lock);
434 list_for_each_entry(d, &hci_dev_list, list) {
435 if (d->dev_type == HCI_BREDR &&
436 test_bit(HCI_UNCONFIGURED, &d->dev_flags)
440 rp_len = sizeof(*rp) + (2 * count);
441 rp = kmalloc(rp_len, GFP_ATOMIC);
443 read_unlock(&hci_dev_list_lock);
448 list_for_each_entry(d, &hci_dev_list, list) {
449 if (test_bit(HCI_SETUP, &d->dev_flags) ||
450 test_bit(HCI_CONFIG, &d->dev_flags) ||
451 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
454 /* Devices marked as raw-only are neither configured
455 * nor unconfigured controllers.
457 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
460 if (d->dev_type == HCI_BREDR &&
461 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
462 rp->index[count++] = cpu_to_le16(d->id);
463 BT_DBG("Added hci%u", d->id);
467 rp->num_controllers = cpu_to_le16(count);
468 rp_len = sizeof(*rp) + (2 * count);
470 read_unlock(&hci_dev_list_lock);
472 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
480 static bool is_configured(struct hci_dev *hdev)
482 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
483 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
486 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
487 !bacmp(&hdev->public_addr, BDADDR_ANY))
493 static __le32 get_missing_options(struct hci_dev *hdev)
497 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
498 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
499 options |= MGMT_OPTION_EXTERNAL_CONFIG;
501 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
502 !bacmp(&hdev->public_addr, BDADDR_ANY))
503 options |= MGMT_OPTION_PUBLIC_ADDRESS;
505 return cpu_to_le32(options);
508 static int new_options(struct hci_dev *hdev, struct sock *skip)
510 __le32 options = get_missing_options(hdev);
512 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
513 sizeof(options), skip);
516 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
518 __le32 options = get_missing_options(hdev);
520 return cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer, supported
 * configuration options and currently missing options for @hdev.
 * NOTE(review): the matching hci_dev_lock() call before the memset
 * appears missing from this extraction (the unlock is visible below).
 */
524 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
525 void *data, u16 data_len)
527 struct mgmt_rp_read_config_info rp;
530 BT_DBG("sock %p %s", sk, hdev->name);
534 memset(&rp, 0, sizeof(rp));
535 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
/* Supported options depend on quirk/driver capabilities ... */
537 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
538 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* ... a public address can only be set if the driver provides a hook. */
540 if (hdev->set_bdaddr)
541 options |= MGMT_OPTION_PUBLIC_ADDRESS;
543 rp.supported_options = cpu_to_le32(options);
544 rp.missing_options = get_missing_options(hdev);
546 hci_dev_unlock(hdev);
548 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
/* Build the bitmask of settings this controller could support, based on
 * its LMP/LE feature bits. Always-available settings first, then
 * BR/EDR-dependent, then LE-dependent ones.
 * NOTE(review): the local "settings = 0" declaration and final return
 * appear missing from this extraction — verify against upstream.
 */
552 static u32 get_supported_settings(struct hci_dev *hdev)
556 settings |= MGMT_SETTING_POWERED;
557 settings |= MGMT_SETTING_BONDABLE;
558 settings |= MGMT_SETTING_DEBUG_KEYS;
559 settings |= MGMT_SETTING_CONNECTABLE;
560 settings |= MGMT_SETTING_DISCOVERABLE;
562 if (lmp_bredr_capable(hdev)) {
/* Fast connectable (interlaced page scan) needs >= Bluetooth 1.2. */
563 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
564 settings |= MGMT_SETTING_FAST_CONNECTABLE;
565 settings |= MGMT_SETTING_BREDR;
566 settings |= MGMT_SETTING_LINK_SECURITY;
568 if (lmp_ssp_capable(hdev)) {
569 settings |= MGMT_SETTING_SSP;
570 settings |= MGMT_SETTING_HS;
573 if (lmp_sc_capable(hdev))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_SECURE_CONN;
581 settings |= MGMT_SETTING_PRIVACY;
/* Controllers needing external setup expose the configuration setting. */
584 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
586 settings |= MGMT_SETTING_CONFIGURATION;
/* Build the bitmask of settings currently active on the controller by
 * sampling the hdev dev_flags bits one by one.
 * NOTE(review): the local "settings = 0" declaration and final return
 * appear missing from this extraction — verify against upstream.
 */
591 static u32 get_current_settings(struct hci_dev *hdev)
595 if (hdev_is_powered(hdev))
596 settings |= MGMT_SETTING_POWERED;
598 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
599 settings |= MGMT_SETTING_CONNECTABLE;
601 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
602 settings |= MGMT_SETTING_FAST_CONNECTABLE;
604 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
605 settings |= MGMT_SETTING_DISCOVERABLE;
607 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
608 settings |= MGMT_SETTING_BONDABLE;
610 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
611 settings |= MGMT_SETTING_BREDR;
613 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
614 settings |= MGMT_SETTING_LE;
616 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
617 settings |= MGMT_SETTING_LINK_SECURITY;
619 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
620 settings |= MGMT_SETTING_SSP;
622 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
623 settings |= MGMT_SETTING_HS;
625 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
626 settings |= MGMT_SETTING_ADVERTISING;
628 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
629 settings |= MGMT_SETTING_SECURE_CONN;
631 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
632 settings |= MGMT_SETTING_DEBUG_KEYS;
634 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
635 settings |= MGMT_SETTING_PRIVACY;
640 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data (at most @len bytes); returns the advanced write pointer. The
 * field type is downgraded from _ALL to _SOME if the list is truncated.
 * NOTE(review): several loop-interior lines (uuids_start setup, the
 * 128->16 bit base-UUID check, break statements) appear missing from this
 * extraction — verify against upstream.
 */
642 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
644 u8 *ptr = data, *uuids_start = NULL;
645 struct bt_uuid *uuid;
650 list_for_each_entry(uuid, &hdev->uuids, list) {
653 if (uuid->size != 16)
/* 16-bit alias lives in bytes 12-13 of the 128-bit form. */
656 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
/* PnP Information service is deliberately not advertised. */
660 if (uuid16 == PNP_INFO_SVCLASS_ID)
666 uuids_start[1] = EIR_UUID16_ALL;
670 /* Stop if not enough space to put next UUID */
671 if ((ptr - data) + sizeof(u16) > len) {
672 uuids_start[1] = EIR_UUID16_SOME;
676 *ptr++ = (uuid16 & 0x00ff);
677 *ptr++ = (uuid16 & 0xff00) >> 8;
678 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing the registered 32-bit service UUIDs;
 * same structure and truncation handling as create_uuid16_list().
 * NOTE(review): loop-interior lines appear missing from this extraction.
 */
684 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
686 u8 *ptr = data, *uuids_start = NULL;
687 struct bt_uuid *uuid;
692 list_for_each_entry(uuid, &hdev->uuids, list) {
693 if (uuid->size != 32)
699 uuids_start[1] = EIR_UUID32_ALL;
703 /* Stop if not enough space to put next UUID */
704 if ((ptr - data) + sizeof(u32) > len) {
705 uuids_start[1] = EIR_UUID32_SOME;
/* 32-bit alias lives in bytes 12-15 of the 128-bit form. */
709 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
711 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing the registered 128-bit service UUIDs;
 * same structure and truncation handling as the 16/32-bit variants.
 * NOTE(review): loop-interior lines appear missing from this extraction.
 */
717 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
719 u8 *ptr = data, *uuids_start = NULL;
720 struct bt_uuid *uuid;
725 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 128)
732 uuids_start[1] = EIR_UUID128_ALL;
736 /* Stop if not enough space to put next UUID */
737 if ((ptr - data) + 16 > len) {
738 uuids_start[1] = EIR_UUID128_SOME;
742 memcpy(ptr, uuid->uuid, 16);
744 uuids_start[0] += 16;
750 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
752 struct pending_cmd *cmd;
754 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
755 if (cmd->opcode == opcode)
762 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
763 struct hci_dev *hdev,
766 struct pending_cmd *cmd;
768 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
769 if (cmd->user_data != data)
771 if (cmd->opcode == opcode)
/* Fill @ptr with LE scan response data (currently just the local name,
 * shortened if it exceeds the remaining AD space) and return its length.
 * NOTE(review): the "if (name_len > 0)" guard, ad_len initialisation and
 * the name truncation assignment appear missing from this extraction.
 */
778 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
783 name_len = strlen(hdev->dev_name);
/* -2 accounts for the AD field's length and type bytes. */
785 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
787 if (name_len > max_len) {
789 ptr[1] = EIR_NAME_SHORT;
791 ptr[1] = EIR_NAME_COMPLETE;
/* AD length byte covers the type byte plus the name payload. */
793 ptr[0] = name_len + 1;
795 memcpy(ptr + 2, hdev->dev_name, name_len);
797 ad_len += (name_len + 2);
798 ptr += (name_len + 2);
/* Queue an HCI command updating the LE scan response data, but only when
 * LE is enabled and the data actually changed since last programmed.
 * NOTE(review): early "return" statements after the LE check and the
 * unchanged-data check, plus cp.length assignment, appear missing from
 * this extraction.
 */
804 static void update_scan_rsp_data(struct hci_request *req)
806 struct hci_dev *hdev = req->hdev;
807 struct hci_cp_le_set_scan_rsp_data cp;
810 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
813 memset(&cp, 0, sizeof(cp));
815 len = create_scan_rsp_data(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed. */
817 if (hdev->scan_rsp_data_len == len &&
818 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
/* Cache what we are about to program so the comparison above works. */
821 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
822 hdev->scan_rsp_data_len = len;
826 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Return the LE advertising flags (LE_AD_GENERAL/LE_AD_LIMITED or 0)
 * reflecting the discoverable mode, preferring the value from a pending
 * Set Discoverable command over the current dev_flags.
 * NOTE(review): the "if (cmd)" guard, the cp->val == 0x01 test and the
 * final "return 0" appear missing from this extraction.
 */
829 static u8 get_adv_discov_flags(struct hci_dev *hdev)
831 struct pending_cmd *cmd;
833 /* If there's a pending mgmt command the flags will not yet have
834 * their final values, so check for this first.
836 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
838 struct mgmt_mode *cp = cmd->param;
840 return LE_AD_GENERAL;
841 else if (cp->val == 0x02)
842 return LE_AD_LIMITED;
844 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
845 return LE_AD_LIMITED;
846 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
847 return LE_AD_GENERAL;
/* Fill @ptr with LE advertising data (flags field, optional TX power)
 * and return its length.
 * NOTE(review): the flags-field emission block (length/type/value bytes)
 * and the final "return ad_len" appear missing from this extraction.
 */
853 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
855 u8 ad_len = 0, flags = 0;
857 flags |= get_adv_discov_flags(hdev);
/* LE-only controllers advertise that BR/EDR is not supported. */
859 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
860 flags |= LE_AD_NO_BREDR;
863 BT_DBG("adv flags 0x%02x", flags);
873 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
875 ptr[1] = EIR_TX_POWER;
876 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI command updating the LE advertising data, but only when
 * LE is enabled and the data actually changed since last programmed.
 * Mirrors update_scan_rsp_data() above.
 * NOTE(review): the early returns and cp.length assignment appear
 * missing from this extraction.
 */
885 static void update_adv_data(struct hci_request *req)
887 struct hci_dev *hdev = req->hdev;
888 struct hci_cp_le_set_adv_data cp;
891 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
894 memset(&cp, 0, sizeof(cp));
896 len = create_adv_data(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed. */
898 if (hdev->adv_data_len == len &&
899 memcmp(cp.data, hdev->adv_data, len) == 0)
902 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
903 hdev->adv_data_len = len;
907 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
910 int mgmt_update_adv_data(struct hci_dev *hdev)
912 struct hci_request req;
914 hci_req_init(&req, hdev);
915 update_adv_data(&req);
917 return hci_req_run(&req, NULL);
/* Fill @data with the extended inquiry response: local name, inquiry TX
 * power, Device ID record (if configured) and the 16/32/128-bit UUID
 * lists. @data is assumed to be HCI_MAX_EIR_LENGTH bytes.
 * NOTE(review): the ptr initialisation, the name-length truncation test
 * and several field-length/type assignments appear missing from this
 * extraction — verify against upstream.
 */
920 static void create_eir(struct hci_dev *hdev, u8 *data)
925 name_len = strlen(hdev->dev_name);
931 ptr[1] = EIR_NAME_SHORT;
933 ptr[1] = EIR_NAME_COMPLETE;
935 /* EIR Data length */
936 ptr[0] = name_len + 1;
938 memcpy(ptr + 2, hdev->dev_name, name_len);
940 ptr += (name_len + 2);
943 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
945 ptr[1] = EIR_TX_POWER;
946 ptr[2] = (u8) hdev->inq_tx_power;
951 if (hdev->devid_source > 0) {
953 ptr[1] = EIR_DEVICE_ID;
/* Device ID record: source, vendor, product, version (LE u16 each). */
955 put_unaligned_le16(hdev->devid_source, ptr + 2);
956 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
957 put_unaligned_le16(hdev->devid_product, ptr + 6);
958 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* UUID lists consume whatever space remains. */
963 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
964 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
965 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue a Write Extended Inquiry Response command when the device is
 * powered, supports extended inquiry, has SSP enabled, the service cache
 * is not active, and the EIR contents actually changed.
 * NOTE(review): the early "return" statements after each guard appear
 * missing from this extraction.
 */
968 static void update_eir(struct hci_request *req)
970 struct hci_dev *hdev = req->hdev;
971 struct hci_cp_write_eir cp;
973 if (!hdev_is_powered(hdev))
976 if (!lmp_ext_inq_capable(hdev))
/* EIR is only valid with Simple Pairing enabled. */
979 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
982 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
985 memset(&cp, 0, sizeof(cp));
987 create_eir(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed. */
989 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
992 memcpy(hdev->eir, cp.data, sizeof(cp.data));
994 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
997 static u8 get_service_classes(struct hci_dev *hdev)
999 struct bt_uuid *uuid;
1002 list_for_each_entry(uuid, &hdev->uuids, list)
1003 val |= uuid->svc_hint;
/* Queue a Write Class of Device command when powered with BR/EDR
 * enabled, no active service cache, and the class actually changed.
 * NOTE(review): the early "return" statements after each guard and the
 * limited-discoverable bit set (cod[1] |= 0x20) appear missing from this
 * extraction.
 */
1008 static void update_class(struct hci_request *req)
1010 struct hci_dev *hdev = req->hdev;
1013 BT_DBG("%s", hdev->name);
1015 if (!hdev_is_powered(hdev))
1018 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1021 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
/* CoD layout: minor, major, service classes. */
1024 cod[0] = hdev->minor_class;
1025 cod[1] = hdev->major_class;
1026 cod[2] = get_service_classes(hdev);
1028 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
/* Skip the HCI round-trip when nothing changed. */
1031 if (memcmp(cod, hdev->dev_class, 3) == 0)
1034 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1037 static bool get_connectable(struct hci_dev *hdev)
1039 struct pending_cmd *cmd;
1041 /* If there's a pending mgmt command the flag will not yet have
1042 * it's final value, so check for this first.
1044 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1046 struct mgmt_mode *cp = cmd->param;
1050 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1053 static void disable_advertising(struct hci_request *req)
1057 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the HCI commands to (re)start LE advertising: restart if already
 * advertising, pick an own-address type, program advertising parameters
 * and finally enable advertising.
 * NOTE(review): the early "return" after the LE-connection check, the
 * "connectable" local declaration and the return after a failed
 * hci_update_random_address() appear missing from this extraction.
 */
1060 static void enable_advertising(struct hci_request *req)
1062 struct hci_dev *hdev = req->hdev;
1063 struct hci_cp_le_set_adv_param cp;
1064 u8 own_addr_type, enable = 0x01;
/* Advertising while an LE connection exists is not attempted here. */
1067 if (hci_conn_num(hdev, LE_LINK) > 0)
/* Restart: advertising parameters cannot change while enabled. */
1070 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1071 disable_advertising(req);
1073 /* Clear the HCI_LE_ADV bit temporarily so that the
1074 * hci_update_random_address knows that it's safe to go ahead
1075 * and write a new random address. The flag will be set back on
1076 * as soon as the SET_ADV_ENABLE HCI command completes.
1078 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1080 connectable = get_connectable(hdev);
1082 /* Set require_privacy to true only when non-connectable
1083 * advertising is used. In that case it is fine to use a
1084 * non-resolvable private address.
1086 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1089 memset(&cp, 0, sizeof(cp));
1090 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1091 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1092 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1093 cp.own_address_type = own_addr_type;
1094 cp.channel_map = hdev->le_adv_channel_map;
1096 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1098 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed work: when the service cache period expires, flush the cached
 * class/EIR state to the controller.
 * NOTE(review): the hci_dev_lock() call and the update_eir()/
 * update_class() invocations between req init and unlock appear missing
 * from this extraction.
 */
1101 static void service_cache_off(struct work_struct *work)
1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1104 service_cache.work);
1105 struct hci_request req;
/* Nothing to do if the cache flag was not set. */
1107 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1110 hci_req_init(&req, hdev);
1117 hci_dev_unlock(hdev);
1119 hci_req_run(&req, NULL);
/* Delayed work: mark the resolvable private address as expired and, if
 * advertising is active, restart advertising so a fresh RPA gets
 * generated and programmed.
 * NOTE(review): the container_of second line (rpa_expired.work), a
 * BT_DBG and the early "return" after the advertising check appear
 * missing from this extraction.
 */
1122 static void rpa_expired(struct work_struct *work)
1124 struct hci_dev *hdev = container_of(work, struct hci_dev,
1126 struct hci_request req;
1130 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1132 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1135 /* The generation of a new RPA and programming it into the
1136 * controller happens in the enable_advertising() function.
1138 hci_req_init(&req, hdev);
1139 enable_advertising(&req);
1140 hci_req_run(&req, NULL);
/* One-time setup when the first mgmt socket touches @hdev: mark it as
 * mgmt-controlled, initialise the delayed work items and clear the
 * implicit bondable state (mgmt requires explicit enabling).
 * NOTE(review): the early "return" when HCI_MGMT was already set appears
 * missing from this extraction.
 */
1143 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1145 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1148 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1149 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1151 /* Non-mgmt controlled devices get this bit set
1152 * implicitly so that pairing works for them, however
1153 * for mgmt we require user-space to explicitly enable
1156 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: reply with address, version, manufacturer,
 * supported/current settings, class and names of @hdev.
 * NOTE(review): the matching hci_dev_lock() call before filling rp
 * appears missing from this extraction (the unlock is visible below).
 */
1159 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1160 void *data, u16 data_len)
1162 struct mgmt_rp_read_info rp;
1164 BT_DBG("sock %p %s", sk, hdev->name);
1168 memset(&rp, 0, sizeof(rp));
1170 bacpy(&rp.bdaddr, &hdev->bdaddr);
1172 rp.version = hdev->hci_ver;
1173 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1175 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1176 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1178 memcpy(rp.dev_class, hdev->dev_class, 3);
1180 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1181 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1183 hci_dev_unlock(hdev);
1185 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1189 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending command entry for @opcode, copy the request
 * parameters, take a reference on @sk and link it into
 * hdev->mgmt_pending. Returns the new entry or NULL on allocation
 * failure.
 * NOTE(review): the kzalloc/kmemdup failure paths (kfree + return NULL),
 * cmd->sk assignment and sock_hold() appear missing from this
 * extraction — verify against upstream.
 */
1196 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1197 struct hci_dev *hdev, void *data,
1200 struct pending_cmd *cmd;
1202 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1206 cmd->opcode = opcode;
1207 cmd->index = hdev->id;
/* Own copy of the parameters — the caller's buffer is not kept. */
1209 cmd->param = kmemdup(data, len, GFP_KERNEL);
1215 cmd->param_len = len;
1220 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command of @hdev matching @opcode
 * (opcode 0 matches all). Safe iteration: the callback may remove the
 * entry.
 * NOTE(review): the callback parameter tail (void *data), the "continue"
 * for non-matching entries and the cb(cmd, data) invocation appear
 * missing from this extraction.
 */
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 void (*cb)(struct pending_cmd *cmd,
1230 struct pending_cmd *cmd, *tmp;
1232 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 if (opcode > 0 && cmd->opcode != opcode)
1240 static void mgmt_pending_remove(struct pending_cmd *cmd)
1242 list_del(&cmd->list);
1243 mgmt_pending_free(cmd);
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1250 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1254 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1256 BT_DBG("%s status 0x%02x", hdev->name, status);
1258 if (hci_conn_count(hdev) == 0) {
1259 cancel_delayed_work(&hdev->power_off);
1260 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the HCI commands needed to stop whatever discovery activity is
 * running (inquiry, LE scan, or name resolution) and stop passive
 * scanning. Returns true if any stop command was queued.
 * NOTE(review): the "return true/false" statements, the default case and
 * the inquiry-cache lookup failure branch appear missing from this
 * extraction — verify against upstream.
 */
1264 static bool hci_stop_discovery(struct hci_request *req)
1266 struct hci_dev *hdev = req->hdev;
1267 struct hci_cp_remote_name_req_cancel cp;
1268 struct inquiry_entry *e;
1270 switch (hdev->discovery.state) {
1271 case DISCOVERY_FINDING:
/* BR/EDR inquiry and LE scan are stopped with different commands. */
1272 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1273 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1275 cancel_delayed_work(&hdev->le_scan_disable);
1276 hci_req_add_le_scan_disable(req);
1281 case DISCOVERY_RESOLVING:
1282 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1287 bacpy(&cp.bdaddr, &e->data.bdaddr);
1288 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1294 /* Passive scanning */
1295 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1296 hci_req_add_le_scan_disable(req);
/* Build and run a single HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, stop advertising, stop
 * discovery, and tear down every connection according to its state
 * (disconnect established links, cancel outgoing attempts, reject
 * incoming ones). Returns the hci_req_run() result.
 * NOTE(review): several case labels (BT_CONNECTED/BT_CONFIG,
 * BT_CONNECT, BT_CONNECT2), the "u8 scan = 0x00" local and closing
 * braces appear missing from this extraction — verify against upstream.
 */
1306 static int clean_up_hci_state(struct hci_dev *hdev)
1308 struct hci_request req;
1309 struct hci_conn *conn;
1310 bool discov_stopped;
1313 hci_req_init(&req, hdev);
1315 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1316 test_bit(HCI_PSCAN, &hdev->flags)) {
1318 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1321 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1322 disable_advertising(&req);
1324 discov_stopped = hci_stop_discovery(&req);
1326 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1327 struct hci_cp_disconnect dc;
1328 struct hci_cp_reject_conn_req rej;
1330 switch (conn->state) {
/* Established links: clean disconnect. */
1333 dc.handle = cpu_to_le16(conn->handle);
1334 dc.reason = 0x15; /* Terminated due to Power Off */
1335 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Outgoing attempts: cancel by link type. */
1338 if (conn->type == LE_LINK)
1339 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1341 else if (conn->type == ACL_LINK)
1342 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming attempts: reject by link type. */
1346 bacpy(&rej.bdaddr, &conn->dst);
1347 rej.reason = 0x15; /* Terminated due to Power Off */
1348 if (conn->type == ACL_LINK)
1349 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1351 else if (conn->type == SCO_LINK)
1352 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1358 err = hci_req_run(&req, clean_up_hci_complete);
/* Only flip discovery state if the request was actually queued. */
1359 if (!err && discov_stopped)
1360 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: power the controller up or down.
 * Validates the mode byte, rejects concurrent power commands, handles
 * the auto-off race, and either queues power_on work or cleans up HCI
 * state before scheduling power-off.
 * NOTE(review): hci_dev_lock(), several goto-failed/unlock jumps, the
 * "if (cp->val)" branch selection and "err = 0" assignments appear
 * missing from this extraction — verify against upstream.
 */
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1368 struct mgmt_mode *cp = data;
1369 struct pending_cmd *cmd;
1372 BT_DBG("request for %s", hdev->name);
1374 if (cp->val != 0x00 && cp->val != 0x01)
1375 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 MGMT_STATUS_INVALID_PARAMS);
/* Only one Set Powered command may be outstanding at a time. */
1380 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off pending: cancel it and complete the power-up synchronously. */
1386 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 cancel_delayed_work(&hdev->power_off);
1390 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1392 err = mgmt_powered(hdev, 1);
/* Already in the requested state: just echo the settings. */
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1409 queue_work(hdev->req_workqueue, &hdev->power_on);
1412 /* Disconnect connections, stop scans, etc */
1413 err = clean_up_hci_state(hdev);
1415 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 HCI_POWER_OFF_TIMEOUT);
1418 /* ENODATA means there were no HCI commands queued */
1419 if (err == -ENODATA) {
1420 cancel_delayed_work(&hdev->power_off);
1421 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1427 hci_dev_unlock(hdev);
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1435 ev = cpu_to_le32(get_current_settings(hdev));
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1440 int mgmt_new_settings(struct hci_dev *hdev)
1442 return new_settings(hdev, NULL);
/* NOTE(review): fragment of struct cmd_lookup — the struct header and
 * its other members (sk, mgmt_status) appear missing from this
 * extraction; verify against the upstream file.
 */
1447 struct hci_dev *hdev;
1451 static void settings_rsp(struct pending_cmd *cmd, void *data)
1453 struct cmd_lookup *match = data;
1455 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1457 list_del(&cmd->list);
1459 if (match->sk == NULL) {
1460 match->sk = cmd->sk;
1461 sock_hold(match->sk);
1464 mgmt_pending_free(cmd);
1467 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1471 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1472 mgmt_pending_remove(cmd);
1475 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1477 if (cmd->cmd_complete) {
1480 cmd->cmd_complete(cmd, *status);
1481 mgmt_pending_remove(cmd);
1486 cmd_status_rsp(cmd, data);
1489 static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1491 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1492 cmd->param, cmd->param_len);
1495 static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1497 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1498 sizeof(struct mgmt_addr_info));
1501 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1503 if (!lmp_bredr_capable(hdev))
1504 return MGMT_STATUS_NOT_SUPPORTED;
1505 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1506 return MGMT_STATUS_REJECTED;
1508 return MGMT_STATUS_SUCCESS;
1511 static u8 mgmt_le_support(struct hci_dev *hdev)
1513 if (!lmp_le_capable(hdev))
1514 return MGMT_STATUS_NOT_SUPPORTED;
1515 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1516 return MGMT_STATUS_REJECTED;
1518 return MGMT_STATUS_SUCCESS;
1521 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1524 struct pending_cmd *cmd;
1525 struct mgmt_mode *cp;
1526 struct hci_request req;
1529 BT_DBG("status 0x%02x", status);
1533 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1538 u8 mgmt_err = mgmt_status(status);
1539 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1540 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1546 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1549 if (hdev->discov_timeout > 0) {
1550 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1551 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1555 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1559 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1562 new_settings(hdev, cmd->sk);
1564 /* When the discoverable mode gets changed, make sure
1565 * that class of device has the limited discoverable
1566 * bit correctly set. Also update page scan based on whitelist
1569 hci_req_init(&req, hdev);
1570 __hci_update_page_scan(&req);
1572 hci_req_run(&req, NULL);
1575 mgmt_pending_remove(cmd);
1578 hci_dev_unlock(hdev);
1581 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1584 struct mgmt_cp_set_discoverable *cp = data;
1585 struct pending_cmd *cmd;
1586 struct hci_request req;
1591 BT_DBG("request for %s", hdev->name);
1593 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1594 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1595 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1596 MGMT_STATUS_REJECTED);
1598 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1599 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1600 MGMT_STATUS_INVALID_PARAMS);
1602 timeout = __le16_to_cpu(cp->timeout);
1604 /* Disabling discoverable requires that no timeout is set,
1605 * and enabling limited discoverable requires a timeout.
1607 if ((cp->val == 0x00 && timeout > 0) ||
1608 (cp->val == 0x02 && timeout == 0))
1609 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1610 MGMT_STATUS_INVALID_PARAMS);
1614 if (!hdev_is_powered(hdev) && timeout > 0) {
1615 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1616 MGMT_STATUS_NOT_POWERED);
1620 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1621 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1622 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1627 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1628 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1629 MGMT_STATUS_REJECTED);
1633 if (!hdev_is_powered(hdev)) {
1634 bool changed = false;
1636 /* Setting limited discoverable when powered off is
1637 * not a valid operation since it requires a timeout
1638 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1640 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1641 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1645 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1650 err = new_settings(hdev, sk);
1655 /* If the current mode is the same, then just update the timeout
1656 * value with the new value. And if only the timeout gets updated,
1657 * then no need for any HCI transactions.
1659 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1660 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1661 &hdev->dev_flags)) {
1662 cancel_delayed_work(&hdev->discov_off);
1663 hdev->discov_timeout = timeout;
1665 if (cp->val && hdev->discov_timeout > 0) {
1666 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1667 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1671 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1675 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1681 /* Cancel any potential discoverable timeout that might be
1682 * still active and store new timeout value. The arming of
1683 * the timeout happens in the complete handler.
1685 cancel_delayed_work(&hdev->discov_off);
1686 hdev->discov_timeout = timeout;
1688 /* Limited discoverable mode */
1689 if (cp->val == 0x02)
1690 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1692 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1694 hci_req_init(&req, hdev);
1696 /* The procedure for LE-only controllers is much simpler - just
1697 * update the advertising data.
1699 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1705 struct hci_cp_write_current_iac_lap hci_cp;
1707 if (cp->val == 0x02) {
1708 /* Limited discoverable mode */
1709 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1710 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1711 hci_cp.iac_lap[1] = 0x8b;
1712 hci_cp.iac_lap[2] = 0x9e;
1713 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1714 hci_cp.iac_lap[4] = 0x8b;
1715 hci_cp.iac_lap[5] = 0x9e;
1717 /* General discoverable mode */
1719 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1720 hci_cp.iac_lap[1] = 0x8b;
1721 hci_cp.iac_lap[2] = 0x9e;
1724 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1725 (hci_cp.num_iac * 3) + 1, &hci_cp);
1727 scan |= SCAN_INQUIRY;
1729 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1732 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1735 update_adv_data(&req);
1737 err = hci_req_run(&req, set_discoverable_complete);
1739 mgmt_pending_remove(cmd);
1742 hci_dev_unlock(hdev);
1746 static void write_fast_connectable(struct hci_request *req, bool enable)
1748 struct hci_dev *hdev = req->hdev;
1749 struct hci_cp_write_page_scan_activity acp;
1752 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1755 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1759 type = PAGE_SCAN_TYPE_INTERLACED;
1761 /* 160 msec page scan interval */
1762 acp.interval = cpu_to_le16(0x0100);
1764 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1766 /* default 1.28 sec page scan */
1767 acp.interval = cpu_to_le16(0x0800);
1770 acp.window = cpu_to_le16(0x0012);
1772 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1773 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1774 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1777 if (hdev->page_scan_type != type)
1778 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1781 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1784 struct pending_cmd *cmd;
1785 struct mgmt_mode *cp;
1786 bool conn_changed, discov_changed;
1788 BT_DBG("status 0x%02x", status);
1792 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1797 u8 mgmt_err = mgmt_status(status);
1798 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1804 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1806 discov_changed = false;
1808 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1810 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1814 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1816 if (conn_changed || discov_changed) {
1817 new_settings(hdev, cmd->sk);
1818 hci_update_page_scan(hdev);
1820 mgmt_update_adv_data(hdev);
1821 hci_update_background_scan(hdev);
1825 mgmt_pending_remove(cmd);
1828 hci_dev_unlock(hdev);
1831 static int set_connectable_update_settings(struct hci_dev *hdev,
1832 struct sock *sk, u8 val)
1834 bool changed = false;
1837 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1841 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1843 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1844 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1847 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1852 hci_update_page_scan(hdev);
1853 hci_update_background_scan(hdev);
1854 return new_settings(hdev, sk);
1860 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1863 struct mgmt_mode *cp = data;
1864 struct pending_cmd *cmd;
1865 struct hci_request req;
1869 BT_DBG("request for %s", hdev->name);
1871 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1872 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1873 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1874 MGMT_STATUS_REJECTED);
1876 if (cp->val != 0x00 && cp->val != 0x01)
1877 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1878 MGMT_STATUS_INVALID_PARAMS);
1882 if (!hdev_is_powered(hdev)) {
1883 err = set_connectable_update_settings(hdev, sk, cp->val);
1887 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1888 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1889 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1894 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1900 hci_req_init(&req, hdev);
1902 /* If BR/EDR is not enabled and we disable advertising as a
1903 * by-product of disabling connectable, we need to update the
1904 * advertising flags.
1906 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1908 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1909 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1911 update_adv_data(&req);
1912 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1916 /* If we don't have any whitelist entries just
1917 * disable all scanning. If there are entries
1918 * and we had both page and inquiry scanning
1919 * enabled then fall back to only page scanning.
1920 * Otherwise no changes are needed.
1922 if (list_empty(&hdev->whitelist))
1923 scan = SCAN_DISABLED;
1924 else if (test_bit(HCI_ISCAN, &hdev->flags))
1927 goto no_scan_update;
1929 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1930 hdev->discov_timeout > 0)
1931 cancel_delayed_work(&hdev->discov_off);
1934 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1938 /* If we're going from non-connectable to connectable or
1939 * vice-versa when fast connectable is enabled ensure that fast
1940 * connectable gets disabled. write_fast_connectable won't do
1941 * anything if the page scan parameters are already what they
1944 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1945 write_fast_connectable(&req, false);
1947 /* Update the advertising parameters if necessary */
1948 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1949 enable_advertising(&req);
1951 err = hci_req_run(&req, set_connectable_complete);
1953 mgmt_pending_remove(cmd);
1954 if (err == -ENODATA)
1955 err = set_connectable_update_settings(hdev, sk,
1961 hci_dev_unlock(hdev);
1965 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1968 struct mgmt_mode *cp = data;
1972 BT_DBG("request for %s", hdev->name);
1974 if (cp->val != 0x00 && cp->val != 0x01)
1975 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1976 MGMT_STATUS_INVALID_PARAMS);
1981 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1983 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1985 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1990 err = new_settings(hdev, sk);
1993 hci_dev_unlock(hdev);
1997 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2000 struct mgmt_mode *cp = data;
2001 struct pending_cmd *cmd;
2005 BT_DBG("request for %s", hdev->name);
2007 status = mgmt_bredr_support(hdev);
2009 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2012 if (cp->val != 0x00 && cp->val != 0x01)
2013 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2014 MGMT_STATUS_INVALID_PARAMS);
2018 if (!hdev_is_powered(hdev)) {
2019 bool changed = false;
2021 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2022 &hdev->dev_flags)) {
2023 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2027 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2032 err = new_settings(hdev, sk);
2037 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2038 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2045 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2046 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2050 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2056 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2058 mgmt_pending_remove(cmd);
2063 hci_dev_unlock(hdev);
2067 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2069 struct mgmt_mode *cp = data;
2070 struct pending_cmd *cmd;
2074 BT_DBG("request for %s", hdev->name);
2076 status = mgmt_bredr_support(hdev);
2078 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2080 if (!lmp_ssp_capable(hdev))
2081 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2082 MGMT_STATUS_NOT_SUPPORTED);
2084 if (cp->val != 0x00 && cp->val != 0x01)
2085 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2086 MGMT_STATUS_INVALID_PARAMS);
2090 if (!hdev_is_powered(hdev)) {
2094 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2097 changed = test_and_clear_bit(HCI_SSP_ENABLED,
2100 changed = test_and_clear_bit(HCI_HS_ENABLED,
2103 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2106 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2111 err = new_settings(hdev, sk);
2116 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2117 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2118 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2123 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2124 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2128 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2134 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2135 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2136 sizeof(cp->val), &cp->val);
2138 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2140 mgmt_pending_remove(cmd);
2145 hci_dev_unlock(hdev);
2149 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2151 struct mgmt_mode *cp = data;
2156 BT_DBG("request for %s", hdev->name);
2158 status = mgmt_bredr_support(hdev);
2160 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2162 if (!lmp_ssp_capable(hdev))
2163 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2164 MGMT_STATUS_NOT_SUPPORTED);
2166 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2167 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2168 MGMT_STATUS_REJECTED);
2170 if (cp->val != 0x00 && cp->val != 0x01)
2171 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2172 MGMT_STATUS_INVALID_PARAMS);
2177 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2179 if (hdev_is_powered(hdev)) {
2180 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2181 MGMT_STATUS_REJECTED);
2185 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2188 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2193 err = new_settings(hdev, sk);
2196 hci_dev_unlock(hdev);
2200 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2202 struct cmd_lookup match = { NULL, hdev };
2207 u8 mgmt_err = mgmt_status(status);
2209 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2214 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2216 new_settings(hdev, match.sk);
2221 /* Make sure the controller has a good default for
2222 * advertising data. Restrict the update to when LE
2223 * has actually been enabled. During power on, the
2224 * update in powered_update_hci will take care of it.
2226 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2227 struct hci_request req;
2229 hci_req_init(&req, hdev);
2230 update_adv_data(&req);
2231 update_scan_rsp_data(&req);
2232 __hci_update_background_scan(&req);
2233 hci_req_run(&req, NULL);
2237 hci_dev_unlock(hdev);
2240 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2242 struct mgmt_mode *cp = data;
2243 struct hci_cp_write_le_host_supported hci_cp;
2244 struct pending_cmd *cmd;
2245 struct hci_request req;
2249 BT_DBG("request for %s", hdev->name);
2251 if (!lmp_le_capable(hdev))
2252 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2253 MGMT_STATUS_NOT_SUPPORTED);
2255 if (cp->val != 0x00 && cp->val != 0x01)
2256 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2257 MGMT_STATUS_INVALID_PARAMS);
2259 /* LE-only devices do not allow toggling LE on/off */
2260 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2261 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2262 MGMT_STATUS_REJECTED);
2267 enabled = lmp_host_le_capable(hdev);
2269 if (!hdev_is_powered(hdev) || val == enabled) {
2270 bool changed = false;
2272 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2273 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2277 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2278 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2282 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2287 err = new_settings(hdev, sk);
2292 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2293 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2294 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2299 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2305 hci_req_init(&req, hdev);
2307 memset(&hci_cp, 0, sizeof(hci_cp));
2311 hci_cp.simul = 0x00;
2313 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2314 disable_advertising(&req);
2317 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2320 err = hci_req_run(&req, le_enable_complete);
2322 mgmt_pending_remove(cmd);
2325 hci_dev_unlock(hdev);
2329 /* This is a helper function to test for pending mgmt commands that can
2330 * cause CoD or EIR HCI commands. We can only allow one such pending
2331 * mgmt command at a time since otherwise we cannot easily track what
2332 * the current values are, will be, and based on that calculate if a new
2333 * HCI command needs to be sent and if yes with what value.
2335 static bool pending_eir_or_class(struct hci_dev *hdev)
2337 struct pending_cmd *cmd;
2339 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2340 switch (cmd->opcode) {
2341 case MGMT_OP_ADD_UUID:
2342 case MGMT_OP_REMOVE_UUID:
2343 case MGMT_OP_SET_DEV_CLASS:
2344 case MGMT_OP_SET_POWERED:
2352 static const u8 bluetooth_base_uuid[] = {
2353 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2354 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2357 static u8 get_uuid_size(const u8 *uuid)
2361 if (memcmp(uuid, bluetooth_base_uuid, 12))
2364 val = get_unaligned_le32(&uuid[12]);
2371 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2373 struct pending_cmd *cmd;
2377 cmd = mgmt_pending_find(mgmt_op, hdev);
2381 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2382 hdev->dev_class, 3);
2384 mgmt_pending_remove(cmd);
2387 hci_dev_unlock(hdev);
2390 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2392 BT_DBG("status 0x%02x", status);
2394 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2397 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2399 struct mgmt_cp_add_uuid *cp = data;
2400 struct pending_cmd *cmd;
2401 struct hci_request req;
2402 struct bt_uuid *uuid;
2405 BT_DBG("request for %s", hdev->name);
2409 if (pending_eir_or_class(hdev)) {
2410 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2415 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2421 memcpy(uuid->uuid, cp->uuid, 16);
2422 uuid->svc_hint = cp->svc_hint;
2423 uuid->size = get_uuid_size(cp->uuid);
2425 list_add_tail(&uuid->list, &hdev->uuids);
2427 hci_req_init(&req, hdev);
2432 err = hci_req_run(&req, add_uuid_complete);
2434 if (err != -ENODATA)
2437 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2438 hdev->dev_class, 3);
2442 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2451 hci_dev_unlock(hdev);
2455 static bool enable_service_cache(struct hci_dev *hdev)
2457 if (!hdev_is_powered(hdev))
2460 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2461 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2469 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2471 BT_DBG("status 0x%02x", status);
2473 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2476 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2479 struct mgmt_cp_remove_uuid *cp = data;
2480 struct pending_cmd *cmd;
2481 struct bt_uuid *match, *tmp;
2482 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2483 struct hci_request req;
2486 BT_DBG("request for %s", hdev->name);
2490 if (pending_eir_or_class(hdev)) {
2491 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2496 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2497 hci_uuids_clear(hdev);
2499 if (enable_service_cache(hdev)) {
2500 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2501 0, hdev->dev_class, 3);
2510 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2511 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2514 list_del(&match->list);
2520 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2521 MGMT_STATUS_INVALID_PARAMS);
2526 hci_req_init(&req, hdev);
2531 err = hci_req_run(&req, remove_uuid_complete);
2533 if (err != -ENODATA)
2536 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2537 hdev->dev_class, 3);
2541 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2550 hci_dev_unlock(hdev);
2554 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2556 BT_DBG("status 0x%02x", status);
2558 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2561 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2564 struct mgmt_cp_set_dev_class *cp = data;
2565 struct pending_cmd *cmd;
2566 struct hci_request req;
2569 BT_DBG("request for %s", hdev->name);
2571 if (!lmp_bredr_capable(hdev))
2572 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2573 MGMT_STATUS_NOT_SUPPORTED);
2577 if (pending_eir_or_class(hdev)) {
2578 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2583 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2584 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2585 MGMT_STATUS_INVALID_PARAMS);
2589 hdev->major_class = cp->major;
2590 hdev->minor_class = cp->minor;
2592 if (!hdev_is_powered(hdev)) {
2593 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2594 hdev->dev_class, 3);
2598 hci_req_init(&req, hdev);
2600 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2601 hci_dev_unlock(hdev);
2602 cancel_delayed_work_sync(&hdev->service_cache);
2609 err = hci_req_run(&req, set_class_complete);
2611 if (err != -ENODATA)
2614 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2615 hdev->dev_class, 3);
2619 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2628 hci_dev_unlock(hdev);
2632 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2635 struct mgmt_cp_load_link_keys *cp = data;
2636 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2637 sizeof(struct mgmt_link_key_info));
2638 u16 key_count, expected_len;
2642 BT_DBG("request for %s", hdev->name);
2644 if (!lmp_bredr_capable(hdev))
2645 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2646 MGMT_STATUS_NOT_SUPPORTED);
2648 key_count = __le16_to_cpu(cp->key_count);
2649 if (key_count > max_key_count) {
2650 BT_ERR("load_link_keys: too big key_count value %u",
2652 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2653 MGMT_STATUS_INVALID_PARAMS);
2656 expected_len = sizeof(*cp) + key_count *
2657 sizeof(struct mgmt_link_key_info);
2658 if (expected_len != len) {
2659 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2661 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2662 MGMT_STATUS_INVALID_PARAMS);
2665 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2666 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2667 MGMT_STATUS_INVALID_PARAMS);
2669 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2672 for (i = 0; i < key_count; i++) {
2673 struct mgmt_link_key_info *key = &cp->keys[i];
2675 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2676 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2677 MGMT_STATUS_INVALID_PARAMS);
2682 hci_link_keys_clear(hdev);
2685 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2688 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2692 new_settings(hdev, NULL);
2694 for (i = 0; i < key_count; i++) {
2695 struct mgmt_link_key_info *key = &cp->keys[i];
2697 /* Always ignore debug keys and require a new pairing if
2698 * the user wants to use them.
2700 if (key->type == HCI_LK_DEBUG_COMBINATION)
2703 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2704 key->type, key->pin_len, NULL);
2707 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2709 hci_dev_unlock(hdev);
2714 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2715 u8 addr_type, struct sock *skip_sk)
2717 struct mgmt_ev_device_unpaired ev;
2719 bacpy(&ev.addr.bdaddr, bdaddr);
2720 ev.addr.type = addr_type;
2722 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2726 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2729 struct mgmt_cp_unpair_device *cp = data;
2730 struct mgmt_rp_unpair_device rp;
2731 struct hci_cp_disconnect dc;
2732 struct pending_cmd *cmd;
2733 struct hci_conn *conn;
2736 memset(&rp, 0, sizeof(rp));
2737 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2738 rp.addr.type = cp->addr.type;
2740 if (!bdaddr_type_is_valid(cp->addr.type))
2741 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2742 MGMT_STATUS_INVALID_PARAMS,
2745 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2746 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2747 MGMT_STATUS_INVALID_PARAMS,
2752 if (!hdev_is_powered(hdev)) {
2753 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2754 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2758 if (cp->addr.type == BDADDR_BREDR) {
2759 /* If disconnection is requested, then look up the
2760 * connection. If the remote device is connected, it
2761 * will be later used to terminate the link.
2763 * Setting it to NULL explicitly will cause no
2764 * termination of the link.
2767 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2772 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2776 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2779 /* Defer clearing up the connection parameters
2780 * until closing to give a chance of keeping
2781 * them if a repairing happens.
2783 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2785 /* If disconnection is not requested, then
2786 * clear the connection variable so that the
2787 * link is not terminated.
2789 if (!cp->disconnect)
2793 if (cp->addr.type == BDADDR_LE_PUBLIC)
2794 addr_type = ADDR_LE_DEV_PUBLIC;
2796 addr_type = ADDR_LE_DEV_RANDOM;
2798 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2800 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2804 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2805 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2809 /* If the connection variable is set, then termination of the
2810 * link is requested.
2813 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2815 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2819 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2826 cmd->cmd_complete = addr_cmd_complete;
2828 dc.handle = cpu_to_le16(conn->handle);
2829 dc.reason = 0x13; /* Remote User Terminated Connection */
2830 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2832 mgmt_pending_remove(cmd);
2835 hci_dev_unlock(hdev);
2839 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2842 struct mgmt_cp_disconnect *cp = data;
2843 struct mgmt_rp_disconnect rp;
2844 struct pending_cmd *cmd;
2845 struct hci_conn *conn;
2850 memset(&rp, 0, sizeof(rp));
2851 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2852 rp.addr.type = cp->addr.type;
2854 if (!bdaddr_type_is_valid(cp->addr.type))
2855 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2856 MGMT_STATUS_INVALID_PARAMS,
2861 if (!test_bit(HCI_UP, &hdev->flags)) {
2862 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2863 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2867 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2868 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2869 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2873 if (cp->addr.type == BDADDR_BREDR)
2874 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2877 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2879 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2880 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2881 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2885 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2891 cmd->cmd_complete = generic_cmd_complete;
2893 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2895 mgmt_pending_remove(cmd);
2898 hci_dev_unlock(hdev);
2902 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2904 switch (link_type) {
2906 switch (addr_type) {
2907 case ADDR_LE_DEV_PUBLIC:
2908 return BDADDR_LE_PUBLIC;
2911 /* Fallback to LE Random address type */
2912 return BDADDR_LE_RANDOM;
2916 /* Fallback to BR/EDR type */
2917 return BDADDR_BREDR;
2921 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2924 struct mgmt_rp_get_connections *rp;
2934 if (!hdev_is_powered(hdev)) {
2935 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2936 MGMT_STATUS_NOT_POWERED);
2941 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2942 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2946 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2947 rp = kmalloc(rp_len, GFP_KERNEL);
2954 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2955 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2957 bacpy(&rp->addr[i].bdaddr, &c->dst);
2958 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2959 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2964 rp->conn_count = cpu_to_le16(i);
2966 /* Recalculate length in case of filtered SCO connections, etc */
2967 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2969 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2975 hci_dev_unlock(hdev);
2979 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2980 struct mgmt_cp_pin_code_neg_reply *cp)
2982 struct pending_cmd *cmd;
2985 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2990 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2991 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2993 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PIN_CODE_REPLY: requires a powered adapter and an
 * existing ACL connection to the peer. A PIN shorter than 16 bytes on a
 * connection pending BT_SECURITY_HIGH is converted into a negative reply;
 * otherwise the PIN is forwarded to the controller via
 * HCI_OP_PIN_CODE_REPLY and tracked as a pending command.
 */
2998 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3001 struct hci_conn *conn;
3002 struct mgmt_cp_pin_code_reply *cp = data;
3003 struct hci_cp_pin_code_reply reply;
3004 struct pending_cmd *cmd;
3011 if (!hdev_is_powered(hdev)) {
3012 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3013 MGMT_STATUS_NOT_POWERED);
3017 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3019 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3020 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; reject the pairing on the
 * HCI level instead of sending the too-short PIN to the controller.
 */
3024 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3025 struct mgmt_cp_pin_code_neg_reply ncp;
3027 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3029 BT_ERR("PIN code is not 16 bytes long");
3031 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3033 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3034 MGMT_STATUS_INVALID_PARAMS);
3039 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3045 cmd->cmd_complete = addr_cmd_complete;
3047 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3048 reply.pin_len = cp->pin_len;
3049 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3051 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3053 mgmt_pending_remove(cmd);
3056 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY: validates the requested IO
 * capability value and stores it in hdev->io_capability under the device
 * lock. Always replies with a command-complete (invalid values get
 * MGMT_STATUS_INVALID_PARAMS as the complete's status).
 */
3060 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3063 struct mgmt_cp_set_io_capability *cp = data;
3067 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3068 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3069 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3073 hdev->io_capability = cp->io_capability;
3075 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3076 hdev->io_capability);
3078 hci_dev_unlock(hdev);
3080 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Look up the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at the given connection. Returns NULL (in the elided tail) when no
 * matching pending pairing exists.
 */
3084 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3086 struct hci_dev *hdev = conn->hdev;
3087 struct pending_cmd *cmd;
3089 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3090 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3093 if (cmd->user_data != conn)
/* Complete a pending PAIR_DEVICE command with the given status, detach
 * all pairing callbacks from the connection, and drop the reference the
 * pairing took on it.
 */
3102 static int pairing_complete(struct pending_cmd *cmd, u8 status)
3104 struct mgmt_rp_pair_device rp;
3105 struct hci_conn *conn = cmd->user_data;
3108 bacpy(&rp.addr.bdaddr, &conn->dst);
3109 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3111 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3114 /* So we don't get further callbacks for this connection */
3115 conn->connect_cfm_cb = NULL;
3116 conn->security_cfm_cb = NULL;
3117 conn->disconn_cfm_cb = NULL;
3119 hci_conn_drop(conn);
3121 /* The device is paired so there is no need to remove
3122 * its connection parameters anymore.
3124 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called when SMP pairing finishes: map the boolean outcome to a mgmt
 * status and complete/remove the matching pending PAIR_DEVICE command,
 * if one exists.
 */
3131 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3133 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3134 struct pending_cmd *cmd;
3136 cmd = find_pairing(conn);
3138 cmd->cmd_complete(cmd, status);
3139 mgmt_pending_remove(cmd);
/* BR/EDR connection callback: finish the pending pairing with the HCI
 * status translated to a mgmt status. Quietly logs if no pending pairing
 * command is found.
 */
3143 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3145 struct pending_cmd *cmd;
3147 BT_DBG("status %u", status);
3149 cmd = find_pairing(conn);
3151 BT_DBG("Unable to find a pending command");
3155 cmd->cmd_complete(cmd, mgmt_status(status));
3156 mgmt_pending_remove(cmd);
/* LE connection callback counterpart of pairing_complete_cb(): complete
 * the pending pairing with the translated HCI status. (An early-return
 * condition between the BT_DBG and find_pairing is elided in this view.)
 */
3159 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3161 struct pending_cmd *cmd;
3163 BT_DBG("status %u", status);
3168 cmd = find_pairing(conn);
3170 BT_DBG("Unable to find a pending command");
3174 cmd->cmd_complete(cmd, mgmt_status(status));
3175 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PAIR_DEVICE: validates address type and IO
 * capability, establishes an ACL (BR/EDR) or LE connection to the peer,
 * registers the pairing callbacks on the connection and tracks the
 * operation as a pending command that completes via pairing_complete().
 */
3178 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3181 struct mgmt_cp_pair_device *cp = data;
3182 struct mgmt_rp_pair_device rp;
3183 struct pending_cmd *cmd;
3184 u8 sec_level, auth_type;
3185 struct hci_conn *conn;
3190 memset(&rp, 0, sizeof(rp));
3191 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3192 rp.addr.type = cp->addr.type;
3194 if (!bdaddr_type_is_valid(cp->addr.type))
3195 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3196 MGMT_STATUS_INVALID_PARAMS,
3199 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3200 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3201 MGMT_STATUS_INVALID_PARAMS,
3206 if (!hdev_is_powered(hdev)) {
3207 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3208 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Pairing always asks for medium security and dedicated bonding */
3212 sec_level = BT_SECURITY_MEDIUM;
3213 auth_type = HCI_AT_DEDICATED_BONDING;
3215 if (cp->addr.type == BDADDR_BREDR) {
3216 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3221 /* Convert from L2CAP channel address type to HCI address type
3223 if (cp->addr.type == BDADDR_LE_PUBLIC)
3224 addr_type = ADDR_LE_DEV_PUBLIC;
3226 addr_type = ADDR_LE_DEV_RANDOM;
3228 /* When pairing a new device, it is expected to remember
3229 * this device for future connections. Adding the connection
3230 * parameter information ahead of time allows tracking
3231 * of the slave preferred values and will speed up any
3232 * further connection establishment.
3234 * If connection parameters already exist, then they
3235 * will be kept and this function does nothing.
3237 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3239 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3240 sec_level, HCI_LE_CONN_TIMEOUT,
/* Connection attempt failed: map -EBUSY to BUSY, anything else to
 * CONNECT_FAILED.
 */
3247 if (PTR_ERR(conn) == -EBUSY)
3248 status = MGMT_STATUS_BUSY;
3250 status = MGMT_STATUS_CONNECT_FAILED;
3252 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect_cfm_cb already set means another pairing owns this
 * connection; refuse with BUSY and drop our reference.
 */
3258 if (conn->connect_cfm_cb) {
3259 hci_conn_drop(conn);
3260 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3261 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3265 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3268 hci_conn_drop(conn);
3272 cmd->cmd_complete = pairing_complete;
3274 /* For LE, just connecting isn't a proof that the pairing finished */
3275 if (cp->addr.type == BDADDR_BREDR) {
3276 conn->connect_cfm_cb = pairing_complete_cb;
3277 conn->security_cfm_cb = pairing_complete_cb;
3278 conn->disconn_cfm_cb = pairing_complete_cb;
3280 conn->connect_cfm_cb = le_pairing_complete_cb;
3281 conn->security_cfm_cb = le_pairing_complete_cb;
3282 conn->disconn_cfm_cb = le_pairing_complete_cb;
3285 conn->io_capability = cp->io_cap;
/* Hold a reference to the connection for the lifetime of the command */
3286 cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: finish the pairing right away */
3288 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3289 hci_conn_security(conn, sec_level, auth_type, true)) {
3290 cmd->cmd_complete(cmd, 0);
3291 mgmt_pending_remove(cmd);
3297 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: locates the pending
 * PAIR_DEVICE command, verifies the supplied address matches the
 * connection being paired, and completes the pairing with
 * MGMT_STATUS_CANCELLED before acknowledging the cancel itself.
 */
3301 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3304 struct mgmt_addr_info *addr = data;
3305 struct pending_cmd *cmd;
3306 struct hci_conn *conn;
3313 if (!hdev_is_powered(hdev)) {
3314 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3315 MGMT_STATUS_NOT_POWERED);
3319 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3321 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3322 MGMT_STATUS_INVALID_PARAMS);
3326 conn = cmd->user_data;
/* The cancel must name the same peer the pending pairing targets */
3328 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3329 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3330 MGMT_STATUS_INVALID_PARAMS);
3334 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3335 mgmt_pending_remove(cmd);
3337 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3338 addr, sizeof(*addr));
3340 hci_dev_unlock(hdev);
/* Common backend for all user pairing responses (PIN neg reply, user
 * confirm/passkey pos+neg replies). LE addresses are routed to SMP via
 * smp_user_confirm_reply() and answered immediately; BR/EDR responses
 * are forwarded to the controller with the given hci_op and tracked as
 * a pending command.
 */
3344 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3345 struct mgmt_addr_info *addr, u16 mgmt_op,
3346 u16 hci_op, __le32 passkey)
3348 struct pending_cmd *cmd;
3349 struct hci_conn *conn;
3354 if (!hdev_is_powered(hdev)) {
3355 err = cmd_complete(sk, hdev->id, mgmt_op,
3356 MGMT_STATUS_NOT_POWERED, addr,
3361 if (addr->type == BDADDR_BREDR)
3362 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3364 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3367 err = cmd_complete(sk, hdev->id, mgmt_op,
3368 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing is handled entirely by SMP, not by HCI commands */
3373 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3374 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3376 err = cmd_complete(sk, hdev->id, mgmt_op,
3377 MGMT_STATUS_SUCCESS, addr,
3380 err = cmd_complete(sk, hdev->id, mgmt_op,
3381 MGMT_STATUS_FAILED, addr,
3387 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3393 cmd->cmd_complete = addr_cmd_complete;
3395 /* Continue with pairing via HCI */
/* Passkey replies carry bdaddr + passkey; all other ops send only the
 * bdaddr as the HCI command parameter.
 */
3396 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3397 struct hci_cp_user_passkey_reply cp;
3399 bacpy(&cp.bdaddr, &addr->bdaddr);
3400 cp.passkey = passkey;
3401 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3403 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3407 mgmt_pending_remove(cmd);
3410 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the corresponding HCI opcode (no passkey).
 */
3414 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3415 void *data, u16 len)
3417 struct mgmt_cp_pin_code_neg_reply *cp = data;
3421 return user_pairing_resp(sk, hdev, &cp->addr,
3422 MGMT_OP_PIN_CODE_NEG_REPLY,
3423 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_REPLY: validates the exact parameter
 * size, then delegates to user_pairing_resp() (no passkey).
 */
3426 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3429 struct mgmt_cp_user_confirm_reply *cp = data;
3433 if (len != sizeof(*cp))
3434 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3435 MGMT_STATUS_INVALID_PARAMS);
3437 return user_pairing_resp(sk, hdev, &cp->addr,
3438 MGMT_OP_USER_CONFIRM_REPLY,
3439 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper around
 * user_pairing_resp() (no passkey).
 */
3442 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3443 void *data, u16 len)
3445 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3449 return user_pairing_resp(sk, hdev, &cp->addr,
3450 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3451 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_PASSKEY_REPLY: forwards the user-entered
 * passkey through user_pairing_resp().
 */
3454 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3457 struct mgmt_cp_user_passkey_reply *cp = data;
3461 return user_pairing_resp(sk, hdev, &cp->addr,
3462 MGMT_OP_USER_PASSKEY_REPLY,
3463 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper around
 * user_pairing_resp() (no passkey).
 */
3466 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3467 void *data, u16 len)
3469 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3473 return user_pairing_resp(sk, hdev, &cp->addr,
3474 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3475 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append a HCI Write Local Name command carrying hdev->dev_name to the
 * given request (sent when the request is run).
 */
3478 static void update_name(struct hci_request *req)
3480 struct hci_dev *hdev = req->hdev;
3481 struct hci_cp_write_local_name cp;
3483 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3485 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for SET_LOCAL_NAME: answers the
 * pending mgmt command with either a status (on HCI failure) or a
 * command-complete, then removes it.
 */
3488 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3490 struct mgmt_cp_set_local_name *cp;
3491 struct pending_cmd *cmd;
3493 BT_DBG("status 0x%02x", status);
3497 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3504 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3505 mgmt_status(status));
3507 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3510 mgmt_pending_remove(cmd);
3513 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LOCAL_NAME: short-circuits when the names are
 * unchanged, updates only the in-memory names (plus a LOCAL_NAME_CHANGED
 * event) when powered off, and otherwise builds an HCI request to push
 * the new name to the controller and update scan response data.
 */
3516 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3519 struct mgmt_cp_set_local_name *cp = data;
3520 struct pending_cmd *cmd;
3521 struct hci_request req;
3528 /* If the old values are the same as the new ones just return a
3529 * direct command complete event.
3531 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3532 !memcmp(hdev->short_name, cp->short_name,
3533 sizeof(hdev->short_name))) {
3534 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name never goes to the controller, so store it right away */
3539 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3541 if (!hdev_is_powered(hdev)) {
3542 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3544 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3549 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3555 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3561 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3563 hci_req_init(&req, hdev);
3565 if (lmp_bredr_capable(hdev)) {
3570 /* The name is stored in the scan response data and so
3571 * no need to udpate the advertising data here.
3573 if (lmp_le_capable(hdev))
3574 update_scan_rsp_data(&req);
3576 err = hci_req_run(&req, set_name_complete);
3578 mgmt_pending_remove(cmd);
3581 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: requires a powered adapter
 * with SSP support and no duplicate pending read; issues the extended
 * OOB read when BR/EDR Secure Connections is enabled, otherwise the
 * legacy read.
 */
3585 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3586 void *data, u16 data_len)
3588 struct pending_cmd *cmd;
3591 BT_DBG("%s", hdev->name);
3595 if (!hdev_is_powered(hdev)) {
3596 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3597 MGMT_STATUS_NOT_POWERED);
3601 if (!lmp_ssp_capable(hdev)) {
3602 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3603 MGMT_STATUS_NOT_SUPPORTED);
/* Only one OOB read may be in flight at a time */
3607 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3608 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3613 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3619 if (bredr_sc_enabled(hdev))
3620 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3623 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3626 mgmt_pending_remove(cmd);
3629 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA: accepts either the legacy
 * (192-bit hash/rand) or the extended (192+256-bit) parameter layout,
 * distinguished purely by the command length, and stores the remote OOB
 * data for the given BR/EDR address.
 */
3633 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3634 void *data, u16 len)
3638 BT_DBG("%s ", hdev->name);
3642 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3643 struct mgmt_cp_add_remote_oob_data *cp = data;
3646 if (cp->addr.type != BDADDR_BREDR) {
3647 err = cmd_complete(sk, hdev->id,
3648 MGMT_OP_ADD_REMOTE_OOB_DATA,
3649 MGMT_STATUS_INVALID_PARAMS,
3650 &cp->addr, sizeof(cp->addr));
3654 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3655 cp->addr.type, cp->hash,
3656 cp->rand, NULL, NULL);
3658 status = MGMT_STATUS_FAILED;
3660 status = MGMT_STATUS_SUCCESS;
3662 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3663 status, &cp->addr, sizeof(cp->addr));
3664 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3665 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3666 u8 *rand192, *hash192;
3669 if (cp->addr.type != BDADDR_BREDR) {
3670 err = cmd_complete(sk, hdev->id,
3671 MGMT_OP_ADD_REMOTE_OOB_DATA,
3672 MGMT_STATUS_INVALID_PARAMS,
3673 &cp->addr, sizeof(cp->addr));
/* For LE address types the 192-bit values are not used (the elided
 * branch presumably NULLs them out — confirm against full source).
 */
3677 if (bdaddr_type_is_le(cp->addr.type)) {
3681 rand192 = cp->rand192;
3682 hash192 = cp->hash192;
3685 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3686 cp->addr.type, hash192, rand192,
3687 cp->hash256, cp->rand256);
3689 status = MGMT_STATUS_FAILED;
3691 status = MGMT_STATUS_SUCCESS;
3693 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3694 status, &cp->addr, sizeof(cp->addr));
/* Neither known layout: reject the malformed command */
3696 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3697 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3698 MGMT_STATUS_INVALID_PARAMS);
3702 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: BDADDR_ANY clears all
 * stored remote OOB data; otherwise removes the entry for the given
 * BR/EDR address (unknown entries yield INVALID_PARAMS).
 */
3706 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3707 void *data, u16 len)
3709 struct mgmt_cp_remove_remote_oob_data *cp = data;
3713 BT_DBG("%s", hdev->name);
3715 if (cp->addr.type != BDADDR_BREDR)
3716 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3717 MGMT_STATUS_INVALID_PARAMS,
3718 &cp->addr, sizeof(cp->addr));
/* Wildcard address means "clear everything" */
3722 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3723 hci_remote_oob_data_clear(hdev);
3724 status = MGMT_STATUS_SUCCESS;
3728 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3730 status = MGMT_STATUS_INVALID_PARAMS;
3732 status = MGMT_STATUS_SUCCESS;
3735 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3736 status, &cp->addr, sizeof(cp->addr));
3738 hci_dev_unlock(hdev);
/* Build the HCI commands that start discovery of hdev->discovery.type
 * into req. Returns false (with *status set to a mgmt error) when
 * discovery cannot be started; true when the commands were queued.
 */
3742 static bool trigger_discovery(struct hci_request *req, u8 *status)
3744 struct hci_dev *hdev = req->hdev;
3745 struct hci_cp_le_set_scan_param param_cp;
3746 struct hci_cp_le_set_scan_enable enable_cp;
3747 struct hci_cp_inquiry inq_cp;
3748 /* General inquiry access code (GIAC) */
3749 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3753 switch (hdev->discovery.type) {
3754 case DISCOV_TYPE_BREDR:
3755 *status = mgmt_bredr_support(hdev);
3759 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3760 *status = MGMT_STATUS_BUSY;
/* Stale inquiry results must not leak into the new discovery */
3764 hci_inquiry_cache_flush(hdev);
3766 memset(&inq_cp, 0, sizeof(inq_cp));
3767 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3768 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3769 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3772 case DISCOV_TYPE_LE:
3773 case DISCOV_TYPE_INTERLEAVED:
3774 *status = mgmt_le_support(hdev);
/* Interleaved discovery needs BR/EDR as well as LE */
3778 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3779 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3780 *status = MGMT_STATUS_NOT_SUPPORTED;
3784 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3785 /* Don't let discovery abort an outgoing
3786 * connection attempt that's using directed
3789 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3791 *status = MGMT_STATUS_REJECTED;
3795 disable_advertising(req);
3798 /* If controller is scanning, it means the background scanning
3799 * is running. Thus, we should temporarily stop it in order to
3800 * set the discovery scanning parameters.
3802 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3803 hci_req_add_le_scan_disable(req);
3805 memset(&param_cp, 0, sizeof(param_cp));
3807 /* All active scans will be done with either a resolvable
3808 * private address (when privacy feature has been enabled)
3809 * or non-resolvable private address.
3811 err = hci_update_random_address(req, true, &own_addr_type);
3813 *status = MGMT_STATUS_FAILED;
3817 param_cp.type = LE_SCAN_ACTIVE;
3818 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3819 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3820 param_cp.own_address_type = own_addr_type;
3821 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3824 memset(&enable_cp, 0, sizeof(enable_cp));
3825 enable_cp.enable = LE_SCAN_ENABLE;
3826 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3827 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type */
3832 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion callback shared by START_DISCOVERY and
 * START_SERVICE_DISCOVERY: completes the pending command, updates the
 * discovery state, and for LE-based discovery schedules the delayed
 * scan-disable work with a type-dependent timeout.
 */
3839 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3842 struct pending_cmd *cmd;
3843 unsigned long timeout;
3845 BT_DBG("status %d", status);
/* Either of the two start opcodes may own this completion */
3849 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3851 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3854 cmd->cmd_complete(cmd, mgmt_status(status));
3855 mgmt_pending_remove(cmd);
3859 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3863 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3865 switch (hdev->discovery.type) {
3866 case DISCOV_TYPE_LE:
3867 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3869 case DISCOV_TYPE_INTERLEAVED:
3870 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3872 case DISCOV_TYPE_BREDR:
3876 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3882 queue_delayed_work(hdev->workqueue,
3883 &hdev->le_scan_disable, timeout);
3886 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_START_DISCOVERY: rejects when powered off, already
 * discovering, or periodic inquiry is active; otherwise clears any
 * previous discovery filter, records the requested type, and runs the
 * HCI commands built by trigger_discovery().
 */
3889 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3890 void *data, u16 len)
3892 struct mgmt_cp_start_discovery *cp = data;
3893 struct pending_cmd *cmd;
3894 struct hci_request req;
3898 BT_DBG("%s", hdev->name);
3902 if (!hdev_is_powered(hdev)) {
3903 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3904 MGMT_STATUS_NOT_POWERED,
3905 &cp->type, sizeof(cp->type));
3909 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3910 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3911 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3912 MGMT_STATUS_BUSY, &cp->type,
3917 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
3923 cmd->cmd_complete = generic_cmd_complete;
3925 /* Clear the discovery filter first to free any previously
3926 * allocated memory for the UUID list.
3928 hci_discovery_filter_clear(hdev);
3930 hdev->discovery.type = cp->type;
3931 hdev->discovery.report_invalid_rssi = false;
3933 hci_req_init(&req, hdev);
3935 if (!trigger_discovery(&req, &status)) {
3936 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3937 status, &cp->type, sizeof(cp->type));
3938 mgmt_pending_remove(cmd);
3942 err = hci_req_run(&req, start_discovery_complete);
3944 mgmt_pending_remove(cmd);
3948 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3951 hci_dev_unlock(hdev);
/* cmd_complete helper for START_SERVICE_DISCOVERY pending commands:
 * replies with the command's own opcode and (elided) stored parameters.
 */
3955 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
3957 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery()
 * but additionally validates and stores an RSSI threshold and a
 * variable-length list of 128-bit service UUIDs used to filter results.
 */
3961 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3962 void *data, u16 len)
3964 struct mgmt_cp_start_service_discovery *cp = data;
3965 struct pending_cmd *cmd;
3966 struct hci_request req;
/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16 */
3967 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3968 u16 uuid_count, expected_len;
3972 BT_DBG("%s", hdev->name);
3976 if (!hdev_is_powered(hdev)) {
3977 err = cmd_complete(sk, hdev->id,
3978 MGMT_OP_START_SERVICE_DISCOVERY,
3979 MGMT_STATUS_NOT_POWERED,
3980 &cp->type, sizeof(cp->type));
3984 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3985 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3986 err = cmd_complete(sk, hdev->id,
3987 MGMT_OP_START_SERVICE_DISCOVERY,
3988 MGMT_STATUS_BUSY, &cp->type,
3993 uuid_count = __le16_to_cpu(cp->uuid_count);
3994 if (uuid_count > max_uuid_count) {
3995 BT_ERR("service_discovery: too big uuid_count value %u",
3997 err = cmd_complete(sk, hdev->id,
3998 MGMT_OP_START_SERVICE_DISCOVERY,
3999 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Command length must exactly match the advertised UUID count */
4004 expected_len = sizeof(*cp) + uuid_count * 16;
4005 if (expected_len != len) {
4006 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4008 err = cmd_complete(sk, hdev->id,
4009 MGMT_OP_START_SERVICE_DISCOVERY,
4010 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4015 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4022 cmd->cmd_complete = service_discovery_cmd_complete;
4024 /* Clear the discovery filter first to free any previously
4025 * allocated memory for the UUID list.
4027 hci_discovery_filter_clear(hdev);
4029 hdev->discovery.type = cp->type;
4030 hdev->discovery.rssi = cp->rssi;
4031 hdev->discovery.uuid_count = uuid_count;
4033 if (uuid_count > 0) {
4034 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4036 if (!hdev->discovery.uuids) {
4037 err = cmd_complete(sk, hdev->id,
4038 MGMT_OP_START_SERVICE_DISCOVERY,
4040 &cp->type, sizeof(cp->type));
4041 mgmt_pending_remove(cmd);
4046 hci_req_init(&req, hdev);
4048 if (!trigger_discovery(&req, &status)) {
4049 err = cmd_complete(sk, hdev->id,
4050 MGMT_OP_START_SERVICE_DISCOVERY,
4051 status, &cp->type, sizeof(cp->type));
4052 mgmt_pending_remove(cmd);
4056 err = hci_req_run(&req, start_discovery_complete);
4058 mgmt_pending_remove(cmd);
4062 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4065 hci_dev_unlock(hdev);
/* HCI request completion callback for STOP_DISCOVERY: completes the
 * pending command and, on success, moves discovery to STOPPED.
 */
4069 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4071 struct pending_cmd *cmd;
4073 BT_DBG("status %d", status);
4077 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4079 cmd->cmd_complete(cmd, mgmt_status(status));
4080 mgmt_pending_remove(cmd);
4084 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4086 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_STOP_DISCOVERY: rejects when discovery is not
 * active or the requested type doesn't match the running one, then runs
 * hci_stop_discovery(). An -ENODATA request run means nothing needed to
 * be sent, so the command is completed immediately.
 */
4089 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4092 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4093 struct pending_cmd *cmd;
4094 struct hci_request req;
4097 BT_DBG("%s", hdev->name);
4101 if (!hci_discovery_active(hdev)) {
4102 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4103 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4104 sizeof(mgmt_cp->type));
4108 if (hdev->discovery.type != mgmt_cp->type) {
4109 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4110 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
4111 sizeof(mgmt_cp->type));
4115 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4121 cmd->cmd_complete = generic_cmd_complete;
4123 hci_req_init(&req, hdev);
4125 hci_stop_discovery(&req);
4127 err = hci_req_run(&req, stop_discovery_complete);
4129 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4133 mgmt_pending_remove(cmd);
4135 /* If no HCI commands were sent we're done */
4136 if (err == -ENODATA) {
4137 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4138 &mgmt_cp->type, sizeof(mgmt_cp->type));
4139 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4143 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CONFIRM_NAME: during active discovery, marks an
 * inquiry-cache entry as name-known or name-needed (the latter also
 * updates the name-resolve ordering).
 */
4147 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4150 struct mgmt_cp_confirm_name *cp = data;
4151 struct inquiry_entry *e;
4154 BT_DBG("%s", hdev->name);
4158 if (!hci_discovery_active(hdev)) {
4159 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4160 MGMT_STATUS_FAILED, &cp->addr,
4165 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4167 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4168 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4173 if (cp->name_known) {
4174 e->name_state = NAME_KNOWN;
4177 e->name_state = NAME_NEEDED;
4178 hci_inquiry_cache_update_resolve(hdev, e);
4181 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4185 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_BLOCK_DEVICE: adds the address to the blacklist
 * and, on success, emits a DEVICE_BLOCKED event to other mgmt sockets.
 */
4189 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4192 struct mgmt_cp_block_device *cp = data;
4196 BT_DBG("%s", hdev->name);
4198 if (!bdaddr_type_is_valid(cp->addr.type))
4199 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4200 MGMT_STATUS_INVALID_PARAMS,
4201 &cp->addr, sizeof(cp->addr));
4205 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4208 status = MGMT_STATUS_FAILED;
4212 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4214 status = MGMT_STATUS_SUCCESS;
4217 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4218 &cp->addr, sizeof(cp->addr));
4220 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_UNBLOCK_DEVICE: removes the address from the
 * blacklist (failure maps to INVALID_PARAMS, i.e. it wasn't blocked)
 * and emits a DEVICE_UNBLOCKED event on success.
 */
4225 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4228 struct mgmt_cp_unblock_device *cp = data;
4232 BT_DBG("%s", hdev->name);
4234 if (!bdaddr_type_is_valid(cp->addr.type))
4235 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4236 MGMT_STATUS_INVALID_PARAMS,
4237 &cp->addr, sizeof(cp->addr));
4241 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4244 status = MGMT_STATUS_INVALID_PARAMS;
4248 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4250 status = MGMT_STATUS_SUCCESS;
4253 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4254 &cp->addr, sizeof(cp->addr));
4256 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEVICE_ID: validates the DID source (0x0000-
 * 0x0002), stores the Device ID fields in hdev, and runs an HCI request
 * (elided here, presumably refreshing the EIR — confirm in full source).
 */
4261 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4264 struct mgmt_cp_set_device_id *cp = data;
4265 struct hci_request req;
4269 BT_DBG("%s", hdev->name);
4271 source = __le16_to_cpu(cp->source);
4273 if (source > 0x0002)
4274 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4275 MGMT_STATUS_INVALID_PARAMS);
4279 hdev->devid_source = source;
4280 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4281 hdev->devid_product = __le16_to_cpu(cp->product);
4282 hdev->devid_version = __le16_to_cpu(cp->version);
4284 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4286 hci_req_init(&req, hdev);
4288 hci_req_run(&req, NULL);
4290 hci_dev_unlock(hdev);
/* HCI request completion callback for SET_ADVERTISING: on failure sends
 * a status to all pending commands; on success syncs HCI_ADVERTISING to
 * the actual HCI_LE_ADV state, answers the pending commands with the new
 * settings, and broadcasts a settings-changed event.
 */
4295 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4298 struct cmd_lookup match = { NULL, hdev };
4303 u8 mgmt_err = mgmt_status(status);
4305 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4306 cmd_status_rsp, &mgmt_err);
4310 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4311 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4313 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4315 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4318 new_settings(hdev, match.sk);
4324 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_ADVERTISING: toggles LE advertising. In cases
 * where HCI traffic must be avoided (powered off, value unchanged, LE
 * links up, or an active LE scan running) only the flag is flipped and
 * a settings response sent; otherwise an HCI request enables/disables
 * advertising with set_advertising_complete() as its callback.
 */
4327 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4330 struct mgmt_mode *cp = data;
4331 struct pending_cmd *cmd;
4332 struct hci_request req;
4333 u8 val, enabled, status;
4336 BT_DBG("request for %s", hdev->name);
4338 status = mgmt_le_support(hdev);
4340 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4343 if (cp->val != 0x00 && cp->val != 0x01)
4344 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4345 MGMT_STATUS_INVALID_PARAMS);
4350 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4352 /* The following conditions are ones which mean that we should
4353 * not do any HCI communication but directly send a mgmt
4354 * response to user space (after toggling the flag if
4357 if (!hdev_is_powered(hdev) || val == enabled ||
4358 hci_conn_num(hdev, LE_LINK) > 0 ||
4359 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4360 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4361 bool changed = false;
4363 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4364 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4367 }
4368 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4373 err = new_settings(hdev, sk);
/* Only one SET_ADVERTISING / SET_LE operation may be in flight */
4378 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4379 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4380 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4385 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4391 hci_req_init(&req, hdev);
4394 enable_advertising(&req);
4396 disable_advertising(&req);
4398 err = hci_req_run(&req, set_advertising_complete);
4400 mgmt_pending_remove(cmd);
4403 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: only allowed on an LE-capable,
 * powered-off adapter. Non-ANY addresses must not be BDADDR_NONE and
 * must have the two most significant bits set, as required for LE static
 * random addresses.
 */
4407 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4408 void *data, u16 len)
4410 struct mgmt_cp_set_static_address *cp = data;
4413 BT_DBG("%s", hdev->name);
4415 if (!lmp_le_capable(hdev))
4416 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4417 MGMT_STATUS_NOT_SUPPORTED);
4419 if (hdev_is_powered(hdev))
4420 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4421 MGMT_STATUS_REJECTED);
4423 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4424 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4425 return cmd_status(sk, hdev->id,
4426 MGMT_OP_SET_STATIC_ADDRESS,
4427 MGMT_STATUS_INVALID_PARAMS);
4429 /* Two most significant bits shall be set */
4430 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4431 return cmd_status(sk, hdev->id,
4432 MGMT_OP_SET_STATIC_ADDRESS,
4433 MGMT_STATUS_INVALID_PARAMS);
4438 bacpy(&hdev->static_addr, &cp->bdaddr);
4440 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4442 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS: validates interval and window
 * (each 0x0004-0x4000, window <= interval), stores them, and restarts
 * background passive scanning so the new parameters take effect.
 */
4447 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4448 void *data, u16 len)
4450 struct mgmt_cp_set_scan_params *cp = data;
4451 __u16 interval, window;
4454 BT_DBG("%s", hdev->name);
4456 if (!lmp_le_capable(hdev))
4457 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4458 MGMT_STATUS_NOT_SUPPORTED);
4460 interval = __le16_to_cpu(cp->interval);
4462 if (interval < 0x0004 || interval > 0x4000)
4463 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4464 MGMT_STATUS_INVALID_PARAMS);
4466 window = __le16_to_cpu(cp->window);
4468 if (window < 0x0004 || window > 0x4000)
4469 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4470 MGMT_STATUS_INVALID_PARAMS);
4472 if (window > interval)
4473 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4474 MGMT_STATUS_INVALID_PARAMS);
4478 hdev->le_scan_interval = interval;
4479 hdev->le_scan_window = window;
4481 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4483 /* If background scan is running, restart it so new parameters are
4486 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4487 hdev->discovery.state == DISCOVERY_STOPPED) {
4488 struct hci_request req;
4490 hci_req_init(&req, hdev);
4492 hci_req_add_le_scan_disable(&req);
4493 hci_req_add_le_passive_scan(&req);
4495 hci_req_run(&req, NULL);
4498 hci_dev_unlock(hdev);
/* HCI request completion callback for SET_FAST_CONNECTABLE: on failure
 * reports a status; on success syncs HCI_FAST_CONNECTABLE to the value
 * the pending command requested, sends the settings response, and
 * broadcasts a settings-changed event.
 */
4503 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4506 struct pending_cmd *cmd;
4508 BT_DBG("status 0x%02x", status);
4512 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4517 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4518 mgmt_status(status));
4520 struct mgmt_mode *cp = cmd->param;
4523 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4525 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4527 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4528 new_settings(hdev, cmd->sk);
4531 mgmt_pending_remove(cmd);
4534 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: requires BR/EDR enabled on a
 * powered, connectable adapter with controller version >= 1.2. When the
 * requested value differs from the current flag, queues a write of the
 * fast-connectable page-scan parameters via an HCI request.
 */
4537 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4538 void *data, u16 len)
4540 struct mgmt_mode *cp = data;
4541 struct pending_cmd *cmd;
4542 struct hci_request req;
4545 BT_DBG("%s", hdev->name);
4547 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4548 hdev->hci_ver < BLUETOOTH_VER_1_2)
4549 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4550 MGMT_STATUS_NOT_SUPPORTED);
4552 if (cp->val != 0x00 && cp->val != 0x01)
4553 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4554 MGMT_STATUS_INVALID_PARAMS);
4556 if (!hdev_is_powered(hdev))
4557 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4558 MGMT_STATUS_NOT_POWERED);
4560 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4561 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4562 MGMT_STATUS_REJECTED);
4566 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4567 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No change requested: reply with the current settings, no HCI traffic */
4572 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4573 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4578 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4585 hci_req_init(&req, hdev);
4587 write_fast_connectable(&req, cp->val);
4589 err = hci_req_run(&req, fast_connectable_complete);
4591 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4592 MGMT_STATUS_FAILED);
4593 mgmt_pending_remove(cmd);
4597 hci_dev_unlock(hdev);
/* Completion callback for the Set BR/EDR HCI request: on failure, roll
 * back the optimistically set HCI_BREDR_ENABLED flag and report the
 * error; on success, confirm the new settings to the caller.
 */
4602 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4604 struct pending_cmd *cmd;
4606 BT_DBG("status 0x%02x", status);
4610 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4615 u8 mgmt_err = mgmt_status(status);
/* We need to restore the flag if related HCI commands
 * failed: set_bredr() flipped it before running the request.
 */
4617 /* We need to restore the flag if related HCI commands
4620 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4622 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4624 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4625 new_settings(hdev, cmd->sk);
4628 mgmt_pending_remove(cmd);
4631 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * (BR/EDR + LE capable) controller. Requires LE to stay enabled.
 * While powered off the flag is toggled directly; while powered on,
 * only enabling is allowed and is performed via an HCI request
 * completed by set_bredr_complete().
 */
4634 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4636 struct mgmt_mode *cp = data;
4637 struct pending_cmd *cmd;
4638 struct hci_request req;
4641 BT_DBG("request for %s", hdev->name);
4643 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4644 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4645 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR cannot be toggled unless LE is the remaining transport. */
4647 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4648 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4649 MGMT_STATUS_REJECTED);
4651 if (cp->val != 0x00 && cp->val != 0x01)
4652 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4653 MGMT_STATUS_INVALID_PARAMS);
/* Already in the requested state: just acknowledge. */
4657 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4658 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: clear all BR/EDR-only settings when disabling and
 * flip the flag directly without touching the controller.
 */
4662 if (!hdev_is_powered(hdev)) {
4664 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4665 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4666 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4667 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4668 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4671 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4673 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4677 err = new_settings(hdev, sk);
4681 /* Reject disabling when powered on */
4683 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4684 MGMT_STATUS_REJECTED);
4688 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4689 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4694 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4700 /* We need to flip the bit already here so that update_adv_data
4701 * generates the correct flags.
4703 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4705 hci_req_init(&req, hdev);
4707 write_fast_connectable(&req, false);
4708 __hci_update_page_scan(&req);
4710 /* Since only the advertising data flags will change, there
4711 * is no need to update the scan response data.
4713 update_adv_data(&req);
4715 err = hci_req_run(&req, set_bredr_complete);
4717 mgmt_pending_remove(cmd);
4720 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections support.
 * val: 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only mode.
 * When the controller cannot act on it immediately (not powered, not
 * SC-capable, or BR/EDR disabled) only the flags are updated; otherwise
 * HCI_OP_WRITE_SC_SUPPORT is sent to the controller.
 */
4724 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4725 void *data, u16 len)
4727 struct mgmt_mode *cp = data;
4728 struct pending_cmd *cmd;
4732 BT_DBG("request for %s", hdev->name);
4734 if (!lmp_sc_capable(hdev) &&
4735 !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4736 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4737 MGMT_STATUS_NOT_SUPPORTED);
4739 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4740 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4741 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no HCI command can or needs to be sent. */
4745 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4746 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4750 changed = !test_and_set_bit(HCI_SC_ENABLED,
4752 if (cp->val == 0x02)
4753 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4755 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4757 changed = test_and_clear_bit(HCI_SC_ENABLED,
4759 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4762 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4767 err = new_settings(hdev, sk);
4772 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4773 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Both SC_ENABLED and SC_ONLY already match the request. */
4780 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4781 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4782 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4786 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4792 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4794 mgmt_pending_remove(cmd);
4798 if (cp->val == 0x02)
4799 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4801 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4804 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler.
 * val: 0x00 = discard debug keys, 0x01 = keep them, 0x02 = additionally
 * use SSP debug mode. Updates HCI_KEEP_DEBUG_KEYS / HCI_USE_DEBUG_KEYS
 * and, when powered with SSP enabled and the use-flag changed, sends
 * HCI_OP_WRITE_SSP_DEBUG_MODE to the controller.
 */
4808 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4809 void *data, u16 len)
4811 struct mgmt_mode *cp = data;
4812 bool changed, use_changed;
4815 BT_DBG("request for %s", hdev->name);
4817 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4818 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4819 MGMT_STATUS_INVALID_PARAMS);
4824 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4827 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
/* 0x02 also switches the controller into SSP debug mode. */
4830 if (cp->val == 0x02)
4831 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4834 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4837 if (hdev_is_powered(hdev) && use_changed &&
4838 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4839 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4840 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4841 sizeof(mode), &mode);
4844 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4849 err = new_settings(hdev, sk);
4852 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (RPA usage).
 * Only allowed while powered off. Enabling stores the provided IRK and
 * marks the RPA as expired so a fresh one is generated; disabling wipes
 * the IRK. Support for this command implies user space handles IRKs, so
 * HCI_RPA_RESOLVING is set unconditionally.
 */
4856 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4859 struct mgmt_cp_set_privacy *cp = cp_data;
4863 BT_DBG("request for %s", hdev->name);
4865 if (!lmp_le_capable(hdev))
4866 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4867 MGMT_STATUS_NOT_SUPPORTED);
4869 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4870 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4871 MGMT_STATUS_INVALID_PARAMS);
4873 if (hdev_is_powered(hdev))
4874 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4875 MGMT_STATUS_REJECTED);
4879 /* If user space supports this command it is also expected to
4880 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4882 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4885 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4886 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a new RPA on next use. */
4887 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4889 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4890 memset(hdev->irk, 0, sizeof(hdev->irk));
4891 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4894 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4899 err = new_settings(hdev, sk);
4902 hci_dev_unlock(hdev);
/* Validate an incoming IRK entry: address type must be LE public, or LE
 * random with the two most significant address bits set (static random
 * address format).
 */
4906 static bool irk_is_valid(struct mgmt_irk_info *irk)
4908 switch (irk->addr.type) {
4909 case BDADDR_LE_PUBLIC:
4912 case BDADDR_LE_RANDOM:
4913 /* Two most significant bits shall be set */
4914 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the kernel's IRK store with the
 * list supplied by user space. The count is bounded so the expected
 * length computation cannot overflow u16, the payload length must match
 * exactly, and every entry is validated before the existing store is
 * cleared and repopulated.
 */
4922 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4925 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound keeps sizeof(*cp) + count * entry size within U16_MAX. */
4926 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4927 sizeof(struct mgmt_irk_info));
4928 u16 irk_count, expected_len;
4931 BT_DBG("request for %s", hdev->name);
4933 if (!lmp_le_capable(hdev))
4934 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4935 MGMT_STATUS_NOT_SUPPORTED);
4937 irk_count = __le16_to_cpu(cp->irk_count);
4938 if (irk_count > max_irk_count) {
4939 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4940 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4941 MGMT_STATUS_INVALID_PARAMS);
/* Reject any payload whose length disagrees with the stated count. */
4944 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4945 if (expected_len != len) {
4946 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4948 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4949 MGMT_STATUS_INVALID_PARAMS);
4952 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate everything up front so the store is never half-replaced. */
4954 for (i = 0; i < irk_count; i++) {
4955 struct mgmt_irk_info *key = &cp->irks[i];
4957 if (!irk_is_valid(key))
4958 return cmd_status(sk, hdev->id,
4960 MGMT_STATUS_INVALID_PARAMS);
4965 hci_smp_irks_clear(hdev);
4967 for (i = 0; i < irk_count; i++) {
4968 struct mgmt_irk_info *irk = &cp->irks[i];
4971 if (irk->addr.type == BDADDR_LE_PUBLIC)
4972 addr_type = ADDR_LE_DEV_PUBLIC;
4974 addr_type = ADDR_LE_DEV_RANDOM;
4976 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4980 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4982 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4984 hci_dev_unlock(hdev);
/* Validate an incoming LTK entry: master must be 0/1 and the address
 * must be LE public, or LE random with the top two bits set (static
 * random address format).
 */
4989 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4991 if (key->master != 0x00 && key->master != 0x01)
4994 switch (key->addr.type) {
4995 case BDADDR_LE_PUBLIC:
4998 case BDADDR_LE_RANDOM:
4999 /* Two most significant bits shall be set */
5000 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the kernel's LTK store
 * with the user-space supplied list. Mirrors load_irks(): bounded
 * count, exact payload-length check, full validation pass, then clear
 * and reload. Each mgmt key type is mapped to the corresponding SMP
 * key type and authentication level.
 */
5008 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5009 void *cp_data, u16 len)
5011 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Bound so the expected-length arithmetic stays within u16. */
5012 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5013 sizeof(struct mgmt_ltk_info));
5014 u16 key_count, expected_len;
5017 BT_DBG("request for %s", hdev->name);
5019 if (!lmp_le_capable(hdev))
5020 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5021 MGMT_STATUS_NOT_SUPPORTED);
5023 key_count = __le16_to_cpu(cp->key_count);
5024 if (key_count > max_key_count) {
5025 BT_ERR("load_ltks: too big key_count value %u", key_count);
5026 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5027 MGMT_STATUS_INVALID_PARAMS);
5030 expected_len = sizeof(*cp) + key_count *
5031 sizeof(struct mgmt_ltk_info);
5032 if (expected_len != len) {
5033 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5035 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5036 MGMT_STATUS_INVALID_PARAMS);
5039 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before mutating the key store. */
5041 for (i = 0; i < key_count; i++) {
5042 struct mgmt_ltk_info *key = &cp->keys[i];
5044 if (!ltk_is_valid(key))
5045 return cmd_status(sk, hdev->id,
5046 MGMT_OP_LOAD_LONG_TERM_KEYS,
5047 MGMT_STATUS_INVALID_PARAMS);
5052 hci_smp_ltks_clear(hdev);
5054 for (i = 0; i < key_count; i++) {
5055 struct mgmt_ltk_info *key = &cp->keys[i];
5056 u8 type, addr_type, authenticated;
5058 if (key->addr.type == BDADDR_LE_PUBLIC)
5059 addr_type = ADDR_LE_DEV_PUBLIC;
5061 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt key type to SMP key type + authentication level.
 * Legacy (non-P256) keys additionally distinguish master vs. slave.
 */
5063 switch (key->type) {
5064 case MGMT_LTK_UNAUTHENTICATED:
5065 authenticated = 0x00;
5066 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5068 case MGMT_LTK_AUTHENTICATED:
5069 authenticated = 0x01;
5070 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5072 case MGMT_LTK_P256_UNAUTH:
5073 authenticated = 0x00;
5074 type = SMP_LTK_P256;
5076 case MGMT_LTK_P256_AUTH:
5077 authenticated = 0x01;
5078 type = SMP_LTK_P256;
5080 case MGMT_LTK_P256_DEBUG:
5081 authenticated = 0x00;
5082 type = SMP_LTK_P256_DEBUG;
5087 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5088 authenticated, key->val, key->enc_size, key->ediv,
5092 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5095 hci_dev_unlock(hdev);
/* cmd_complete handler for Get Connection Info: build the reply from
 * the cached hci_conn values (or invalid markers on failure), send it,
 * and drop the connection reference taken when the command was queued.
 */
5100 static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5102 struct hci_conn *conn = cmd->user_data;
5103 struct mgmt_rp_get_conn_info rp;
/* The request parameters start with the address block echoed back. */
5106 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5108 if (status == MGMT_STATUS_SUCCESS) {
5109 rp.rssi = conn->rssi;
5110 rp.tx_power = conn->tx_power;
5111 rp.max_tx_power = conn->max_tx_power;
/* On failure, report sentinel "invalid" values instead. */
5113 rp.rssi = HCI_RSSI_INVALID;
5114 rp.tx_power = HCI_TX_POWER_INVALID;
5115 rp.max_tx_power = HCI_TX_POWER_INVALID;
5118 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
5121 hci_conn_drop(conn);
/* HCI request completion for the Get Connection Info refresh: recover
 * the connection handle from whichever command (Read RSSI or Read TX
 * Power) was sent last, locate the matching pending mgmt command and
 * finish it via its cmd_complete callback.
 */
5127 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5130 struct hci_cp_read_rssi *cp;
5131 struct pending_cmd *cmd;
5132 struct hci_conn *conn;
5136 BT_DBG("status 0x%02x", hci_status);
5140 /* Commands sent in request are either Read RSSI or Read Transmit Power
5141 * Level so we check which one was last sent to retrieve connection
5142 * handle. Both commands have handle as first parameter so it's safe to
5143 * cast data on the same command struct.
5145 * First command sent is always Read RSSI and we fail only if it fails.
5146 * In other case we simply override error to indicate success as we
5147 * already remembered if TX power value is actually valid.
5149 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5151 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5152 status = MGMT_STATUS_SUCCESS;
5154 status = mgmt_status(hci_status);
5158 BT_ERR("invalid sent_cmd in conn_info response");
5162 handle = __le16_to_cpu(cp->handle);
5163 conn = hci_conn_hash_lookup_handle(hdev, handle);
5165 BT_ERR("unknown handle (%d) in conn_info response", handle);
5169 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5173 cmd->cmd_complete(cmd, status);
5174 mgmt_pending_remove(cmd);
5177 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: report RSSI / TX power / max TX power
 * for an active connection. Replies immediately from the hci_conn
 * cache when it is fresh; otherwise queues Read RSSI (and, if needed,
 * Read TX Power) HCI commands and defers the reply to
 * conn_info_refresh_complete().
 */
5180 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5183 struct mgmt_cp_get_conn_info *cp = data;
5184 struct mgmt_rp_get_conn_info rp;
5185 struct hci_conn *conn;
5186 unsigned long conn_info_age;
5189 BT_DBG("%s", hdev->name);
/* Pre-fill the reply address so every error path can echo it back. */
5191 memset(&rp, 0, sizeof(rp));
5192 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5193 rp.addr.type = cp->addr.type;
5195 if (!bdaddr_type_is_valid(cp->addr.type))
5196 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5197 MGMT_STATUS_INVALID_PARAMS,
5202 if (!hdev_is_powered(hdev)) {
5203 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5204 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Look up the connection on the transport matching the address type. */
5208 if (cp->addr.type == BDADDR_BREDR)
5209 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5212 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5214 if (!conn || conn->state != BT_CONNECTED) {
5215 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5216 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
/* Only one Get Conn Info per connection may be pending. */
5220 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5221 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5222 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5226 /* To avoid client trying to guess when to poll again for information we
5227 * calculate conn info age as random value between min/max set in hdev.
5229 conn_info_age = hdev->conn_info_min_age +
5230 prandom_u32_max(hdev->conn_info_max_age -
5231 hdev->conn_info_min_age);
5233 /* Query controller to refresh cached values if they are too old or were
5236 if (time_after(jiffies, conn->conn_info_timestamp +
5237 msecs_to_jiffies(conn_info_age)) ||
5238 !conn->conn_info_timestamp) {
5239 struct hci_request req;
5240 struct hci_cp_read_tx_power req_txp_cp;
5241 struct hci_cp_read_rssi req_rssi_cp;
5242 struct pending_cmd *cmd;
5244 hci_req_init(&req, hdev);
5245 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5246 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5249 /* For LE links TX power does not change thus we don't need to
5250 * query for it once value is known.
5252 if (!bdaddr_type_is_le(cp->addr.type) ||
5253 conn->tx_power == HCI_TX_POWER_INVALID) {
5254 req_txp_cp.handle = cpu_to_le16(conn->handle);
5255 req_txp_cp.type = 0x00;
5256 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5257 sizeof(req_txp_cp), &req_txp_cp);
5260 /* Max TX power needs to be read only once per connection */
5261 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5262 req_txp_cp.handle = cpu_to_le16(conn->handle);
5263 req_txp_cp.type = 0x01;
5264 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5265 sizeof(req_txp_cp), &req_txp_cp);
5268 err = hci_req_run(&req, conn_info_refresh_complete);
5272 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold a connection reference until the deferred reply is sent. */
5279 hci_conn_hold(conn);
5280 cmd->user_data = hci_conn_get(conn);
5281 cmd->cmd_complete = conn_info_cmd_complete;
5283 conn->conn_info_timestamp = jiffies;
5285 /* Cache is valid, just reply with values cached in hci_conn */
5286 rp.rssi = conn->rssi;
5287 rp.tx_power = conn->tx_power;
5288 rp.max_tx_power = conn->max_tx_power;
5290 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5291 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5295 hci_dev_unlock(hdev);
/* cmd_complete handler for Get Clock Info: fill in the local clock
 * (and, when a connection was involved, the piconet clock and its
 * accuracy), send the reply, and release the held references.
 */
5299 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5301 struct hci_conn *conn = cmd->user_data;
5302 struct mgmt_rp_get_clock_info rp;
5303 struct hci_dev *hdev;
5306 memset(&rp, 0, sizeof(rp));
5307 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5312 hdev = hci_dev_get(cmd->index);
5314 rp.local_clock = cpu_to_le32(hdev->clock);
/* conn is only set for the piconet-clock variant of the request. */
5319 rp.piconet_clock = cpu_to_le32(conn->clock);
5320 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5324 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5328 hci_conn_drop(conn);
/* HCI request completion for Get Clock Info: recover the connection
 * (if the Read Clock command targeted a piconet clock), find the
 * matching pending mgmt command and finish it.
 */
5335 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5337 struct hci_cp_read_clock *hci_cp;
5338 struct pending_cmd *cmd;
5339 struct hci_conn *conn;
5341 BT_DBG("%s status %u", hdev->name, status);
5345 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a specific connection. */
5349 if (hci_cp->which) {
5350 u16 handle = __le16_to_cpu(hci_cp->handle);
5351 conn = hci_conn_hash_lookup_handle(hdev, handle);
5356 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5360 cmd->cmd_complete(cmd, mgmt_status(status));
5361 mgmt_pending_remove(cmd);
5364 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler: read the local clock and, when a
 * non-ANY BR/EDR address is given and connected, also that link's
 * piconet clock. The reply is deferred to get_clock_info_complete().
 */
5367 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5370 struct mgmt_cp_get_clock_info *cp = data;
5371 struct mgmt_rp_get_clock_info rp;
5372 struct hci_cp_read_clock hci_cp;
5373 struct pending_cmd *cmd;
5374 struct hci_request req;
5375 struct hci_conn *conn;
5378 BT_DBG("%s", hdev->name);
/* Pre-fill the reply address for the error paths. */
5380 memset(&rp, 0, sizeof(rp));
5381 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5382 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
5384 if (cp->addr.type != BDADDR_BREDR)
5385 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5386 MGMT_STATUS_INVALID_PARAMS,
5391 if (!hdev_is_powered(hdev)) {
5392 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5393 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* A non-ANY address selects a specific connected peer. */
5397 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5398 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5400 if (!conn || conn->state != BT_CONNECTED) {
5401 err = cmd_complete(sk, hdev->id,
5402 MGMT_OP_GET_CLOCK_INFO,
5403 MGMT_STATUS_NOT_CONNECTED,
5411 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5417 cmd->cmd_complete = clock_info_cmd_complete;
5419 hci_req_init(&req, hdev);
/* First Read Clock: zeroed cp means the local clock. */
5421 memset(&hci_cp, 0, sizeof(hci_cp));
5422 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection until the deferred reply is delivered. */
5425 hci_conn_hold(conn);
5426 cmd->user_data = hci_conn_get(conn);
5428 hci_cp.handle = cpu_to_le16(conn->handle);
5429 hci_cp.which = 0x01; /* Piconet clock */
5430 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5433 err = hci_req_run(&req, get_clock_info_complete);
5435 mgmt_pending_remove(cmd);
5438 hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address/type exists and
 * is in the BT_CONNECTED state.
 */
5442 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5444 struct hci_conn *conn;
5446 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5450 if (conn->dst_type != type)
5453 if (conn->state != BT_CONNECTED)
5459 /* This function requires the caller holds hdev->lock */
/* Create (or reuse) connection parameters for the address and set its
 * auto_connect policy. The entry is moved onto the matching action list
 * (pend_le_reports or pend_le_conns) and the background scan is updated
 * through the supplied request so the new policy takes effect.
 */
5460 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5461 u8 addr_type, u8 auto_connect)
5463 struct hci_dev *hdev = req->hdev;
5464 struct hci_conn_params *params;
5466 params = hci_conn_params_add(hdev, addr, addr_type);
5470 if (params->auto_connect == auto_connect)
/* Detach from any previous action list before re-filing. */
5473 list_del_init(&params->action);
5475 switch (auto_connect) {
5476 case HCI_AUTO_CONN_DISABLED:
5477 case HCI_AUTO_CONN_LINK_LOSS:
5478 __hci_update_background_scan(req);
5480 case HCI_AUTO_CONN_REPORT:
5481 list_add(&params->action, &hdev->pend_le_reports);
5482 __hci_update_background_scan(req);
5484 case HCI_AUTO_CONN_DIRECT:
5485 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if one isn't already up. */
5486 if (!is_connected(hdev, addr, addr_type)) {
5487 list_add(&params->action, &hdev->pend_le_conns);
5488 __hci_update_background_scan(req);
5493 params->auto_connect = auto_connect;
5495 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the MGMT_EV_DEVICE_ADDED event for the given address, skipping
 * the socket that issued the Add Device command.
 */
5501 static void device_added(struct sock *sk, struct hci_dev *hdev,
5502 bdaddr_t *bdaddr, u8 type, u8 action)
5504 struct mgmt_ev_device_added ev;
5506 bacpy(&ev.addr.bdaddr, bdaddr);
5507 ev.addr.type = type;
5510 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion for Add Device: finish the pending mgmt
 * command with the translated status.
 */
5513 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5515 struct pending_cmd *cmd;
5517 BT_DBG("status 0x%02x", status);
5521 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5525 cmd->cmd_complete(cmd, mgmt_status(status));
5526 mgmt_pending_remove(cmd);
5529 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_DEVICE handler: add an address to the BR/EDR whitelist
 * (action 0x01 only) or configure LE auto-connect policy for it
 * (0x00 = report/scan, 0x01 = direct, 0x02 = always connect). The
 * resulting HCI changes run as a request completed by
 * add_device_complete(); -ENODATA from hci_req_run() means no HCI
 * traffic was necessary and is treated as success.
 */
5532 static int add_device(struct sock *sk, struct hci_dev *hdev,
5533 void *data, u16 len)
5535 struct mgmt_cp_add_device *cp = data;
5536 struct pending_cmd *cmd;
5537 struct hci_request req;
5538 u8 auto_conn, addr_type;
5541 BT_DBG("%s", hdev->name);
5543 if (!bdaddr_type_is_valid(cp->addr.type) ||
5544 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5545 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5546 MGMT_STATUS_INVALID_PARAMS,
5547 &cp->addr, sizeof(cp->addr));
5549 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5550 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5551 MGMT_STATUS_INVALID_PARAMS,
5552 &cp->addr, sizeof(cp->addr));
5554 hci_req_init(&req, hdev);
5558 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5564 cmd->cmd_complete = addr_cmd_complete;
5566 if (cp->addr.type == BDADDR_BREDR) {
5567 /* Only incoming connections action is supported for now */
5568 if (cp->action != 0x01) {
5569 err = cmd->cmd_complete(cmd,
5570 MGMT_STATUS_INVALID_PARAMS);
5571 mgmt_pending_remove(cmd);
5575 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5580 __hci_update_page_scan(&req);
/* LE path: translate the mgmt address type and requested action. */
5585 if (cp->addr.type == BDADDR_LE_PUBLIC)
5586 addr_type = ADDR_LE_DEV_PUBLIC;
5588 addr_type = ADDR_LE_DEV_RANDOM;
5590 if (cp->action == 0x02)
5591 auto_conn = HCI_AUTO_CONN_ALWAYS;
5592 else if (cp->action == 0x01)
5593 auto_conn = HCI_AUTO_CONN_DIRECT;
5595 auto_conn = HCI_AUTO_CONN_REPORT;
5597 /* If the connection parameters don't exist for this device,
5598 * they will be created and configured with defaults.
5600 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5602 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5603 mgmt_pending_remove(cmd);
5608 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5610 err = hci_req_run(&req, add_device_complete);
5612 /* ENODATA means no HCI commands were needed (e.g. if
5613 * the adapter is powered off).
5615 if (err == -ENODATA)
5616 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5617 mgmt_pending_remove(cmd);
5621 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event for the given address, skipping
 * the socket that issued the Remove Device command.
 */
5625 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5626 bdaddr_t *bdaddr, u8 type)
5628 struct mgmt_ev_device_removed ev;
5630 bacpy(&ev.addr.bdaddr, bdaddr);
5631 ev.addr.type = type;
5633 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion for Remove Device: finish the pending mgmt
 * command with the translated status.
 */
5636 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5638 struct pending_cmd *cmd;
5640 BT_DBG("status 0x%02x", status);
5644 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5648 cmd->cmd_complete(cmd, mgmt_status(status));
5649 mgmt_pending_remove(cmd);
5652 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_DEVICE handler: undo add_device() for one address
 * (BR/EDR whitelist entry or LE connection parameters), or — when the
 * address is BDADDR_ANY with type 0 — flush the whole whitelist and all
 * non-disabled LE connection parameters. Scanning state is refreshed
 * through an HCI request completed by remove_device_complete();
 * -ENODATA from hci_req_run() is treated as success (nothing to send).
 */
5655 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5656 void *data, u16 len)
5658 struct mgmt_cp_remove_device *cp = data;
5659 struct pending_cmd *cmd;
5660 struct hci_request req;
5663 BT_DBG("%s", hdev->name);
5665 hci_req_init(&req, hdev);
5669 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5675 cmd->cmd_complete = addr_cmd_complete;
/* Non-ANY address: remove exactly one entry. */
5677 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5678 struct hci_conn_params *params;
5681 if (!bdaddr_type_is_valid(cp->addr.type)) {
5682 err = cmd->cmd_complete(cmd,
5683 MGMT_STATUS_INVALID_PARAMS);
5684 mgmt_pending_remove(cmd);
5688 if (cp->addr.type == BDADDR_BREDR) {
5689 err = hci_bdaddr_list_del(&hdev->whitelist,
/* Not on the whitelist: nothing to remove. */
5693 err = cmd->cmd_complete(cmd,
5694 MGMT_STATUS_INVALID_PARAMS);
5695 mgmt_pending_remove(cmd);
5699 __hci_update_page_scan(&req);
5701 device_removed(sk, hdev, &cp->addr.bdaddr,
5706 if (cp->addr.type == BDADDR_LE_PUBLIC)
5707 addr_type = ADDR_LE_DEV_PUBLIC;
5709 addr_type = ADDR_LE_DEV_RANDOM;
5711 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5714 err = cmd->cmd_complete(cmd,
5715 MGMT_STATUS_INVALID_PARAMS);
5716 mgmt_pending_remove(cmd);
/* Disabled entries were not created via Add Device; reject. */
5720 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5721 err = cmd->cmd_complete(cmd,
5722 MGMT_STATUS_INVALID_PARAMS);
5723 mgmt_pending_remove(cmd);
5727 list_del(&params->action);
5728 list_del(&params->list);
5730 __hci_update_background_scan(&req);
5732 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: remove everything (type must be 0). */
5734 struct hci_conn_params *p, *tmp;
5735 struct bdaddr_list *b, *btmp;
5737 if (cp->addr.type) {
5738 err = cmd->cmd_complete(cmd,
5739 MGMT_STATUS_INVALID_PARAMS);
5740 mgmt_pending_remove(cmd);
5744 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5745 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5750 __hci_update_page_scan(&req);
/* Keep disabled entries: they track kernel-internal state. */
5752 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5753 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5755 device_removed(sk, hdev, &p->addr, p->addr_type);
5756 list_del(&p->action);
5761 BT_DBG("All LE connection parameters were removed");
5763 __hci_update_background_scan(&req);
5767 err = hci_req_run(&req, remove_device_complete);
5769 /* ENODATA means no HCI commands were needed (e.g. if
5770 * the adapter is powered off).
5772 if (err == -ENODATA)
5773 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5774 mgmt_pending_remove(cmd);
5778 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: replace stored LE connection
 * parameters with the user-space supplied list. After bounding the
 * count and checking the exact payload length, disabled entries are
 * cleared and each valid parameter set (address type, interval range,
 * latency, supervision timeout) is added; invalid entries are logged
 * and skipped rather than failing the whole load.
 */
5782 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5785 struct mgmt_cp_load_conn_param *cp = data;
/* Bound so the expected-length arithmetic stays within u16. */
5786 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5787 sizeof(struct mgmt_conn_param));
5788 u16 param_count, expected_len;
5791 if (!lmp_le_capable(hdev))
5792 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5793 MGMT_STATUS_NOT_SUPPORTED);
5795 param_count = __le16_to_cpu(cp->param_count);
5796 if (param_count > max_param_count) {
5797 BT_ERR("load_conn_param: too big param_count value %u",
5799 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5800 MGMT_STATUS_INVALID_PARAMS);
5803 expected_len = sizeof(*cp) + param_count *
5804 sizeof(struct mgmt_conn_param);
5805 if (expected_len != len) {
5806 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5808 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5809 MGMT_STATUS_INVALID_PARAMS);
5812 BT_DBG("%s param_count %u", hdev->name, param_count);
5816 hci_conn_params_clear_disabled(hdev);
5818 for (i = 0; i < param_count; i++) {
5819 struct mgmt_conn_param *param = &cp->params[i];
5820 struct hci_conn_params *hci_param;
5821 u16 min, max, latency, timeout;
5824 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5827 if (param->addr.type == BDADDR_LE_PUBLIC) {
5828 addr_type = ADDR_LE_DEV_PUBLIC;
5829 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5830 addr_type = ADDR_LE_DEV_RANDOM;
5832 BT_ERR("Ignoring invalid connection parameters");
5836 min = le16_to_cpu(param->min_interval);
5837 max = le16_to_cpu(param->max_interval);
5838 latency = le16_to_cpu(param->latency);
5839 timeout = le16_to_cpu(param->timeout);
5841 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5842 min, max, latency, timeout);
5844 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5845 BT_ERR("Ignoring invalid connection parameters");
5849 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5852 BT_ERR("Failed to add connection parameters");
5856 hci_param->conn_min_interval = min;
5857 hci_param->conn_max_interval = max;
5858 hci_param->conn_latency = latency;
5859 hci_param->supervision_timeout = timeout;
5862 hci_dev_unlock(hdev);
5864 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: mark the controller as
 * externally configured (or not). Only valid while powered off and on
 * controllers with the EXTERNAL_CONFIG quirk. Toggling the flag may
 * move the device between the configured and unconfigured index lists
 * and trigger power-on processing.
 */
5867 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5868 void *data, u16 len)
5870 struct mgmt_cp_set_external_config *cp = data;
5874 BT_DBG("%s", hdev->name);
5876 if (hdev_is_powered(hdev))
5877 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5878 MGMT_STATUS_REJECTED);
5880 if (cp->config != 0x00 && cp->config != 0x01)
5881 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5882 MGMT_STATUS_INVALID_PARAMS);
5884 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5885 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5886 MGMT_STATUS_NOT_SUPPORTED);
5891 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5894 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5897 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5904 err = new_options(hdev, sk);
/* If the configured state changed, re-register the index under the
 * correct (configured vs. unconfigured) interface.
 */
5906 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5907 mgmt_index_removed(hdev);
5909 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5910 set_bit(HCI_CONFIG, &hdev->dev_flags);
5911 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5913 queue_work(hdev->req_workqueue, &hdev->power_on);
5915 set_bit(HCI_RAW, &hdev->flags);
5916 mgmt_index_added(hdev);
5921 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: store a public address for a
 * controller whose driver provides a set_bdaddr hook. Only valid while
 * powered off; if setting the address completes the configuration of
 * an unconfigured controller, move it to the configured index list and
 * schedule power-on processing.
 */
5925 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5926 void *data, u16 len)
5928 struct mgmt_cp_set_public_address *cp = data;
5932 BT_DBG("%s", hdev->name);
5934 if (hdev_is_powered(hdev))
5935 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5936 MGMT_STATUS_REJECTED);
5938 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5939 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5940 MGMT_STATUS_INVALID_PARAMS);
/* Without a driver hook the address cannot be programmed. */
5942 if (!hdev->set_bdaddr)
5943 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5944 MGMT_STATUS_NOT_SUPPORTED);
5948 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5949 bacpy(&hdev->public_addr, &cp->bdaddr);
5951 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5958 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5959 err = new_options(hdev, sk);
/* Address may have been the last missing piece of configuration. */
5961 if (is_configured(hdev)) {
5962 mgmt_index_removed(hdev);
5964 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5966 set_bit(HCI_CONFIG, &hdev->dev_flags);
5967 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5969 queue_work(hdev->req_workqueue, &hdev->power_on);
5973 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed directly by opcode.  Each
 * entry names the handler, whether the payload may exceed the declared
 * size (variable-length commands such as key/parameter loads), and the
 * expected (minimum) parameter size.  Entry order must match the opcode
 * numbering in mgmt.h; mgmt_control() bounds-checks against this array.
 * NOTE(review): some struct members/lines appear elided in this dump.
 */
5977 static const struct mgmt_handler {
5978 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5982 } mgmt_handlers[] = {
5983 { NULL }, /* 0x0000 (no command) */
5984 { read_version, false, MGMT_READ_VERSION_SIZE },
5985 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5986 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5987 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5988 { set_powered, false, MGMT_SETTING_SIZE },
5989 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5990 { set_connectable, false, MGMT_SETTING_SIZE },
5991 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5992 { set_bondable, false, MGMT_SETTING_SIZE },
5993 { set_link_security, false, MGMT_SETTING_SIZE },
5994 { set_ssp, false, MGMT_SETTING_SIZE },
5995 { set_hs, false, MGMT_SETTING_SIZE },
5996 { set_le, false, MGMT_SETTING_SIZE },
5997 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5998 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5999 { add_uuid, false, MGMT_ADD_UUID_SIZE },
6000 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
6001 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
6002 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
6003 { disconnect, false, MGMT_DISCONNECT_SIZE },
6004 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
6005 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
6006 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6007 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
6008 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
6009 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6010 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
6011 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
6012 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6013 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
6014 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6015 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6016 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
6017 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6018 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
6019 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
6020 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
6021 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
6022 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
6023 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
6024 { set_advertising, false, MGMT_SETTING_SIZE },
6025 { set_bredr, false, MGMT_SETTING_SIZE },
6026 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
6027 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
6028 { set_secure_conn, false, MGMT_SETTING_SIZE },
6029 { set_debug_keys, false, MGMT_SETTING_SIZE },
6030 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
6031 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
6032 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
6033 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
6034 { add_device, false, MGMT_ADD_DEVICE_SIZE },
6035 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
6036 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
6037 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
6038 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
6039 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
6040 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
6041 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
/* Entry point for management commands arriving on an HCI control socket.
 * Copies the message into a kernel buffer, validates the mgmt header,
 * resolves the controller index, enforces per-state opcode restrictions
 * and the handler table's length rules, then dispatches to the handler.
 * NOTE(review): error paths, goto labels and frees are elided in this
 * dump; code kept byte-identical.
 */
6044 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
6048 struct mgmt_hdr *hdr;
6049 u16 opcode, index, len;
6050 struct hci_dev *hdev = NULL;
6051 const struct mgmt_handler *handler;
6054 BT_DBG("got %zu bytes", msglen);
/* Message must at least contain the fixed mgmt header. */
6056 if (msglen < sizeof(*hdr))
6059 buf = kmalloc(msglen, GFP_KERNEL);
6063 if (memcpy_from_msg(buf, msg, msglen)) {
/* Header fields are little-endian on the wire. */
6069 opcode = __le16_to_cpu(hdr->opcode);
6070 index = __le16_to_cpu(hdr->index);
6071 len = __le16_to_cpu(hdr->len);
/* Declared parameter length must match what was actually received. */
6073 if (len != msglen - sizeof(*hdr)) {
6078 if (index != MGMT_INDEX_NONE) {
6079 hdev = hci_dev_get(index);
6081 err = cmd_status(sk, index, opcode,
6082 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup/config, or claimed by a user channel,
 * are not visible to the management interface.
 */
6086 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
6087 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
6088 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
6089 err = cmd_status(sk, index, opcode,
6090 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured controllers only accept the configuration opcodes. */
6094 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
6095 opcode != MGMT_OP_READ_CONFIG_INFO &&
6096 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
6097 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
6098 err = cmd_status(sk, index, opcode,
6099 MGMT_STATUS_INVALID_INDEX);
/* Reject opcodes outside the handler table or with no handler. */
6104 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
6105 mgmt_handlers[opcode].func == NULL) {
6106 BT_DBG("Unknown op %u", opcode);
6107 err = cmd_status(sk, index, opcode,
6108 MGMT_STATUS_UNKNOWN_COMMAND);
/* Index-list style commands must target no controller ... */
6112 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
6113 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6114 err = cmd_status(sk, index, opcode,
6115 MGMT_STATUS_INVALID_INDEX);
/* ... and all other commands must target a specific controller. */
6119 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
6120 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6121 err = cmd_status(sk, index, opcode,
6122 MGMT_STATUS_INVALID_INDEX);
6126 handler = &mgmt_handlers[opcode];
/* Variable-length commands need at least data_len bytes; fixed-length
 * commands need exactly data_len bytes.
 */
6128 if ((handler->var_len && len < handler->data_len) ||
6129 (!handler->var_len && len != handler->data_len)) {
6130 err = cmd_status(sk, index, opcode,
6131 MGMT_STATUS_INVALID_PARAMS);
6136 mgmt_init_hdev(sk, hdev);
/* Parameters follow directly after the fixed header. */
6138 cp = buf + sizeof(*hdr);
6140 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller to mgmt listeners.  Non-BR/EDR
 * device types and raw-device-quirk controllers are not announced; an
 * unconfigured controller gets the unconfigured variant of the event.
 */
6154 void mgmt_index_added(struct hci_dev *hdev)
6156 if (hdev->dev_type != HCI_BREDR)
6159 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6162 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6163 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6165 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce removal of a controller.  All pending mgmt commands for the
 * controller are completed with INVALID_INDEX first, then the matching
 * (configured/unconfigured) Index Removed event is sent.
 */
6168 void mgmt_index_removed(struct hci_dev *hdev)
6170 u8 status = MGMT_STATUS_INVALID_INDEX;
6172 if (hdev->dev_type != HCI_BREDR)
6175 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* Opcode 0 matches every pending command. */
6178 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6180 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6181 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6183 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6186 /* This function requires the caller holds hdev->lock */
/* Re-sort all stored LE connection parameters onto the appropriate
 * pending-action lists (auto-connect vs. report) and refresh the
 * background scan accordingly.  Used when powering (back) on.
 */
6187 static void restart_le_actions(struct hci_request *req)
6189 struct hci_dev *hdev = req->hdev;
6190 struct hci_conn_params *p;
6192 list_for_each_entry(p, &hdev->le_conn_params, list) {
6193 /* Needed for AUTO_OFF case where might not "really"
6194 * have been powered off.
/* Detach from whatever action list the entry was on before re-adding. */
6196 list_del_init(&p->action);
6198 switch (p->auto_connect) {
6199 case HCI_AUTO_CONN_DIRECT:
6200 case HCI_AUTO_CONN_ALWAYS:
6201 list_add(&p->action, &hdev->pend_le_conns);
6203 case HCI_AUTO_CONN_REPORT:
6204 list_add(&p->action, &hdev->pend_le_reports);
6211 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on sequence: answer any
 * pending Set Powered commands and emit New Settings.
 */
6214 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6216 struct cmd_lookup match = { NULL, hdev };
6218 BT_DBG("status 0x%02x", status);
6222 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6224 new_settings(hdev, match.sk);
6226 hci_dev_unlock(hdev);
/* Build and run one HCI request that brings the controller's host
 * configuration (SSP, Secure Connections, LE host support, advertising
 * data, auth enable, page scan) in line with the current dev_flags after
 * power on.  Returns the result of hci_req_run() with powered_complete
 * as the completion callback.
 */
6232 static int powered_update_hci(struct hci_dev *hdev)
6234 struct hci_request req;
6237 hci_req_init(&req, hdev);
/* Enable SSP on the controller if the host flag is set but the
 * controller does not have it enabled yet.
 */
6239 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6240 !lmp_host_ssp_capable(hdev)) {
6243 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
6246 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6248 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
6251 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
6252 lmp_bredr_capable(hdev)) {
6253 struct hci_cp_write_le_host_supported cp;
6258 /* Check first if we already have the right
6259 * host state (host features set)
6261 if (cp.le != lmp_host_le_capable(hdev) ||
6262 cp.simul != lmp_host_le_br_capable(hdev))
6263 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6267 if (lmp_le_capable(hdev)) {
6268 /* Make sure the controller has a good default for
6269 * advertising data. This also applies to the case
6270 * where BR/EDR was toggled during the AUTO_OFF phase.
6272 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
6273 update_adv_data(&req);
6274 update_scan_rsp_data(&req);
6277 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6278 enable_advertising(&req);
6280 restart_le_actions(&req);
/* Sync controller authentication with the link-security setting. */
6283 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
6284 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6285 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6286 sizeof(link_sec), &link_sec);
6288 if (lmp_bredr_capable(hdev)) {
6289 write_fast_connectable(&req, false);
6290 __hci_update_page_scan(&req);
6296 return hci_req_run(&req, powered_complete);
/* Notify mgmt of a power state change.  On power-on, defer replies until
 * powered_update_hci()'s request completes; on power-off (or when no HCI
 * work is needed) answer pending Set Powered commands directly, fail all
 * other pending commands with an appropriate status, clear a non-zero
 * class of device, and emit New Settings.
 */
6299 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6301 struct cmd_lookup match = { NULL, hdev };
6302 u8 status, zero_cod[] = { 0, 0, 0 };
6305 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* If the HCI request was queued, powered_complete() sends replies. */
6309 if (powered_update_hci(hdev) == 0)
6312 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6317 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6319 /* If the power off is because of hdev unregistration let
6320 * use the appropriate INVALID_INDEX status. Otherwise use
6321 * NOT_POWERED. We cover both scenarios here since later in
6322 * mgmt_index_removed() any hci_conn callbacks will have already
6323 * been triggered, potentially causing misleading DISCONNECTED
6326 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
6327 status = MGMT_STATUS_INVALID_INDEX;
6329 status = MGMT_STATUS_NOT_POWERED;
6331 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6333 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6334 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6335 zero_cod, sizeof(zero_cod), NULL);
6338 err = new_settings(hdev, match.sk);
/* Fail a pending Set Powered command.  -ERFKILL is reported as RFKILLED,
 * anything else as a generic failure.
 */
6346 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6348 struct pending_cmd *cmd;
6351 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6355 if (err == -ERFKILL)
6356 status = MGMT_STATUS_RFKILLED;
6358 status = MGMT_STATUS_FAILED;
6360 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6362 mgmt_pending_remove(cmd);
/* Handle expiry of the discoverable timeout: clear the discoverable
 * flags, turn off inquiry scan (keeping page scan when BR/EDR is
 * enabled), refresh advertising data and announce the new settings.
 */
6365 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6367 struct hci_request req;
6371 /* When discoverable timeout triggers, then just make sure
6372 * the limited discoverable flag is cleared. Even in the case
6373 * of a timeout triggered from general discoverable, it is
6374 * safe to unconditionally clear the flag.
6376 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6377 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6379 hci_req_init(&req, hdev);
6380 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Page scan only — inquiry scan (discoverability) is dropped. */
6381 u8 scan = SCAN_PAGE;
6382 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6383 sizeof(scan), &scan);
6386 update_adv_data(&req);
6387 hci_req_run(&req, NULL);
6389 hdev->discov_timeout = 0;
6391 new_settings(hdev, NULL);
6393 hci_dev_unlock(hdev);
/* Emit a New Link Key event for a freshly created BR/EDR link key.  The
 * store_hint tells userspace whether to persist the key.
 */
6396 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6399 struct mgmt_ev_new_link_key ev;
6401 memset(&ev, 0, sizeof(ev));
6403 ev.store_hint = persistent;
6404 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6405 ev.key.addr.type = BDADDR_BREDR;
6406 ev.key.type = key->type;
6407 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6408 ev.key.pin_len = key->pin_len;
6410 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long term key's type/authentication level to the
 * corresponding MGMT_LTK_* constant reported to userspace.  Unknown
 * types default to unauthenticated.
 */
6413 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6415 switch (ltk->type) {
6418 if (ltk->authenticated)
6419 return MGMT_LTK_AUTHENTICATED;
6420 return MGMT_LTK_UNAUTHENTICATED;
6422 if (ltk->authenticated)
6423 return MGMT_LTK_P256_AUTH;
6424 return MGMT_LTK_P256_UNAUTH;
6425 case SMP_LTK_P256_DEBUG:
6426 return MGMT_LTK_P256_DEBUG;
6429 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event.  Keys for devices using resolvable or
 * non-resolvable random addresses (top two address bits != 0b11) get a
 * zero store hint since their address will change.
 */
6432 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6434 struct mgmt_ev_new_long_term_key ev;
6436 memset(&ev, 0, sizeof(ev));
6438 /* Devices using resolvable or non-resolvable random addresses
6439 * without providing an identity resolving key don't require
6440 * to store long term keys. Their addresses will change the
6443 * Only when a remote device provides an identity address
6444 * make sure the long term key is stored. If the remote
6445 * identity is known, the long term keys are internally
6446 * mapped to the identity address. So allow static random
6447 * and public addresses here.
6449 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6450 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6451 ev.store_hint = 0x00;
6453 ev.store_hint = persistent;
6455 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6456 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6457 ev.key.type = mgmt_ltk_type(key);
6458 ev.key.enc_size = key->enc_size;
6459 ev.key.ediv = key->ediv;
6460 ev.key.rand = key->rand;
6462 if (key->type == SMP_LTK)
6465 memcpy(ev.key.val, key->val, sizeof(key->val));
6467 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event.  Only IRKs tied to an actual resolvable private
 * address (non-zero rpa) get a store hint of 1.
 */
6470 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6472 struct mgmt_ev_new_irk ev;
6474 memset(&ev, 0, sizeof(ev));
6476 /* For identity resolving keys from devices that are already
6477 * using a public address or static random address, do not
6478 * ask for storing this key. The identity resolving key really
6479 * is only mandatory for devices using resolvable random
6482 * Storing all identity resolving keys has the downside that
6483 * they will be also loaded on next boot of the system. More
6484 * identity resolving keys, means more time during scanning is
6485 * needed to actually resolve these addresses.
6487 if (bacmp(&irk->rpa, BDADDR_ANY))
6488 ev.store_hint = 0x01;
6490 ev.store_hint = 0x00;
6492 bacpy(&ev.rpa, &irk->rpa);
6493 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6494 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6495 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6497 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK (signature resolving key) event, with the same random
 * address store-hint rule as mgmt_new_ltk().
 */
6500 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6503 struct mgmt_ev_new_csrk ev;
6505 memset(&ev, 0, sizeof(ev));
6507 /* Devices using resolvable or non-resolvable random addresses
6508 * without providing an identity resolving key don't require
6509 * to store signature resolving keys. Their addresses will change
6510 * the next time around.
6512 * Only when a remote device provides an identity address
6513 * make sure the signature resolving key is stored. So allow
6514 * static random and public addresses here.
6516 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6517 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6518 ev.store_hint = 0x00;
6520 ev.store_hint = persistent;
6522 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6523 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6524 ev.key.master = csrk->master;
6525 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6527 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event for an identity address; events
 * for non-identity (resolvable/non-resolvable random) addresses are
 * suppressed.
 */
6530 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6531 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6532 u16 max_interval, u16 latency, u16 timeout)
6534 struct mgmt_ev_new_conn_param ev;
6536 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6539 memset(&ev, 0, sizeof(ev));
6540 bacpy(&ev.addr.bdaddr, bdaddr);
6541 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6542 ev.store_hint = store_hint;
6543 ev.min_interval = cpu_to_le16(min_interval);
6544 ev.max_interval = cpu_to_le16(max_interval);
6545 ev.latency = cpu_to_le16(latency);
6546 ev.timeout = cpu_to_le16(timeout);
6548 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length, type, data) at offset eir_len and return
 * the new length; callers rely on the returned value (return statement
 * elided in this dump).  The caller must ensure the buffer is large
 * enough for data_len + 2 more bytes.
 */
6551 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6554 eir[eir_len++] = sizeof(type) + data_len;
6555 eir[eir_len++] = type;
6556 memcpy(&eir[eir_len], data, data_len);
6557 eir_len += data_len;
/* Emit a Device Connected event.  For LE connections that advertised
 * data, that data is forwarded verbatim; otherwise name and class of
 * device (when non-zero) are packed as EIR fields.
 */
6562 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6563 u32 flags, u8 *name, u8 name_len)
6566 struct mgmt_ev_device_connected *ev = (void *) buf;
6569 bacpy(&ev->addr.bdaddr, &conn->dst);
6570 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6572 ev->flags = __cpu_to_le32(flags);
6574 /* We must ensure that the EIR Data fields are ordered and
6575 * unique. Keep it simple for now and avoid the problem by not
6576 * adding any BR/EDR data to the LE adv.
6578 if (conn->le_adv_data_len > 0) {
6579 memcpy(&ev->eir[eir_len],
6580 conn->le_adv_data, conn->le_adv_data_len);
6581 eir_len = conn->le_adv_data_len;
6584 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include class of device when it is not all-zero. */
6587 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6588 eir_len = eir_append_data(ev->eir, eir_len,
6590 conn->dev_class, 3);
6593 ev->eir_len = cpu_to_le16(eir_len);
6595 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6596 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending Disconnect command
 * successfully and pass its socket back to the caller via *data.
 */
6599 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6601 struct sock **sk = data;
6603 cmd->cmd_complete(cmd, 0);
6608 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: emit Device Unpaired for the address in
 * the pending Unpair Device command, then complete and remove it.
 */
6611 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6613 struct hci_dev *hdev = data;
6614 struct mgmt_cp_unpair_device *cp = cmd->param;
6616 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6618 cmd->cmd_complete(cmd, 0);
6619 mgmt_pending_remove(cmd);
/* Return whether a mgmt-initiated power-off is in progress, i.e. there
 * is a pending Set Powered command requesting power off.
 * NOTE(review): the check of cp->val and return statements are elided
 * in this dump; code kept byte-identical.
 */
6622 bool mgmt_powering_down(struct hci_dev *hdev)
6624 struct pending_cmd *cmd;
6625 struct mgmt_mode *cp;
6627 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a Device Disconnected event (ACL/LE links only, and only for
 * mgmt-visible connections).  If this was the last connection during a
 * mgmt power-off, the queued power-off work is expedited.  Pending
 * Disconnect and Unpair Device commands are also completed.
 */
6638 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6639 u8 link_type, u8 addr_type, u8 reason,
6640 bool mgmt_connected)
6642 struct mgmt_ev_device_disconnected ev;
6643 struct sock *sk = NULL;
6645 /* The connection is still in hci_conn_hash so test for 1
6646 * instead of 0 to know if this is the last one.
6648 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6649 cancel_delayed_work(&hdev->power_off);
6650 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6653 if (!mgmt_connected)
6656 if (link_type != ACL_LINK && link_type != LE_LINK)
/* Collect the socket of a pending Disconnect so the event is not
 * echoed back to it.
 */
6659 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6661 bacpy(&ev.addr.bdaddr, bdaddr);
6662 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6665 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6670 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete a pending Disconnect command with the (mapped) HCI failure
 * status, but only when its target address and type match; pending
 * Unpair Device commands are completed as well.
 */
6674 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6675 u8 link_type, u8 addr_type, u8 status)
6677 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6678 struct mgmt_cp_disconnect *cp;
6679 struct pending_cmd *cmd;
6681 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6684 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6690 if (bacmp(bdaddr, &cp->addr.bdaddr))
6693 if (cp->addr.type != bdaddr_type)
6696 cmd->cmd_complete(cmd, mgmt_status(status));
6697 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event with the mapped HCI status.  As with
 * disconnects, expedite a queued power-off if this was the last
 * connection during a mgmt power-off.
 */
6700 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6701 u8 addr_type, u8 status)
6703 struct mgmt_ev_connect_failed ev;
6705 /* The connection is still in hci_conn_hash so test for 1
6706 * instead of 0 to know if this is the last one.
6708 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6709 cancel_delayed_work(&hdev->power_off);
6710 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6713 bacpy(&ev.addr.bdaddr, bdaddr);
6714 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6715 ev.status = mgmt_status(status);
6717 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event for a BR/EDR peer. */
6720 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6722 struct mgmt_ev_pin_code_request ev;
6724 bacpy(&ev.addr.bdaddr, bdaddr);
6725 ev.addr.type = BDADDR_BREDR;
6728 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the mapped HCI status. */
6731 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6734 struct pending_cmd *cmd;
6736 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6740 cmd->cmd_complete(cmd, mgmt_status(status));
6741 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply with the mapped status. */
6744 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6747 struct pending_cmd *cmd;
6749 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6753 cmd->cmd_complete(cmd, mgmt_status(status));
6754 mgmt_pending_remove(cmd);
/* Emit a User Confirmation Request event carrying the numeric value to
 * confirm and a hint describing how confirmation should be presented.
 */
6757 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6758 u8 link_type, u8 addr_type, u32 value,
6761 struct mgmt_ev_user_confirm_request ev;
6763 BT_DBG("%s", hdev->name);
6765 bacpy(&ev.addr.bdaddr, bdaddr);
6766 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6767 ev.confirm_hint = confirm_hint;
6768 ev.value = cpu_to_le32(value);
6770 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event for the given peer address. */
6774 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6775 u8 link_type, u8 addr_type)
6777 struct mgmt_ev_user_passkey_request ev;
6779 BT_DBG("%s", hdev->name);
6781 bacpy(&ev.addr.bdaddr, bdaddr);
6782 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6784 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared helper: complete the pending command for the given user-pairing
 * opcode with the mapped HCI status.
 */
6788 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6789 u8 link_type, u8 addr_type, u8 status,
6792 struct pending_cmd *cmd;
6794 cmd = mgmt_pending_find(opcode, hdev);
6798 cmd->cmd_complete(cmd, mgmt_status(status));
6799 mgmt_pending_remove(cmd);
/* Thin wrapper completing a pending User Confirm Reply. */
6804 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6805 u8 link_type, u8 addr_type, u8 status)
6807 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6808 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper completing a pending User Confirm Negative Reply. */
6811 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6812 u8 link_type, u8 addr_type, u8 status)
6814 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6816 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper completing a pending User Passkey Reply. */
6819 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6820 u8 link_type, u8 addr_type, u8 status)
6822 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6823 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper completing a pending User Passkey Negative Reply. */
6826 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6827 u8 link_type, u8 addr_type, u8 status)
6829 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6831 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event with the passkey to display and how many
 * digits the remote side has entered so far.
 */
6834 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6835 u8 link_type, u8 addr_type, u32 passkey,
6838 struct mgmt_ev_passkey_notify ev;
6840 BT_DBG("%s", hdev->name);
6842 bacpy(&ev.addr.bdaddr, bdaddr);
6843 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6844 ev.passkey = __cpu_to_le32(passkey);
6845 ev.entered = entered;
6847 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an Authentication Failed event for a connection and, if a pairing
 * command is pending on it, complete and remove that command (skipping
 * the event echo to its socket).
 */
6850 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6852 struct mgmt_ev_auth_failed ev;
6853 struct pending_cmd *cmd;
6854 u8 status = mgmt_status(hci_status);
6856 bacpy(&ev.addr.bdaddr, &conn->dst);
6857 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6860 cmd = find_pairing(conn);
6862 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6863 cmd ? cmd->sk : NULL);
6866 cmd->cmd_complete(cmd, status);
6867 mgmt_pending_remove(cmd);
/* Handle completion of Write Auth Enable: on failure, fail all pending
 * Set Link Security commands; on success, sync the HCI_LINK_SECURITY
 * flag with the controller state, answer pending commands and emit New
 * Settings if the flag changed.
 */
6871 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6873 struct cmd_lookup match = { NULL, hdev };
6877 u8 mgmt_err = mgmt_status(status);
6878 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6879 cmd_status_rsp, &mgmt_err);
6883 if (test_bit(HCI_AUTH, &hdev->flags))
6884 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6887 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6890 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6894 new_settings(hdev, match.sk);
/* Queue a Write EIR command with all-zero data, clearing both the cached
 * EIR in hdev and the controller's EIR.  No-op for controllers without
 * extended inquiry response support.
 */
6900 static void clear_eir(struct hci_request *req)
6902 struct hci_dev *hdev = req->hdev;
6903 struct hci_cp_write_eir cp;
6905 if (!lmp_ext_inq_capable(hdev))
6908 memset(hdev->eir, 0, sizeof(hdev->eir));
6910 memset(&cp, 0, sizeof(cp));
6912 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Handle completion of Write SSP Mode: on failure roll back the
 * SSP/HS flags and fail pending Set SSP commands; on success sync the
 * flags, answer pending commands, emit New Settings if changed, and
 * queue follow-up HCI work (SSP debug mode, or clearing EIR when SSP
 * was disabled — elided branch in this dump).
 */
6915 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6917 struct cmd_lookup match = { NULL, hdev };
6918 struct hci_request req;
6919 bool changed = false;
6922 u8 mgmt_err = mgmt_status(status);
/* Failure path: if we were enabling, undo the optimistic flag set. */
6924 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6925 &hdev->dev_flags)) {
6926 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6927 new_settings(hdev, NULL);
6930 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6936 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6938 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* Disabling SSP also disables High Speed support. */
6940 changed = test_and_clear_bit(HCI_HS_ENABLED,
6943 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6946 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6949 new_settings(hdev, match.sk);
6954 hci_req_init(&req, hdev);
6956 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6957 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6958 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6959 sizeof(enable), &enable);
6965 hci_req_run(&req, NULL);
/* Handle completion of Write SC Support: on failure roll back the
 * SC_ENABLED/SC_ONLY flags and fail pending Set Secure Connections
 * commands; on success sync the flags, answer pending commands and emit
 * New Settings if anything changed.
 */
6968 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6970 struct cmd_lookup match = { NULL, hdev };
6971 bool changed = false;
6974 u8 mgmt_err = mgmt_status(status);
6977 if (test_and_clear_bit(HCI_SC_ENABLED,
6979 new_settings(hdev, NULL);
6980 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6983 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6984 cmd_status_rsp, &mgmt_err);
6989 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6991 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6992 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6995 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6996 settings_rsp, &match);
6999 new_settings(hdev, match.sk);
/* mgmt_pending_foreach callback: capture the first pending command's
 * socket (with a reference) into the cmd_lookup match.
 */
7005 static void sk_lookup(struct pending_cmd *cmd, void *data)
7007 struct cmd_lookup *match = data;
7009 if (match->sk == NULL) {
7010 match->sk = cmd->sk;
7011 sock_hold(match->sk);
/* Broadcast a Class Of Device Changed event after a class-affecting
 * command (Set Dev Class / Add UUID / Remove UUID) completes, skipping
 * the socket that issued it.
 */
7015 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7018 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7020 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7021 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7022 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7025 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Handle completion of a local name change: cache the name on success
 * and broadcast Local Name Changed — unless the write happened as part
 * of powering on, in which case no event is sent.
 */
7032 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7034 struct mgmt_cp_set_local_name ev;
7035 struct pending_cmd *cmd;
7040 memset(&ev, 0, sizeof(ev));
7041 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7042 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7044 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7046 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7048 /* If this is a HCI command related to powering on the
7049 * HCI dev don't send any mgmt signals.
7051 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
7055 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7056 cmd ? cmd->sk : NULL);
/* Complete a pending Read Local OOB Data command.  On failure a status
 * reply is sent; on success the reply carries the P-192 hash/randomizer,
 * plus the P-256 pair in the extended form when BR/EDR Secure
 * Connections is enabled and 256-bit data is available.
 */
7059 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7060 u8 *rand192, u8 *hash256, u8 *rand256,
7063 struct pending_cmd *cmd;
7065 BT_DBG("%s status %u", hdev->name, status);
7067 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7072 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7073 mgmt_status(status));
7075 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7076 struct mgmt_rp_read_local_oob_ext_data rp;
7078 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7079 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7081 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7082 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7084 cmd_complete(cmd->sk, hdev->id,
7085 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7088 struct mgmt_rp_read_local_oob_data rp;
7090 memcpy(rp.hash, hash192, sizeof(rp.hash));
7091 memcpy(rp.rand, rand192, sizeof(rp.rand));
7093 cmd_complete(cmd->sk, hdev->id,
7094 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7099 mgmt_pending_remove(cmd);
/* Linear search: return true if the 128-bit uuid appears in the uuids
 * array of uuid_count entries (return statements elided in this dump).
 */
7102 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7106 for (i = 0; i < uuid_count; i++) {
7107 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data fields and return whether any contained
 * UUID (16-, 32- or 128-bit; shorter forms expanded against the
 * Bluetooth base UUID) matches the service-discovery filter list.
 */
7114 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7118 while (parsed < eir_len) {
7119 u8 field_len = eir[0];
/* Malformed field extending past the buffer: stop parsing. */
7126 if (eir_len - parsed < field_len + 1)
7130 case EIR_UUID16_ALL:
7131 case EIR_UUID16_SOME:
/* Expand each 16-bit UUID into bytes 12-13 of the base UUID. */
7132 for (i = 0; i + 3 <= field_len; i += 2) {
7133 memcpy(uuid, bluetooth_base_uuid, 16);
7134 uuid[13] = eir[i + 3];
7135 uuid[12] = eir[i + 2];
7136 if (has_uuid(uuid, uuid_count, uuids))
7140 case EIR_UUID32_ALL:
7141 case EIR_UUID32_SOME:
/* Expand each 32-bit UUID into bytes 12-15 of the base UUID. */
7142 for (i = 0; i + 5 <= field_len; i += 4) {
7143 memcpy(uuid, bluetooth_base_uuid, 16);
7144 uuid[15] = eir[i + 5];
7145 uuid[14] = eir[i + 4];
7146 uuid[13] = eir[i + 3];
7147 uuid[12] = eir[i + 2];
7148 if (has_uuid(uuid, uuid_count, uuids))
7152 case EIR_UUID128_ALL:
7153 case EIR_UUID128_SOME:
7154 for (i = 0; i + 17 <= field_len; i += 16) {
7155 memcpy(uuid, eir + i + 2, 16);
7156 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length byte + field_len bytes). */
7162 parsed += field_len + 1;
7163 eir += field_len + 1;
/* Emit a Device Found event for an inquiry/scan result, applying the
 * active discovery filters: kernel-initiated discovery only (with a
 * passive-scan exception for LE), optional RSSI threshold, and optional
 * UUID matching against EIR and scan response data.
 */
7169 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7170 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7171 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7174 struct mgmt_ev_device_found *ev = (void *) buf;
7178 /* Don't send events for a non-kernel initiated discovery. With
7179 * LE one exception is if we have pend_le_reports > 0 in which
7180 * case we're doing passive scanning and want these events.
7182 if (!hci_discovery_active(hdev)) {
7183 if (link_type == ACL_LINK)
7185 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7189 /* When using service discovery with a RSSI threshold, then check
7190 * if such a RSSI threshold is specified. If a RSSI threshold has
7191 * been specified, then all results with a RSSI smaller than the
7192 * RSSI threshold will be dropped.
7194 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7195 * the results are also dropped.
7197 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7198 (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
7201 /* Make sure that the buffer is big enough. The 5 extra bytes
7202 * are for the potential CoD field.
7204 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7207 memset(buf, 0, sizeof(buf));
7209 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7210 * RSSI value was reported as 0 when not available. This behavior
7211 * is kept when using device discovery. This is required for full
7212 * backwards compatibility with the API.
7214 * However when using service discovery, the value 127 will be
7215 * returned when the RSSI is not available.
7217 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
7220 bacpy(&ev->addr.bdaddr, bdaddr);
7221 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7223 ev->flags = cpu_to_le32(flags);
7226 /* When using service discovery and a list of UUID is
7227 * provided, results with no matching UUID should be
7228 * dropped. In case there is a match the result is
7229 * kept and checking possible scan response data
7232 if (hdev->discovery.uuid_count > 0)
7233 match = eir_has_uuids(eir, eir_len,
7234 hdev->discovery.uuid_count,
7235 hdev->discovery.uuids);
7239 if (!match && !scan_rsp_len)
7242 /* Copy EIR or advertising data into event */
7243 memcpy(ev->eir, eir, eir_len)
7245 /* When using service discovery and a list of UUID is
7246 * provided, results with empty EIR or advertising data
7247 * should be dropped since they do not match any UUID.
7249 if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
/* Append class of device as an EIR field unless already present. */
7255 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7256 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7259 if (scan_rsp_len > 0) {
7260 /* When using service discovery and a list of UUID is
7261 * provided, results with no matching UUID should be
7262 * dropped if there is no previous match from the
7265 if (hdev->discovery.uuid_count > 0) {
7266 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7267 hdev->discovery.uuid_count,
7268 hdev->discovery.uuids))
7272 /* Append scan response data to event */
7273 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7275 /* When using service discovery and a list of UUID is
7276 * provided, results with empty scan response and no
7277 * previous matched advertising data should be dropped.
7279 if (hdev->discovery.uuid_count > 0 && !match)
7283 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7284 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7286 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a Device Found event carrying only the remote's complete name as
 * an EIR field — used when a name is resolved after discovery.
 */
7289 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7290 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7292 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR field's length and type bytes. */
7293 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7296 ev = (struct mgmt_ev_device_found *) buf;
7298 memset(buf, 0, sizeof(buf));
7300 bacpy(&ev->addr.bdaddr, bdaddr);
7301 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7304 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7307 ev->eir_len = cpu_to_le16(eir_len);
7309 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Broadcast a Discovering event with the current discovery type and the
 * new discovering state.
 */
7312 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7314 struct mgmt_ev_discovering ev;
7316 BT_DBG("%s discovering %u", hdev->name, discovering);
7318 memset(&ev, 0, sizeof(ev));
7319 ev.type = hdev->discovery.type;
7320 ev.discovering = discovering;
7322 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Completion callback for re-enabling advertising: debug log only. */
7325 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7327 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable advertising if the HCI_ADVERTISING setting is still on,
 * e.g. after an operation that had to pause it.
 */
7330 void mgmt_reenable_advertising(struct hci_dev *hdev)
7332 struct hci_request req;
7334 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7337 hci_req_init(&req, hdev);
7338 enable_advertising(&req);
7339 hci_req_run(&req, adv_enable_complete);