/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
98 static const u16 mgmt_events[] = {
99 MGMT_EV_CONTROLLER_ERROR,
101 MGMT_EV_INDEX_REMOVED,
102 MGMT_EV_NEW_SETTINGS,
103 MGMT_EV_CLASS_OF_DEV_CHANGED,
104 MGMT_EV_LOCAL_NAME_CHANGED,
105 MGMT_EV_NEW_LINK_KEY,
106 MGMT_EV_NEW_LONG_TERM_KEY,
107 MGMT_EV_DEVICE_CONNECTED,
108 MGMT_EV_DEVICE_DISCONNECTED,
109 MGMT_EV_CONNECT_FAILED,
110 MGMT_EV_PIN_CODE_REQUEST,
111 MGMT_EV_USER_CONFIRM_REQUEST,
112 MGMT_EV_USER_PASSKEY_REQUEST,
114 MGMT_EV_DEVICE_FOUND,
116 MGMT_EV_DEVICE_BLOCKED,
117 MGMT_EV_DEVICE_UNBLOCKED,
118 MGMT_EV_DEVICE_UNPAIRED,
119 MGMT_EV_PASSKEY_NOTIFY,
122 MGMT_EV_DEVICE_ADDED,
123 MGMT_EV_DEVICE_REMOVED,
124 MGMT_EV_NEW_CONN_PARAM,
125 MGMT_EV_UNCONF_INDEX_ADDED,
126 MGMT_EV_UNCONF_INDEX_REMOVED,
127 MGMT_EV_NEW_CONFIG_OPTIONS,
130 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
132 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
133 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
136 struct list_head list;
144 /* HCI to MGMT error code conversion table */
145 static u8 mgmt_status_table[] = {
147 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
148 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
149 MGMT_STATUS_FAILED, /* Hardware Failure */
150 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
151 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
152 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
153 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
154 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
155 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
157 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
158 MGMT_STATUS_BUSY, /* Command Disallowed */
159 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
160 MGMT_STATUS_REJECTED, /* Rejected Security */
161 MGMT_STATUS_REJECTED, /* Rejected Personal */
162 MGMT_STATUS_TIMEOUT, /* Host Timeout */
163 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
165 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
166 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
167 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
168 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
169 MGMT_STATUS_BUSY, /* Repeated Attempts */
170 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
171 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
172 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
173 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
174 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
175 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
176 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
177 MGMT_STATUS_FAILED, /* Unspecified Error */
178 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
179 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
180 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
181 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
182 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
183 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
184 MGMT_STATUS_FAILED, /* Unit Link Key Used */
185 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
186 MGMT_STATUS_TIMEOUT, /* Instant Passed */
187 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
188 MGMT_STATUS_FAILED, /* Transaction Collision */
189 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
190 MGMT_STATUS_REJECTED, /* QoS Rejected */
191 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
192 MGMT_STATUS_REJECTED, /* Insufficient Security */
193 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
194 MGMT_STATUS_BUSY, /* Role Switch Pending */
195 MGMT_STATUS_FAILED, /* Slot Violation */
196 MGMT_STATUS_FAILED, /* Role Switch Failed */
197 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
198 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
199 MGMT_STATUS_BUSY, /* Host Busy Pairing */
200 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
201 MGMT_STATUS_BUSY, /* Controller Busy */
202 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
203 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
204 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
205 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
206 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
209 static u8 mgmt_status(u8 hci_status)
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
214 return MGMT_STATUS_FAILED;
217 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
221 struct mgmt_hdr *hdr;
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
230 hdr->index = cpu_to_le16(hdev->id);
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
236 memcpy(skb_put(skb, data_len), data, data_len);
239 __net_timestamp(skb);
241 hci_send_to_control(skb, skip_sk);
247 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
250 struct mgmt_hdr *hdr;
251 struct mgmt_ev_cmd_status *ev;
254 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
256 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
260 hdr = (void *) skb_put(skb, sizeof(*hdr));
262 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263 hdr->index = cpu_to_le16(index);
264 hdr->len = cpu_to_le16(sizeof(*ev));
266 ev = (void *) skb_put(skb, sizeof(*ev));
268 ev->opcode = cpu_to_le16(cmd);
270 err = sock_queue_rcv_skb(sk, skb);
277 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
278 void *rp, size_t rp_len)
281 struct mgmt_hdr *hdr;
282 struct mgmt_ev_cmd_complete *ev;
285 BT_DBG("sock %p", sk);
287 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
291 hdr = (void *) skb_put(skb, sizeof(*hdr));
293 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
294 hdr->index = cpu_to_le16(index);
295 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
297 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
298 ev->opcode = cpu_to_le16(cmd);
302 memcpy(ev->data, rp, rp_len);
304 err = sock_queue_rcv_skb(sk, skb);
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
314 struct mgmt_rp_read_version rp;
316 BT_DBG("sock %p", sk);
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
328 struct mgmt_rp_read_commands *rp;
329 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 const u16 num_events = ARRAY_SIZE(mgmt_events);
335 BT_DBG("sock %p", sk);
337 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
339 rp = kmalloc(rp_size, GFP_KERNEL);
343 rp->num_commands = cpu_to_le16(num_commands);
344 rp->num_events = cpu_to_le16(num_events);
346 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 put_unaligned_le16(mgmt_commands[i], opcode);
349 for (i = 0; i < num_events; i++, opcode++)
350 put_unaligned_le16(mgmt_events[i], opcode);
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
359 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
362 struct mgmt_rp_read_index_list *rp;
368 BT_DBG("sock %p", sk);
370 read_lock(&hci_dev_list_lock);
373 list_for_each_entry(d, &hci_dev_list, list) {
374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
379 rp_len = sizeof(*rp) + (2 * count);
380 rp = kmalloc(rp_len, GFP_ATOMIC);
382 read_unlock(&hci_dev_list_lock);
387 list_for_each_entry(d, &hci_dev_list, list) {
388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
401 rp->index[count++] = cpu_to_le16(d->id);
402 BT_DBG("Added hci%u", d->id);
406 rp->num_controllers = cpu_to_le16(count);
407 rp_len = sizeof(*rp) + (2 * count);
409 read_unlock(&hci_dev_list_lock);
411 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
419 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
422 struct mgmt_rp_read_unconf_index_list *rp;
428 BT_DBG("sock %p", sk);
430 read_lock(&hci_dev_list_lock);
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
439 rp_len = sizeof(*rp) + (2 * count);
440 rp = kmalloc(rp_len, GFP_ATOMIC);
442 read_unlock(&hci_dev_list_lock);
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
469 read_unlock(&hci_dev_list_lock);
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
479 static bool is_configured(struct hci_dev *hdev)
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
492 static __le32 get_missing_options(struct hci_dev *hdev)
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
504 return cpu_to_le32(options);
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
509 __le32 options = get_missing_options(hdev);
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
517 __le32 options = get_missing_options(hdev);
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
523 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 void *data, u16 data_len)
526 struct mgmt_rp_read_config_info rp;
529 BT_DBG("sock %p %s", sk, hdev->name);
533 memset(&rp, 0, sizeof(rp));
534 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 options |= MGMT_OPTION_EXTERNAL_CONFIG;
539 if (hdev->set_bdaddr)
540 options |= MGMT_OPTION_PUBLIC_ADDRESS;
542 rp.supported_options = cpu_to_le32(options);
543 rp.missing_options = get_missing_options(hdev);
545 hci_dev_unlock(hdev);
547 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
551 static u32 get_supported_settings(struct hci_dev *hdev)
555 settings |= MGMT_SETTING_POWERED;
556 settings |= MGMT_SETTING_PAIRABLE;
557 settings |= MGMT_SETTING_DEBUG_KEYS;
558 settings |= MGMT_SETTING_CONNECTABLE;
559 settings |= MGMT_SETTING_DISCOVERABLE;
561 if (lmp_bredr_capable(hdev)) {
562 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
563 settings |= MGMT_SETTING_FAST_CONNECTABLE;
564 settings |= MGMT_SETTING_BREDR;
565 settings |= MGMT_SETTING_LINK_SECURITY;
567 if (lmp_ssp_capable(hdev)) {
568 settings |= MGMT_SETTING_SSP;
569 settings |= MGMT_SETTING_HS;
572 if (lmp_sc_capable(hdev) ||
573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_PRIVACY;
583 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
585 settings |= MGMT_SETTING_CONFIGURATION;
590 static u32 get_current_settings(struct hci_dev *hdev)
594 if (hdev_is_powered(hdev))
595 settings |= MGMT_SETTING_POWERED;
597 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
598 settings |= MGMT_SETTING_CONNECTABLE;
600 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
601 settings |= MGMT_SETTING_FAST_CONNECTABLE;
603 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
604 settings |= MGMT_SETTING_DISCOVERABLE;
606 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
607 settings |= MGMT_SETTING_PAIRABLE;
609 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
610 settings |= MGMT_SETTING_BREDR;
612 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
613 settings |= MGMT_SETTING_LE;
615 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
616 settings |= MGMT_SETTING_LINK_SECURITY;
618 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
619 settings |= MGMT_SETTING_SSP;
621 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
622 settings |= MGMT_SETTING_HS;
624 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
625 settings |= MGMT_SETTING_ADVERTISING;
627 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
628 settings |= MGMT_SETTING_SECURE_CONN;
630 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
631 settings |= MGMT_SETTING_DEBUG_KEYS;
633 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
634 settings |= MGMT_SETTING_PRIVACY;
639 #define PNP_INFO_SVCLASS_ID 0x1200
641 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
643 u8 *ptr = data, *uuids_start = NULL;
644 struct bt_uuid *uuid;
649 list_for_each_entry(uuid, &hdev->uuids, list) {
652 if (uuid->size != 16)
655 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
659 if (uuid16 == PNP_INFO_SVCLASS_ID)
665 uuids_start[1] = EIR_UUID16_ALL;
669 /* Stop if not enough space to put next UUID */
670 if ((ptr - data) + sizeof(u16) > len) {
671 uuids_start[1] = EIR_UUID16_SOME;
675 *ptr++ = (uuid16 & 0x00ff);
676 *ptr++ = (uuid16 & 0xff00) >> 8;
677 uuids_start[0] += sizeof(uuid16);
683 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
685 u8 *ptr = data, *uuids_start = NULL;
686 struct bt_uuid *uuid;
691 list_for_each_entry(uuid, &hdev->uuids, list) {
692 if (uuid->size != 32)
698 uuids_start[1] = EIR_UUID32_ALL;
702 /* Stop if not enough space to put next UUID */
703 if ((ptr - data) + sizeof(u32) > len) {
704 uuids_start[1] = EIR_UUID32_SOME;
708 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
710 uuids_start[0] += sizeof(u32);
716 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
718 u8 *ptr = data, *uuids_start = NULL;
719 struct bt_uuid *uuid;
724 list_for_each_entry(uuid, &hdev->uuids, list) {
725 if (uuid->size != 128)
731 uuids_start[1] = EIR_UUID128_ALL;
735 /* Stop if not enough space to put next UUID */
736 if ((ptr - data) + 16 > len) {
737 uuids_start[1] = EIR_UUID128_SOME;
741 memcpy(ptr, uuid->uuid, 16);
743 uuids_start[0] += 16;
749 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
751 struct pending_cmd *cmd;
753 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
754 if (cmd->opcode == opcode)
761 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762 struct hci_dev *hdev,
765 struct pending_cmd *cmd;
767 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
768 if (cmd->user_data != data)
770 if (cmd->opcode == opcode)
777 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
782 name_len = strlen(hdev->dev_name);
784 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
786 if (name_len > max_len) {
788 ptr[1] = EIR_NAME_SHORT;
790 ptr[1] = EIR_NAME_COMPLETE;
792 ptr[0] = name_len + 1;
794 memcpy(ptr + 2, hdev->dev_name, name_len);
796 ad_len += (name_len + 2);
797 ptr += (name_len + 2);
803 static void update_scan_rsp_data(struct hci_request *req)
805 struct hci_dev *hdev = req->hdev;
806 struct hci_cp_le_set_scan_rsp_data cp;
809 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
812 memset(&cp, 0, sizeof(cp));
814 len = create_scan_rsp_data(hdev, cp.data);
816 if (hdev->scan_rsp_data_len == len &&
817 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
820 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
821 hdev->scan_rsp_data_len = len;
825 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
828 static u8 get_adv_discov_flags(struct hci_dev *hdev)
830 struct pending_cmd *cmd;
832 /* If there's a pending mgmt command the flags will not yet have
833 * their final values, so check for this first.
835 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
837 struct mgmt_mode *cp = cmd->param;
839 return LE_AD_GENERAL;
840 else if (cp->val == 0x02)
841 return LE_AD_LIMITED;
843 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
844 return LE_AD_LIMITED;
845 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
846 return LE_AD_GENERAL;
852 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
854 u8 ad_len = 0, flags = 0;
856 flags |= get_adv_discov_flags(hdev);
858 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
859 flags |= LE_AD_NO_BREDR;
862 BT_DBG("adv flags 0x%02x", flags);
872 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
874 ptr[1] = EIR_TX_POWER;
875 ptr[2] = (u8) hdev->adv_tx_power;
884 static void update_adv_data(struct hci_request *req)
886 struct hci_dev *hdev = req->hdev;
887 struct hci_cp_le_set_adv_data cp;
890 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
893 memset(&cp, 0, sizeof(cp));
895 len = create_adv_data(hdev, cp.data);
897 if (hdev->adv_data_len == len &&
898 memcmp(cp.data, hdev->adv_data, len) == 0)
901 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
902 hdev->adv_data_len = len;
906 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
909 static void create_eir(struct hci_dev *hdev, u8 *data)
914 name_len = strlen(hdev->dev_name);
920 ptr[1] = EIR_NAME_SHORT;
922 ptr[1] = EIR_NAME_COMPLETE;
924 /* EIR Data length */
925 ptr[0] = name_len + 1;
927 memcpy(ptr + 2, hdev->dev_name, name_len);
929 ptr += (name_len + 2);
932 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
934 ptr[1] = EIR_TX_POWER;
935 ptr[2] = (u8) hdev->inq_tx_power;
940 if (hdev->devid_source > 0) {
942 ptr[1] = EIR_DEVICE_ID;
944 put_unaligned_le16(hdev->devid_source, ptr + 2);
945 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
946 put_unaligned_le16(hdev->devid_product, ptr + 6);
947 put_unaligned_le16(hdev->devid_version, ptr + 8);
952 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
953 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
954 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
957 static void update_eir(struct hci_request *req)
959 struct hci_dev *hdev = req->hdev;
960 struct hci_cp_write_eir cp;
962 if (!hdev_is_powered(hdev))
965 if (!lmp_ext_inq_capable(hdev))
968 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
971 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
974 memset(&cp, 0, sizeof(cp));
976 create_eir(hdev, cp.data);
978 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
981 memcpy(hdev->eir, cp.data, sizeof(cp.data));
983 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
986 static u8 get_service_classes(struct hci_dev *hdev)
988 struct bt_uuid *uuid;
991 list_for_each_entry(uuid, &hdev->uuids, list)
992 val |= uuid->svc_hint;
997 static void update_class(struct hci_request *req)
999 struct hci_dev *hdev = req->hdev;
1002 BT_DBG("%s", hdev->name);
1004 if (!hdev_is_powered(hdev))
1007 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1010 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1013 cod[0] = hdev->minor_class;
1014 cod[1] = hdev->major_class;
1015 cod[2] = get_service_classes(hdev);
1017 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1020 if (memcmp(cod, hdev->dev_class, 3) == 0)
1023 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1026 static bool get_connectable(struct hci_dev *hdev)
1028 struct pending_cmd *cmd;
1030 /* If there's a pending mgmt command the flag will not yet have
1031 * it's final value, so check for this first.
1033 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1035 struct mgmt_mode *cp = cmd->param;
1039 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1042 static void disable_advertising(struct hci_request *req)
1046 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1049 static void enable_advertising(struct hci_request *req)
1051 struct hci_dev *hdev = req->hdev;
1052 struct hci_cp_le_set_adv_param cp;
1053 u8 own_addr_type, enable = 0x01;
1056 if (hci_conn_num(hdev, LE_LINK) > 0)
1059 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1060 disable_advertising(req);
1062 /* Clear the HCI_LE_ADV bit temporarily so that the
1063 * hci_update_random_address knows that it's safe to go ahead
1064 * and write a new random address. The flag will be set back on
1065 * as soon as the SET_ADV_ENABLE HCI command completes.
1067 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1069 connectable = get_connectable(hdev);
1071 /* Set require_privacy to true only when non-connectable
1072 * advertising is used. In that case it is fine to use a
1073 * non-resolvable private address.
1075 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1078 memset(&cp, 0, sizeof(cp));
1079 cp.min_interval = cpu_to_le16(0x0800);
1080 cp.max_interval = cpu_to_le16(0x0800);
1081 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1082 cp.own_address_type = own_addr_type;
1083 cp.channel_map = hdev->le_adv_channel_map;
1085 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1087 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1090 static void service_cache_off(struct work_struct *work)
1092 struct hci_dev *hdev = container_of(work, struct hci_dev,
1093 service_cache.work);
1094 struct hci_request req;
1096 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1099 hci_req_init(&req, hdev);
1106 hci_dev_unlock(hdev);
1108 hci_req_run(&req, NULL);
1111 static void rpa_expired(struct work_struct *work)
1113 struct hci_dev *hdev = container_of(work, struct hci_dev,
1115 struct hci_request req;
1119 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1121 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1124 /* The generation of a new RPA and programming it into the
1125 * controller happens in the enable_advertising() function.
1127 hci_req_init(&req, hdev);
1128 enable_advertising(&req);
1129 hci_req_run(&req, NULL);
1132 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1134 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1137 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1138 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1140 /* Non-mgmt controlled devices get this bit set
1141 * implicitly so that pairing works for them, however
1142 * for mgmt we require user-space to explicitly enable
1145 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1148 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1149 void *data, u16 data_len)
1151 struct mgmt_rp_read_info rp;
1153 BT_DBG("sock %p %s", sk, hdev->name);
1157 memset(&rp, 0, sizeof(rp));
1159 bacpy(&rp.bdaddr, &hdev->bdaddr);
1161 rp.version = hdev->hci_ver;
1162 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1164 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1165 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1167 memcpy(rp.dev_class, hdev->dev_class, 3);
1169 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1170 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1172 hci_dev_unlock(hdev);
1174 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1178 static void mgmt_pending_free(struct pending_cmd *cmd)
1185 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1186 struct hci_dev *hdev, void *data,
1189 struct pending_cmd *cmd;
1191 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1195 cmd->opcode = opcode;
1196 cmd->index = hdev->id;
1198 cmd->param = kmalloc(len, GFP_KERNEL);
1205 memcpy(cmd->param, data, len);
1210 list_add(&cmd->list, &hdev->mgmt_pending);
1215 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1216 void (*cb)(struct pending_cmd *cmd,
1220 struct pending_cmd *cmd, *tmp;
1222 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1223 if (opcode > 0 && cmd->opcode != opcode)
1230 static void mgmt_pending_remove(struct pending_cmd *cmd)
1232 list_del(&cmd->list);
1233 mgmt_pending_free(cmd);
1236 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1238 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1240 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1244 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1246 BT_DBG("%s status 0x%02x", hdev->name, status);
1248 if (hci_conn_count(hdev) == 0) {
1249 cancel_delayed_work(&hdev->power_off);
1250 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1254 static bool hci_stop_discovery(struct hci_request *req)
1256 struct hci_dev *hdev = req->hdev;
1257 struct hci_cp_remote_name_req_cancel cp;
1258 struct inquiry_entry *e;
1260 switch (hdev->discovery.state) {
1261 case DISCOVERY_FINDING:
1262 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1263 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1265 cancel_delayed_work(&hdev->le_scan_disable);
1266 hci_req_add_le_scan_disable(req);
1271 case DISCOVERY_RESOLVING:
1272 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1277 bacpy(&cp.bdaddr, &e->data.bdaddr);
1278 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1284 /* Passive scanning */
1285 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1286 hci_req_add_le_scan_disable(req);
1296 static int clean_up_hci_state(struct hci_dev *hdev)
1298 struct hci_request req;
1299 struct hci_conn *conn;
1300 bool discov_stopped;
1303 hci_req_init(&req, hdev);
1305 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1306 test_bit(HCI_PSCAN, &hdev->flags)) {
1308 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1311 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1312 disable_advertising(&req);
1314 discov_stopped = hci_stop_discovery(&req);
1316 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1317 struct hci_cp_disconnect dc;
1318 struct hci_cp_reject_conn_req rej;
1320 switch (conn->state) {
1323 dc.handle = cpu_to_le16(conn->handle);
1324 dc.reason = 0x15; /* Terminated due to Power Off */
1325 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1328 if (conn->type == LE_LINK)
1329 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1331 else if (conn->type == ACL_LINK)
1332 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1336 bacpy(&rej.bdaddr, &conn->dst);
1337 rej.reason = 0x15; /* Terminated due to Power Off */
1338 if (conn->type == ACL_LINK)
1339 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1341 else if (conn->type == SCO_LINK)
1342 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1348 err = hci_req_run(&req, clean_up_hci_complete);
1349 if (!err && discov_stopped)
1350 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1355 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1358 struct mgmt_mode *cp = data;
1359 struct pending_cmd *cmd;
1362 BT_DBG("request for %s", hdev->name);
1364 if (cp->val != 0x00 && cp->val != 0x01)
1365 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1366 MGMT_STATUS_INVALID_PARAMS);
1370 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1371 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1377 cancel_delayed_work(&hdev->power_off);
1380 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1382 err = mgmt_powered(hdev, 1);
1387 if (!!cp->val == hdev_is_powered(hdev)) {
1388 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1392 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1399 queue_work(hdev->req_workqueue, &hdev->power_on);
1402 /* Disconnect connections, stop scans, etc */
1403 err = clean_up_hci_state(hdev);
1405 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1406 HCI_POWER_OFF_TIMEOUT);
1408 /* ENODATA means there were no HCI commands queued */
1409 if (err == -ENODATA) {
1410 cancel_delayed_work(&hdev->power_off);
1411 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1417 hci_dev_unlock(hdev);
1421 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1425 ev = cpu_to_le32(get_current_settings(hdev));
1427 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1432 struct hci_dev *hdev;
1436 static void settings_rsp(struct pending_cmd *cmd, void *data)
1438 struct cmd_lookup *match = data;
1440 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1442 list_del(&cmd->list);
1444 if (match->sk == NULL) {
1445 match->sk = cmd->sk;
1446 sock_hold(match->sk);
1449 mgmt_pending_free(cmd);
1452 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1456 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1457 mgmt_pending_remove(cmd);
1460 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1462 if (!lmp_bredr_capable(hdev))
1463 return MGMT_STATUS_NOT_SUPPORTED;
1464 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1465 return MGMT_STATUS_REJECTED;
1467 return MGMT_STATUS_SUCCESS;
1470 static u8 mgmt_le_support(struct hci_dev *hdev)
1472 if (!lmp_le_capable(hdev))
1473 return MGMT_STATUS_NOT_SUPPORTED;
1474 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1475 return MGMT_STATUS_REJECTED;
1477 return MGMT_STATUS_SUCCESS;
1480 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1482 struct pending_cmd *cmd;
1483 struct mgmt_mode *cp;
1484 struct hci_request req;
1487 BT_DBG("status 0x%02x", status);
1491 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1496 u8 mgmt_err = mgmt_status(status);
1497 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1498 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1504 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1507 if (hdev->discov_timeout > 0) {
1508 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1509 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1513 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1517 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1520 new_settings(hdev, cmd->sk);
1522 /* When the discoverable mode gets changed, make sure
1523 * that class of device has the limited discoverable
1524 * bit correctly set.
1526 hci_req_init(&req, hdev);
1528 hci_req_run(&req, NULL);
1531 mgmt_pending_remove(cmd);
1534 hci_dev_unlock(hdev);
1537 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1540 struct mgmt_cp_set_discoverable *cp = data;
1541 struct pending_cmd *cmd;
1542 struct hci_request req;
1547 BT_DBG("request for %s", hdev->name);
1549 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1550 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1551 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1552 MGMT_STATUS_REJECTED);
1554 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1555 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1556 MGMT_STATUS_INVALID_PARAMS);
1558 timeout = __le16_to_cpu(cp->timeout);
1560 /* Disabling discoverable requires that no timeout is set,
1561 * and enabling limited discoverable requires a timeout.
1563 if ((cp->val == 0x00 && timeout > 0) ||
1564 (cp->val == 0x02 && timeout == 0))
1565 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566 MGMT_STATUS_INVALID_PARAMS);
1570 if (!hdev_is_powered(hdev) && timeout > 0) {
1571 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1572 MGMT_STATUS_NOT_POWERED);
1576 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1577 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1578 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1583 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1584 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1585 MGMT_STATUS_REJECTED);
1589 if (!hdev_is_powered(hdev)) {
1590 bool changed = false;
1592 /* Setting limited discoverable when powered off is
1593 * not a valid operation since it requires a timeout
1594 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1596 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1597 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1601 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1606 err = new_settings(hdev, sk);
1611 /* If the current mode is the same, then just update the timeout
1612 * value with the new value. And if only the timeout gets updated,
1613 * then no need for any HCI transactions.
1615 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1616 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1617 &hdev->dev_flags)) {
1618 cancel_delayed_work(&hdev->discov_off);
1619 hdev->discov_timeout = timeout;
1621 if (cp->val && hdev->discov_timeout > 0) {
1622 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1623 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1627 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1631 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1637 /* Cancel any potential discoverable timeout that might be
1638 * still active and store new timeout value. The arming of
1639 * the timeout happens in the complete handler.
1641 cancel_delayed_work(&hdev->discov_off);
1642 hdev->discov_timeout = timeout;
1644 /* Limited discoverable mode */
1645 if (cp->val == 0x02)
1646 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1648 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1650 hci_req_init(&req, hdev);
1652 /* The procedure for LE-only controllers is much simpler - just
1653 * update the advertising data.
1655 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1661 struct hci_cp_write_current_iac_lap hci_cp;
1663 if (cp->val == 0x02) {
1664 /* Limited discoverable mode */
1665 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1666 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1667 hci_cp.iac_lap[1] = 0x8b;
1668 hci_cp.iac_lap[2] = 0x9e;
1669 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1670 hci_cp.iac_lap[4] = 0x8b;
1671 hci_cp.iac_lap[5] = 0x9e;
1673 /* General discoverable mode */
1675 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1676 hci_cp.iac_lap[1] = 0x8b;
1677 hci_cp.iac_lap[2] = 0x9e;
1680 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1681 (hci_cp.num_iac * 3) + 1, &hci_cp);
1683 scan |= SCAN_INQUIRY;
1685 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1688 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1691 update_adv_data(&req);
1693 err = hci_req_run(&req, set_discoverable_complete);
1695 mgmt_pending_remove(cmd);
1698 hci_dev_unlock(hdev);
1702 static void write_fast_connectable(struct hci_request *req, bool enable)
1704 struct hci_dev *hdev = req->hdev;
1705 struct hci_cp_write_page_scan_activity acp;
1708 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1711 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1715 type = PAGE_SCAN_TYPE_INTERLACED;
1717 /* 160 msec page scan interval */
1718 acp.interval = cpu_to_le16(0x0100);
1720 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1722 /* default 1.28 sec page scan */
1723 acp.interval = cpu_to_le16(0x0800);
1726 acp.window = cpu_to_le16(0x0012);
1728 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1729 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1730 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1733 if (hdev->page_scan_type != type)
1734 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1737 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1739 struct pending_cmd *cmd;
1740 struct mgmt_mode *cp;
1743 BT_DBG("status 0x%02x", status);
1747 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1752 u8 mgmt_err = mgmt_status(status);
1753 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1759 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1761 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1763 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1766 new_settings(hdev, cmd->sk);
1767 hci_update_background_scan(hdev);
1771 mgmt_pending_remove(cmd);
1774 hci_dev_unlock(hdev);
1777 static int set_connectable_update_settings(struct hci_dev *hdev,
1778 struct sock *sk, u8 val)
1780 bool changed = false;
1783 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1787 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1789 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1790 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1793 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1798 hci_update_background_scan(hdev);
1799 return new_settings(hdev, sk);
1805 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1808 struct mgmt_mode *cp = data;
1809 struct pending_cmd *cmd;
1810 struct hci_request req;
1814 BT_DBG("request for %s", hdev->name);
1816 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1817 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1818 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1819 MGMT_STATUS_REJECTED);
1821 if (cp->val != 0x00 && cp->val != 0x01)
1822 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1823 MGMT_STATUS_INVALID_PARAMS);
1827 if (!hdev_is_powered(hdev)) {
1828 err = set_connectable_update_settings(hdev, sk, cp->val);
1832 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1833 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1834 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1839 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1845 hci_req_init(&req, hdev);
1847 /* If BR/EDR is not enabled and we disable advertising as a
1848 * by-product of disabling connectable, we need to update the
1849 * advertising flags.
1851 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1853 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1854 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1856 update_adv_data(&req);
1857 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1863 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1864 hdev->discov_timeout > 0)
1865 cancel_delayed_work(&hdev->discov_off);
1868 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1871 /* If we're going from non-connectable to connectable or
1872 * vice-versa when fast connectable is enabled ensure that fast
1873 * connectable gets disabled. write_fast_connectable won't do
1874 * anything if the page scan parameters are already what they
1877 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1878 write_fast_connectable(&req, false);
1880 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1881 !test_bit(HCI_LE_ADV, &hdev->dev_flags))
1882 enable_advertising(&req);
1884 err = hci_req_run(&req, set_connectable_complete);
1886 mgmt_pending_remove(cmd);
1887 if (err == -ENODATA)
1888 err = set_connectable_update_settings(hdev, sk,
1894 hci_dev_unlock(hdev);
1898 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1901 struct mgmt_mode *cp = data;
1905 BT_DBG("request for %s", hdev->name);
1907 if (cp->val != 0x00 && cp->val != 0x01)
1908 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1909 MGMT_STATUS_INVALID_PARAMS);
1914 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1916 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1918 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1923 err = new_settings(hdev, sk);
1926 hci_dev_unlock(hdev);
1930 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1933 struct mgmt_mode *cp = data;
1934 struct pending_cmd *cmd;
1938 BT_DBG("request for %s", hdev->name);
1940 status = mgmt_bredr_support(hdev);
1942 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1945 if (cp->val != 0x00 && cp->val != 0x01)
1946 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1947 MGMT_STATUS_INVALID_PARAMS);
1951 if (!hdev_is_powered(hdev)) {
1952 bool changed = false;
1954 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1955 &hdev->dev_flags)) {
1956 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1960 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1965 err = new_settings(hdev, sk);
1970 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1971 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1978 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1979 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1983 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1989 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1991 mgmt_pending_remove(cmd);
1996 hci_dev_unlock(hdev);
2000 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2002 struct mgmt_mode *cp = data;
2003 struct pending_cmd *cmd;
2007 BT_DBG("request for %s", hdev->name);
2009 status = mgmt_bredr_support(hdev);
2011 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2013 if (!lmp_ssp_capable(hdev))
2014 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2015 MGMT_STATUS_NOT_SUPPORTED);
2017 if (cp->val != 0x00 && cp->val != 0x01)
2018 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2019 MGMT_STATUS_INVALID_PARAMS);
2023 if (!hdev_is_powered(hdev)) {
2027 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2030 changed = test_and_clear_bit(HCI_SSP_ENABLED,
2033 changed = test_and_clear_bit(HCI_HS_ENABLED,
2036 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2039 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2044 err = new_settings(hdev, sk);
2049 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2050 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2051 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2056 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2057 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2061 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2067 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2068 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2069 sizeof(cp->val), &cp->val);
2071 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2073 mgmt_pending_remove(cmd);
2078 hci_dev_unlock(hdev);
2082 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2084 struct mgmt_mode *cp = data;
2089 BT_DBG("request for %s", hdev->name);
2091 status = mgmt_bredr_support(hdev);
2093 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2095 if (!lmp_ssp_capable(hdev))
2096 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2097 MGMT_STATUS_NOT_SUPPORTED);
2099 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2100 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2101 MGMT_STATUS_REJECTED);
2103 if (cp->val != 0x00 && cp->val != 0x01)
2104 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2105 MGMT_STATUS_INVALID_PARAMS);
2110 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2112 if (hdev_is_powered(hdev)) {
2113 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2114 MGMT_STATUS_REJECTED);
2118 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2121 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2126 err = new_settings(hdev, sk);
2129 hci_dev_unlock(hdev);
2133 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2135 struct cmd_lookup match = { NULL, hdev };
2138 u8 mgmt_err = mgmt_status(status);
2140 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2145 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2147 new_settings(hdev, match.sk);
2152 /* Make sure the controller has a good default for
2153 * advertising data. Restrict the update to when LE
2154 * has actually been enabled. During power on, the
2155 * update in powered_update_hci will take care of it.
2157 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2158 struct hci_request req;
2162 hci_req_init(&req, hdev);
2163 update_adv_data(&req);
2164 update_scan_rsp_data(&req);
2165 hci_req_run(&req, NULL);
2167 hci_update_background_scan(hdev);
2169 hci_dev_unlock(hdev);
2173 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2175 struct mgmt_mode *cp = data;
2176 struct hci_cp_write_le_host_supported hci_cp;
2177 struct pending_cmd *cmd;
2178 struct hci_request req;
2182 BT_DBG("request for %s", hdev->name);
2184 if (!lmp_le_capable(hdev))
2185 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2186 MGMT_STATUS_NOT_SUPPORTED);
2188 if (cp->val != 0x00 && cp->val != 0x01)
2189 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2190 MGMT_STATUS_INVALID_PARAMS);
2192 /* LE-only devices do not allow toggling LE on/off */
2193 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2194 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2195 MGMT_STATUS_REJECTED);
2200 enabled = lmp_host_le_capable(hdev);
2202 if (!hdev_is_powered(hdev) || val == enabled) {
2203 bool changed = false;
2205 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2206 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2210 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2211 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2215 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2220 err = new_settings(hdev, sk);
2225 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2226 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2227 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2232 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2238 hci_req_init(&req, hdev);
2240 memset(&hci_cp, 0, sizeof(hci_cp));
2244 hci_cp.simul = lmp_le_br_capable(hdev);
2246 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2247 disable_advertising(&req);
2250 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2253 err = hci_req_run(&req, le_enable_complete);
2255 mgmt_pending_remove(cmd);
2258 hci_dev_unlock(hdev);
2262 /* This is a helper function to test for pending mgmt commands that can
2263 * cause CoD or EIR HCI commands. We can only allow one such pending
2264 * mgmt command at a time since otherwise we cannot easily track what
2265 * the current values are, will be, and based on that calculate if a new
2266 * HCI command needs to be sent and if yes with what value.
2268 static bool pending_eir_or_class(struct hci_dev *hdev)
2270 struct pending_cmd *cmd;
2272 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2273 switch (cmd->opcode) {
2274 case MGMT_OP_ADD_UUID:
2275 case MGMT_OP_REMOVE_UUID:
2276 case MGMT_OP_SET_DEV_CLASS:
2277 case MGMT_OP_SET_POWERED:
2285 static const u8 bluetooth_base_uuid[] = {
2286 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2287 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2290 static u8 get_uuid_size(const u8 *uuid)
2294 if (memcmp(uuid, bluetooth_base_uuid, 12))
2297 val = get_unaligned_le32(&uuid[12]);
2304 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2306 struct pending_cmd *cmd;
2310 cmd = mgmt_pending_find(mgmt_op, hdev);
2314 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2315 hdev->dev_class, 3);
2317 mgmt_pending_remove(cmd);
2320 hci_dev_unlock(hdev);
2323 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2325 BT_DBG("status 0x%02x", status);
2327 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2330 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2332 struct mgmt_cp_add_uuid *cp = data;
2333 struct pending_cmd *cmd;
2334 struct hci_request req;
2335 struct bt_uuid *uuid;
2338 BT_DBG("request for %s", hdev->name);
2342 if (pending_eir_or_class(hdev)) {
2343 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2348 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2354 memcpy(uuid->uuid, cp->uuid, 16);
2355 uuid->svc_hint = cp->svc_hint;
2356 uuid->size = get_uuid_size(cp->uuid);
2358 list_add_tail(&uuid->list, &hdev->uuids);
2360 hci_req_init(&req, hdev);
2365 err = hci_req_run(&req, add_uuid_complete);
2367 if (err != -ENODATA)
2370 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2371 hdev->dev_class, 3);
2375 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2384 hci_dev_unlock(hdev);
2388 static bool enable_service_cache(struct hci_dev *hdev)
2390 if (!hdev_is_powered(hdev))
2393 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2394 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2402 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2404 BT_DBG("status 0x%02x", status);
2406 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2409 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2412 struct mgmt_cp_remove_uuid *cp = data;
2413 struct pending_cmd *cmd;
2414 struct bt_uuid *match, *tmp;
2415 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2416 struct hci_request req;
2419 BT_DBG("request for %s", hdev->name);
2423 if (pending_eir_or_class(hdev)) {
2424 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2429 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2430 hci_uuids_clear(hdev);
2432 if (enable_service_cache(hdev)) {
2433 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2434 0, hdev->dev_class, 3);
2443 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2444 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2447 list_del(&match->list);
2453 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2454 MGMT_STATUS_INVALID_PARAMS);
2459 hci_req_init(&req, hdev);
2464 err = hci_req_run(&req, remove_uuid_complete);
2466 if (err != -ENODATA)
2469 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2470 hdev->dev_class, 3);
2474 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2483 hci_dev_unlock(hdev);
2487 static void set_class_complete(struct hci_dev *hdev, u8 status)
2489 BT_DBG("status 0x%02x", status);
2491 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2494 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2497 struct mgmt_cp_set_dev_class *cp = data;
2498 struct pending_cmd *cmd;
2499 struct hci_request req;
2502 BT_DBG("request for %s", hdev->name);
2504 if (!lmp_bredr_capable(hdev))
2505 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2506 MGMT_STATUS_NOT_SUPPORTED);
2510 if (pending_eir_or_class(hdev)) {
2511 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2516 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2517 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2518 MGMT_STATUS_INVALID_PARAMS);
2522 hdev->major_class = cp->major;
2523 hdev->minor_class = cp->minor;
2525 if (!hdev_is_powered(hdev)) {
2526 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2527 hdev->dev_class, 3);
2531 hci_req_init(&req, hdev);
2533 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2534 hci_dev_unlock(hdev);
2535 cancel_delayed_work_sync(&hdev->service_cache);
2542 err = hci_req_run(&req, set_class_complete);
2544 if (err != -ENODATA)
2547 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2548 hdev->dev_class, 3);
2552 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2561 hci_dev_unlock(hdev);
2565 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2568 struct mgmt_cp_load_link_keys *cp = data;
2569 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2570 sizeof(struct mgmt_link_key_info));
2571 u16 key_count, expected_len;
2575 BT_DBG("request for %s", hdev->name);
2577 if (!lmp_bredr_capable(hdev))
2578 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2579 MGMT_STATUS_NOT_SUPPORTED);
2581 key_count = __le16_to_cpu(cp->key_count);
2582 if (key_count > max_key_count) {
2583 BT_ERR("load_link_keys: too big key_count value %u",
2585 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2586 MGMT_STATUS_INVALID_PARAMS);
2589 expected_len = sizeof(*cp) + key_count *
2590 sizeof(struct mgmt_link_key_info);
2591 if (expected_len != len) {
2592 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2594 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2595 MGMT_STATUS_INVALID_PARAMS);
2598 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2599 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2600 MGMT_STATUS_INVALID_PARAMS);
2602 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2605 for (i = 0; i < key_count; i++) {
2606 struct mgmt_link_key_info *key = &cp->keys[i];
2608 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2609 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2610 MGMT_STATUS_INVALID_PARAMS);
2615 hci_link_keys_clear(hdev);
2618 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2621 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2625 new_settings(hdev, NULL);
2627 for (i = 0; i < key_count; i++) {
2628 struct mgmt_link_key_info *key = &cp->keys[i];
2630 /* Always ignore debug keys and require a new pairing if
2631 * the user wants to use them.
2633 if (key->type == HCI_LK_DEBUG_COMBINATION)
2636 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2637 key->type, key->pin_len, NULL);
2640 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2642 hci_dev_unlock(hdev);
2647 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2648 u8 addr_type, struct sock *skip_sk)
2650 struct mgmt_ev_device_unpaired ev;
2652 bacpy(&ev.addr.bdaddr, bdaddr);
2653 ev.addr.type = addr_type;
2655 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2659 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2662 struct mgmt_cp_unpair_device *cp = data;
2663 struct mgmt_rp_unpair_device rp;
2664 struct hci_cp_disconnect dc;
2665 struct pending_cmd *cmd;
2666 struct hci_conn *conn;
2669 memset(&rp, 0, sizeof(rp));
2670 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2671 rp.addr.type = cp->addr.type;
2673 if (!bdaddr_type_is_valid(cp->addr.type))
2674 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2675 MGMT_STATUS_INVALID_PARAMS,
2678 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2679 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2680 MGMT_STATUS_INVALID_PARAMS,
2685 if (!hdev_is_powered(hdev)) {
2686 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2687 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2691 if (cp->addr.type == BDADDR_BREDR) {
2692 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2696 if (cp->addr.type == BDADDR_LE_PUBLIC)
2697 addr_type = ADDR_LE_DEV_PUBLIC;
2699 addr_type = ADDR_LE_DEV_RANDOM;
2701 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2703 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2705 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2709 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2710 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2714 if (cp->disconnect) {
2715 if (cp->addr.type == BDADDR_BREDR)
2716 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2719 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2726 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2728 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2732 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2739 dc.handle = cpu_to_le16(conn->handle);
2740 dc.reason = 0x13; /* Remote User Terminated Connection */
2741 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2743 mgmt_pending_remove(cmd);
2746 hci_dev_unlock(hdev);
2750 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2753 struct mgmt_cp_disconnect *cp = data;
2754 struct mgmt_rp_disconnect rp;
2755 struct hci_cp_disconnect dc;
2756 struct pending_cmd *cmd;
2757 struct hci_conn *conn;
2762 memset(&rp, 0, sizeof(rp));
2763 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2764 rp.addr.type = cp->addr.type;
2766 if (!bdaddr_type_is_valid(cp->addr.type))
2767 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2768 MGMT_STATUS_INVALID_PARAMS,
2773 if (!test_bit(HCI_UP, &hdev->flags)) {
2774 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2775 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2779 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2780 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2781 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2785 if (cp->addr.type == BDADDR_BREDR)
2786 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2789 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2791 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2792 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2793 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2797 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2803 dc.handle = cpu_to_le16(conn->handle);
2804 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2806 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2808 mgmt_pending_remove(cmd);
2811 hci_dev_unlock(hdev);
2815 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2817 switch (link_type) {
2819 switch (addr_type) {
2820 case ADDR_LE_DEV_PUBLIC:
2821 return BDADDR_LE_PUBLIC;
2824 /* Fallback to LE Random address type */
2825 return BDADDR_LE_RANDOM;
2829 /* Fallback to BR/EDR type */
2830 return BDADDR_BREDR;
2834 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2837 struct mgmt_rp_get_connections *rp;
2847 if (!hdev_is_powered(hdev)) {
2848 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2849 MGMT_STATUS_NOT_POWERED);
2854 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2855 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2859 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2860 rp = kmalloc(rp_len, GFP_KERNEL);
2867 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2868 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2870 bacpy(&rp->addr[i].bdaddr, &c->dst);
2871 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2872 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2877 rp->conn_count = cpu_to_le16(i);
2879 /* Recalculate length in case of filtered SCO connections, etc */
2880 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2882 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2888 hci_dev_unlock(hdev);
2892 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2893 struct mgmt_cp_pin_code_neg_reply *cp)
2895 struct pending_cmd *cmd;
2898 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2903 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2904 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2906 mgmt_pending_remove(cmd);
2911 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2914 struct hci_conn *conn;
2915 struct mgmt_cp_pin_code_reply *cp = data;
2916 struct hci_cp_pin_code_reply reply;
2917 struct pending_cmd *cmd;
2924 if (!hdev_is_powered(hdev)) {
2925 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2926 MGMT_STATUS_NOT_POWERED);
2930 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2932 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2933 MGMT_STATUS_NOT_CONNECTED);
2937 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2938 struct mgmt_cp_pin_code_neg_reply ncp;
2940 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2942 BT_ERR("PIN code is not 16 bytes long");
2944 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2946 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2947 MGMT_STATUS_INVALID_PARAMS);
2952 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2958 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2959 reply.pin_len = cp->pin_len;
2960 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2962 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2964 mgmt_pending_remove(cmd);
2967 hci_dev_unlock(hdev);
2971 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2974 struct mgmt_cp_set_io_capability *cp = data;
2978 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2979 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2980 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2984 hdev->io_capability = cp->io_capability;
2986 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2987 hdev->io_capability);
2989 hci_dev_unlock(hdev);
2991 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2995 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2997 struct hci_dev *hdev = conn->hdev;
2998 struct pending_cmd *cmd;
3000 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3001 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3004 if (cmd->user_data != conn)
3013 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3015 struct mgmt_rp_pair_device rp;
3016 struct hci_conn *conn = cmd->user_data;
3018 bacpy(&rp.addr.bdaddr, &conn->dst);
3019 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3021 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3024 /* So we don't get further callbacks for this connection */
3025 conn->connect_cfm_cb = NULL;
3026 conn->security_cfm_cb = NULL;
3027 conn->disconn_cfm_cb = NULL;
3029 hci_conn_drop(conn);
3031 mgmt_pending_remove(cmd);
3034 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3036 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3037 struct pending_cmd *cmd;
3039 cmd = find_pairing(conn);
3041 pairing_complete(cmd, status);
/* BR/EDR connection callback: translate the HCI status and complete
 * the pending pairing command, if one exists for this connection.
 */
3044 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3046 struct pending_cmd *cmd;
3048 BT_DBG("status %u", status);
3050 cmd = find_pairing(conn);
3052 BT_DBG("Unable to find a pending command");
3054 pairing_complete(cmd, mgmt_status(status));
/* LE connection callback variant: same flow as pairing_complete_cb
 * (elided lines presumably filter out cases handled by SMP — TODO
 * confirm against full source).
 */
3057 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3059 struct pending_cmd *cmd;
3061 BT_DBG("status %u", status);
3066 cmd = find_pairing(conn);
3068 BT_DBG("Unable to find a pending command");
3070 pairing_complete(cmd, mgmt_status(status));
/* Handler for MGMT_OP_PAIR_DEVICE: validate the request, create a
 * BR/EDR or LE connection to the peer, register pairing callbacks and
 * track the operation as a pending command. Completion is reported
 * asynchronously via pairing_complete(). (Interior lines elided.)
 */
3073 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3076 struct mgmt_cp_pair_device *cp = data;
3077 struct mgmt_rp_pair_device rp;
3078 struct pending_cmd *cmd;
3079 u8 sec_level, auth_type;
3080 struct hci_conn *conn;
/* Echo the peer address back in every reply, success or failure. */
3085 memset(&rp, 0, sizeof(rp));
3086 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3087 rp.addr.type = cp->addr.type;
3089 if (!bdaddr_type_is_valid(cp->addr.type))
3090 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3091 MGMT_STATUS_INVALID_PARAMS,
3094 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3095 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3096 MGMT_STATUS_INVALID_PARAMS,
3101 if (!hdev_is_powered(hdev)) {
3102 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3103 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3107 sec_level = BT_SECURITY_MEDIUM;
3108 auth_type = HCI_AT_DEDICATED_BONDING;
3110 if (cp->addr.type == BDADDR_BREDR) {
3111 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3116 /* Convert from L2CAP channel address type to HCI address type
3118 if (cp->addr.type == BDADDR_LE_PUBLIC)
3119 addr_type = ADDR_LE_DEV_PUBLIC;
3121 addr_type = ADDR_LE_DEV_RANDOM;
3123 /* When pairing a new device, it is expected to remember
3124 * this device for future connections. Adding the connection
3125 * parameter information ahead of time allows tracking
3126 * of the slave preferred values and will speed up any
3127 * further connection establishment.
3129 * If connection parameters already exist, then they
3130 * will be kept and this function does nothing.
3132 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3134 /* Request a connection with master = true role */
3135 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3136 sec_level, HCI_LE_CONN_TIMEOUT, true);
/* Connection attempt failed: distinguish "already in progress". */
3142 if (PTR_ERR(conn) == -EBUSY)
3143 status = MGMT_STATUS_BUSY;
3145 status = MGMT_STATUS_CONNECT_FAILED;
3147 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A non-NULL connect_cfm_cb means another pairing owns this link. */
3153 if (conn->connect_cfm_cb) {
3154 hci_conn_drop(conn);
3155 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3156 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3160 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3163 hci_conn_drop(conn);
3167 /* For LE, just connecting isn't a proof that the pairing finished */
3168 if (cp->addr.type == BDADDR_BREDR) {
3169 conn->connect_cfm_cb = pairing_complete_cb;
3170 conn->security_cfm_cb = pairing_complete_cb;
3171 conn->disconn_cfm_cb = pairing_complete_cb;
3173 conn->connect_cfm_cb = le_pairing_complete_cb;
3174 conn->security_cfm_cb = le_pairing_complete_cb;
3175 conn->disconn_cfm_cb = le_pairing_complete_cb;
3178 conn->io_capability = cp->io_cap;
3179 cmd->user_data = conn;
/* Already connected and secure: pairing is effectively done. */
3181 if (conn->state == BT_CONNECTED &&
3182 hci_conn_security(conn, sec_level, auth_type))
3183 pairing_complete(cmd, 0);
3188 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort the in-progress Pair
 * Device command whose peer address matches, completing it with
 * MGMT_STATUS_CANCELLED. (Interior lines elided.)
 */
3192 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3195 struct mgmt_addr_info *addr = data;
3196 struct pending_cmd *cmd;
3197 struct hci_conn *conn;
3204 if (!hdev_is_powered(hdev)) {
3205 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3206 MGMT_STATUS_NOT_POWERED);
/* No pending pairing at all is treated as invalid parameters. */
3210 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3212 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3213 MGMT_STATUS_INVALID_PARAMS);
3217 conn = cmd->user_data;
/* The address in the request must match the connection being paired. */
3219 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3220 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3221 MGMT_STATUS_INVALID_PARAMS);
3225 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3227 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3228 addr, sizeof(*addr));
3230 hci_dev_unlock(hdev);
/* Common helper for the user pairing responses (PIN/confirm/passkey
 * replies and negative replies). For LE links the reply is routed to
 * SMP; for BR/EDR it is forwarded to the controller with the given
 * HCI opcode, tracked as a pending mgmt command.
 */
3234 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3235 struct mgmt_addr_info *addr, u16 mgmt_op,
3236 u16 hci_op, __le32 passkey)
3238 struct pending_cmd *cmd;
3239 struct hci_conn *conn;
3244 if (!hdev_is_powered(hdev)) {
3245 err = cmd_complete(sk, hdev->id, mgmt_op,
3246 MGMT_STATUS_NOT_POWERED, addr,
/* Look the connection up on the link type implied by the address. */
3251 if (addr->type == BDADDR_BREDR)
3252 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3254 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3257 err = cmd_complete(sk, hdev->id, mgmt_op,
3258 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by SMP, not HCI. */
3263 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3264 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3266 err = cmd_complete(sk, hdev->id, mgmt_op,
3267 MGMT_STATUS_SUCCESS, addr,
3270 err = cmd_complete(sk, hdev->id, mgmt_op,
3271 MGMT_STATUS_FAILED, addr,
3277 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3283 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey; all others send just the bdaddr. */
3284 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3285 struct hci_cp_user_passkey_reply cp;
3287 bacpy(&cp.bdaddr, &addr->bdaddr);
3288 cp.passkey = passkey;
3289 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3291 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3295 mgmt_pending_remove(cmd);
3298 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper that delegates
 * to user_pairing_resp() with the matching HCI opcode (no passkey).
 */
3302 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3303 void *data, u16 len)
3305 struct mgmt_cp_pin_code_neg_reply *cp = data;
3309 return user_pairing_resp(sk, hdev, &cp->addr,
3310 MGMT_OP_PIN_CODE_NEG_REPLY,
3311 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_REPLY: validate the fixed-size
 * payload, then delegate to user_pairing_resp() (no passkey).
 */
3314 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3317 struct mgmt_cp_user_confirm_reply *cp = data;
3321 if (len != sizeof(*cp))
3322 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3323 MGMT_STATUS_INVALID_PARAMS);
3325 return user_pairing_resp(sk, hdev, &cp->addr,
3326 MGMT_OP_USER_CONFIRM_REPLY,
3327 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user
 * confirmation request via user_pairing_resp() (no passkey).
 */
3330 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3331 void *data, u16 len)
3333 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3337 return user_pairing_resp(sk, hdev, &cp->addr,
3338 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3339 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_PASSKEY_REPLY: forward the user-entered
 * passkey via user_pairing_resp().
 */
3342 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3345 struct mgmt_cp_user_passkey_reply *cp = data;
3349 return user_pairing_resp(sk, hdev, &cp->addr,
3350 MGMT_OP_USER_PASSKEY_REPLY,
3351 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request
 * via user_pairing_resp() (no passkey value).
 */
3354 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3355 void *data, u16 len)
3357 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3361 return user_pairing_resp(sk, hdev, &cp->addr,
3362 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3363 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying the current
 * hdev->dev_name into the given request.
 */
3366 static void update_name(struct hci_request *req)
3368 struct hci_dev *hdev = req->hdev;
3369 struct hci_cp_write_local_name cp;
3371 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3373 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-completion callback for Set Local Name: report the HCI
 * outcome to the pending mgmt command and remove it.
 */
3376 static void set_name_complete(struct hci_dev *hdev, u8 status)
3378 struct mgmt_cp_set_local_name *cp;
3379 struct pending_cmd *cmd;
3381 BT_DBG("status 0x%02x", status);
3385 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* Failure -> cmd_status with translated error; success -> complete. */
3392 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3393 mgmt_status(status));
3395 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3398 mgmt_pending_remove(cmd);
3401 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LOCAL_NAME: store the long and short names.
 * If nothing changed or the controller is off, complete immediately;
 * otherwise queue the HCI name/scan-response updates and finish in
 * set_name_complete(). (Interior lines elided.)
 */
3404 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3407 struct mgmt_cp_set_local_name *cp = data;
3408 struct pending_cmd *cmd;
3409 struct hci_request req;
3416 /* If the old values are the same as the new ones just return a
3417 * direct command complete event.
3419 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3420 !memcmp(hdev->short_name, cp->short_name,
3421 sizeof(hdev->short_name))) {
3422 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3427 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: update in memory only and notify other mgmt sockets. */
3429 if (!hdev_is_powered(hdev)) {
3430 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3432 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3437 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3443 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3449 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3451 hci_req_init(&req, hdev);
3453 if (lmp_bredr_capable(hdev)) {
3458 /* The name is stored in the scan response data and so
3459 * no need to udpate the advertising data here.
3461 if (lmp_le_capable(hdev))
3462 update_scan_rsp_data(&req);
3464 err = hci_req_run(&req, set_name_complete);
3466 mgmt_pending_remove(cmd);
3469 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for OOB
 * pairing data (extended variant when Secure Connections is enabled).
 * Requires power and SSP support; only one request may be pending.
 */
3473 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3474 void *data, u16 data_len)
3476 struct pending_cmd *cmd;
3479 BT_DBG("%s", hdev->name);
3483 if (!hdev_is_powered(hdev)) {
3484 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3485 MGMT_STATUS_NOT_POWERED);
3489 if (!lmp_ssp_capable(hdev)) {
3490 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3491 MGMT_STATUS_NOT_SUPPORTED);
3495 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3496 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3501 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
/* SC enabled -> extended OOB data (includes the 256-bit values). */
3507 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3508 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3511 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3514 mgmt_pending_remove(cmd);
3517 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA: store a peer's OOB hash and
 * randomizer. The payload length selects between the legacy and the
 * extended (Secure Connections) variant; any other length is rejected.
 */
3521 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3522 void *data, u16 len)
3526 BT_DBG("%s ", hdev->name);
3530 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3531 struct mgmt_cp_add_remote_oob_data *cp = data;
3534 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3535 cp->hash, cp->randomizer);
3537 status = MGMT_STATUS_FAILED;
3539 status = MGMT_STATUS_SUCCESS;
3541 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3542 status, &cp->addr, sizeof(cp->addr));
3543 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3544 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3547 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3553 status = MGMT_STATUS_FAILED;
3555 status = MGMT_STATUS_SUCCESS;
3557 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3558 status, &cp->addr, sizeof(cp->addr));
3560 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3561 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3562 MGMT_STATUS_INVALID_PARAMS);
3565 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: drop stored OOB data for
 * the given peer address; unknown address maps to INVALID_PARAMS.
 */
3569 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3570 void *data, u16 len)
3572 struct mgmt_cp_remove_remote_oob_data *cp = data;
3576 BT_DBG("%s", hdev->name);
3580 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3582 status = MGMT_STATUS_INVALID_PARAMS;
3584 status = MGMT_STATUS_SUCCESS;
3586 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3587 status, &cp->addr, sizeof(cp->addr));
3589 hci_dev_unlock(hdev);
/* Report a failed Start Discovery attempt: reset discovery state to
 * STOPPED and complete the pending command with the translated status
 * plus the discovery type that was requested.
 */
3593 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3595 struct pending_cmd *cmd;
3599 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3601 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3605 type = hdev->discovery.type;
3607 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3608 &type, sizeof(type));
3609 mgmt_pending_remove(cmd);
/* Request-completion callback for Start Discovery: on success move to
 * DISCOVERY_FINDING and, for LE/interleaved scans, arm the delayed
 * work that disables LE scanning after the discovery timeout.
 */
3614 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3616 unsigned long timeout = 0;
3618 BT_DBG("status %d", status);
3622 mgmt_start_discovery_failed(hdev, status);
3623 hci_dev_unlock(hdev);
3628 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3629 hci_dev_unlock(hdev);
/* Pick the LE scan timeout; BR/EDR inquiry times out on its own. */
3631 switch (hdev->discovery.type) {
3632 case DISCOV_TYPE_LE:
3633 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3636 case DISCOV_TYPE_INTERLEAVED:
3637 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3640 case DISCOV_TYPE_BREDR:
3644 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3650 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* Handler for MGMT_OP_START_DISCOVERY: validate preconditions, then
 * build and submit the HCI request that starts either a BR/EDR inquiry
 * or an active LE scan (possibly interleaved), completing later in
 * start_discovery_complete(). (Interior lines elided.)
 */
3653 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3654 void *data, u16 len)
3656 struct mgmt_cp_start_discovery *cp = data;
3657 struct pending_cmd *cmd;
3658 struct hci_cp_le_set_scan_param param_cp;
3659 struct hci_cp_le_set_scan_enable enable_cp;
3660 struct hci_cp_inquiry inq_cp;
3661 struct hci_request req;
3662 /* General inquiry access code (GIAC) */
3663 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3664 u8 status, own_addr_type;
3667 BT_DBG("%s", hdev->name);
3671 if (!hdev_is_powered(hdev)) {
3672 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3673 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and regular discovery are mutually exclusive. */
3677 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3678 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3683 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3684 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3689 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3695 hdev->discovery.type = cp->type;
3697 hci_req_init(&req, hdev);
3699 switch (hdev->discovery.type) {
3700 case DISCOV_TYPE_BREDR:
3701 status = mgmt_bredr_support(hdev);
3703 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3705 mgmt_pending_remove(cmd);
3709 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3710 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3712 mgmt_pending_remove(cmd);
/* Start from an empty cache so old results don't leak through. */
3716 hci_inquiry_cache_flush(hdev);
3718 memset(&inq_cp, 0, sizeof(inq_cp));
3719 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3720 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3721 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3724 case DISCOV_TYPE_LE:
3725 case DISCOV_TYPE_INTERLEAVED:
3726 status = mgmt_le_support(hdev);
3728 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3730 mgmt_pending_remove(cmd);
/* Interleaved discovery also needs BR/EDR to be enabled. */
3734 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3735 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3736 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3737 MGMT_STATUS_NOT_SUPPORTED);
3738 mgmt_pending_remove(cmd);
3742 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3743 /* Don't let discovery abort an outgoing
3744 * connection attempt that's using directed
3747 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3749 err = cmd_status(sk, hdev->id,
3750 MGMT_OP_START_DISCOVERY,
3751 MGMT_STATUS_REJECTED);
3752 mgmt_pending_remove(cmd);
3756 disable_advertising(&req);
3759 /* If controller is scanning, it means the background scanning
3760 * is running. Thus, we should temporarily stop it in order to
3761 * set the discovery scanning parameters.
3763 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3764 hci_req_add_le_scan_disable(&req);
/* NOTE(review): "¶m_cp" looks like a mangled "&param_cp"
 * (extraction artifact) — verify against the pristine source. */
3766 memset(¶m_cp, 0, sizeof(param_cp));
3768 /* All active scans will be done with either a resolvable
3769 * private address (when privacy feature has been enabled)
3770 * or unresolvable private address.
3772 err = hci_update_random_address(&req, true, &own_addr_type);
3774 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3775 MGMT_STATUS_FAILED);
3776 mgmt_pending_remove(cmd);
3780 param_cp.type = LE_SCAN_ACTIVE;
3781 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3782 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3783 param_cp.own_address_type = own_addr_type;
3784 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3787 memset(&enable_cp, 0, sizeof(enable_cp));
3788 enable_cp.enable = LE_SCAN_ENABLE;
3789 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3790 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type: reject (default branch of the switch). */
3795 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3796 MGMT_STATUS_INVALID_PARAMS);
3797 mgmt_pending_remove(cmd);
3801 err = hci_req_run(&req, start_discovery_complete);
3803 mgmt_pending_remove(cmd);
3805 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3808 hci_dev_unlock(hdev);
/* Report a failed Stop Discovery attempt: complete the pending command
 * with the translated status and the current discovery type.
 */
3812 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3814 struct pending_cmd *cmd;
3817 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3821 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3822 &hdev->discovery.type, sizeof(hdev->discovery.type));
3823 mgmt_pending_remove(cmd);
/* Request-completion callback for Stop Discovery: on failure report it,
 * on success mark discovery as stopped.
 */
3828 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3830 BT_DBG("status %d", status);
3835 mgmt_stop_discovery_failed(hdev, status);
3839 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3842 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_STOP_DISCOVERY: validate that a discovery of the
 * requested type is running, then submit the HCI request that stops
 * it; completes immediately when no HCI work is needed.
 */
3845 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3848 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3849 struct pending_cmd *cmd;
3850 struct hci_request req;
3853 BT_DBG("%s", hdev->name);
3857 if (!hci_discovery_active(hdev)) {
3858 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3859 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3860 sizeof(mgmt_cp->type));
/* The type in the request must match the discovery in progress. */
3864 if (hdev->discovery.type != mgmt_cp->type) {
3865 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3866 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3867 sizeof(mgmt_cp->type));
3871 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3877 hci_req_init(&req, hdev);
3879 hci_stop_discovery(&req);
3881 err = hci_req_run(&req, stop_discovery_complete);
3883 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3887 mgmt_pending_remove(cmd);
3889 /* If no HCI commands were sent we're done */
3890 if (err == -ENODATA) {
3891 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3892 &mgmt_cp->type, sizeof(mgmt_cp->type));
3893 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3897 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CONFIRM_NAME: during discovery, mark a cached
 * inquiry entry as having a known name or as needing name resolution.
 */
3901 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3904 struct mgmt_cp_confirm_name *cp = data;
3905 struct inquiry_entry *e;
3908 BT_DBG("%s", hdev->name);
3912 if (!hci_discovery_active(hdev)) {
3913 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3914 MGMT_STATUS_FAILED, &cp->addr,
/* Only entries with an unknown name can be confirmed. */
3919 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3921 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3922 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3927 if (cp->name_known) {
3928 e->name_state = NAME_KNOWN;
3931 e->name_state = NAME_NEEDED;
3932 hci_inquiry_cache_update_resolve(hdev, e);
3935 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3939 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_BLOCK_DEVICE: add the address to the blacklist
 * and broadcast MGMT_EV_DEVICE_BLOCKED on success.
 */
3943 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3946 struct mgmt_cp_block_device *cp = data;
3950 BT_DBG("%s", hdev->name);
3952 if (!bdaddr_type_is_valid(cp->addr.type))
3953 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3954 MGMT_STATUS_INVALID_PARAMS,
3955 &cp->addr, sizeof(cp->addr));
3959 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3962 status = MGMT_STATUS_FAILED;
/* Event is skipped for the requesting socket (it gets the reply). */
3966 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3968 status = MGMT_STATUS_SUCCESS;
3971 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3972 &cp->addr, sizeof(cp->addr));
3974 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_UNBLOCK_DEVICE: remove the address from the
 * blacklist; a miss maps to INVALID_PARAMS, success broadcasts
 * MGMT_EV_DEVICE_UNBLOCKED.
 */
3979 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3982 struct mgmt_cp_unblock_device *cp = data;
3986 BT_DBG("%s", hdev->name);
3988 if (!bdaddr_type_is_valid(cp->addr.type))
3989 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3990 MGMT_STATUS_INVALID_PARAMS,
3991 &cp->addr, sizeof(cp->addr));
3995 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
3998 status = MGMT_STATUS_INVALID_PARAMS;
4002 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4004 status = MGMT_STATUS_SUCCESS;
4007 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4008 &cp->addr, sizeof(cp->addr));
4010 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEVICE_ID: store the Device ID record fields
 * (source must be 0x0000-0x0002) and run an HCI request (elided lines
 * presumably update EIR — TODO confirm against full source).
 */
4015 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4018 struct mgmt_cp_set_device_id *cp = data;
4019 struct hci_request req;
4023 BT_DBG("%s", hdev->name);
4025 source = __le16_to_cpu(cp->source);
4027 if (source > 0x0002)
4028 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4029 MGMT_STATUS_INVALID_PARAMS);
4033 hdev->devid_source = source;
4034 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4035 hdev->devid_product = __le16_to_cpu(cp->product);
4036 hdev->devid_version = __le16_to_cpu(cp->version);
4038 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4040 hci_req_init(&req, hdev);
4042 hci_req_run(&req, NULL);
4044 hci_dev_unlock(hdev);
/* Request-completion callback for Set Advertising: on failure send the
 * error to all pending commands; on success sync HCI_ADVERTISING with
 * the actual controller state and broadcast the new settings.
 */
4049 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4051 struct cmd_lookup match = { NULL, hdev };
4054 u8 mgmt_err = mgmt_status(status);
4056 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4057 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's advertising state into the mgmt flag. */
4061 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4062 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4064 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4066 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4069 new_settings(hdev, match.sk);
/* Handler for MGMT_OP_SET_ADVERTISING: toggle LE advertising. Where no
 * HCI traffic is possible or needed (powered off, no change, LE links
 * up, active scan running) only the flag is toggled and settings are
 * reported; otherwise the enable/disable request is queued.
 */
4075 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4078 struct mgmt_mode *cp = data;
4079 struct pending_cmd *cmd;
4080 struct hci_request req;
4081 u8 val, enabled, status;
4084 BT_DBG("request for %s", hdev->name);
4086 status = mgmt_le_support(hdev);
4088 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4091 if (cp->val != 0x00 && cp->val != 0x01)
4092 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4093 MGMT_STATUS_INVALID_PARAMS)
4098 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4100 /* The following conditions are ones which mean that we should
4101 * not do any HCI communication but directly send a mgmt
4102 * response to user space (after toggling the flag if
4105 if (!hdev_is_powered(hdev) || val == enabled ||
4106 hci_conn_num(hdev, LE_LINK) > 0 ||
4107 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4108 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4109 bool changed = false;
4111 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4112 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4116 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4121 err = new_settings(hdev, sk);
/* Don't race with another advertising or LE toggle in flight. */
4126 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4127 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4128 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4133 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4139 hci_req_init(&req, hdev);
4142 enable_advertising(&req);
4144 disable_advertising(&req);
4146 err = hci_req_run(&req, set_advertising_complete);
4148 mgmt_pending_remove(cmd);
4151 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: store the LE static random
 * address. Only allowed while powered off; BDADDR_ANY clears it, any
 * other value must be a valid static address (top two bits set).
 */
4155 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4156 void *data, u16 len)
4158 struct mgmt_cp_set_static_address *cp = data;
4161 BT_DBG("%s", hdev->name);
4163 if (!lmp_le_capable(hdev))
4164 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4165 MGMT_STATUS_NOT_SUPPORTED);
4167 if (hdev_is_powered(hdev))
4168 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4169 MGMT_STATUS_REJECTED);
4171 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4172 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4173 return cmd_status(sk, hdev->id,
4174 MGMT_OP_SET_STATIC_ADDRESS,
4175 MGMT_STATUS_INVALID_PARAMS);
4177 /* Two most significant bits shall be set */
4178 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4179 return cmd_status(sk, hdev->id,
4180 MGMT_OP_SET_STATIC_ADDRESS,
4181 MGMT_STATUS_INVALID_PARAMS);
4186 bacpy(&hdev->static_addr, &cp->bdaddr);
4188 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4190 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS: validate and store the LE scan
 * interval and window (both 0x0004-0x4000, window <= interval) and
 * restart background scanning so the new values take effect.
 */
4195 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4196 void *data, u16 len)
4198 struct mgmt_cp_set_scan_params *cp = data;
4199 __u16 interval, window;
4202 BT_DBG("%s", hdev->name);
4204 if (!lmp_le_capable(hdev))
4205 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4206 MGMT_STATUS_NOT_SUPPORTED);
4208 interval = __le16_to_cpu(cp->interval);
4210 if (interval < 0x0004 || interval > 0x4000)
4211 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4212 MGMT_STATUS_INVALID_PARAMS);
4214 window = __le16_to_cpu(cp->window);
4216 if (window < 0x0004 || window > 0x4000)
4217 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4218 MGMT_STATUS_INVALID_PARAMS);
4220 if (window > interval)
4221 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4222 MGMT_STATUS_INVALID_PARAMS);
4226 hdev->le_scan_interval = interval;
4227 hdev->le_scan_window = window;
4229 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4231 /* If background scan is running, restart it so new parameters are
4234 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4235 hdev->discovery.state == DISCOVERY_STOPPED) {
4236 struct hci_request req;
4238 hci_req_init(&req, hdev);
4240 hci_req_add_le_scan_disable(&req);
4241 hci_req_add_le_passive_scan(&req);
4243 hci_req_run(&req, NULL);
4246 hci_dev_unlock(hdev);
/* Request-completion callback for Set Fast Connectable: update the
 * HCI_FAST_CONNECTABLE flag from the requested value on success,
 * respond to the pending command and broadcast new settings.
 */
4251 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4253 struct pending_cmd *cmd;
4255 BT_DBG("status 0x%02x", status);
4259 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4264 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4265 mgmt_status(status));
/* Success: the original request payload carries the requested mode. */
4267 struct mgmt_mode *cp = cmd->param;
4270 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4272 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4274 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4275 new_settings(hdev, cmd->sk);
4278 mgmt_pending_remove(cmd);
4281 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: toggle the fast-connect
 * page-scan parameters. Requires BR/EDR enabled, controller >= 1.2,
 * power on and connectable mode; finishes in
 * fast_connectable_complete().
 */
4284 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4285 void *data, u16 len)
4287 struct mgmt_mode *cp = data;
4288 struct pending_cmd *cmd;
4289 struct hci_request req;
4292 BT_DBG("%s", hdev->name);
4294 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4295 hdev->hci_ver < BLUETOOTH_VER_1_2)
4296 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4297 MGMT_STATUS_NOT_SUPPORTED);
4299 if (cp->val != 0x00 && cp->val != 0x01)
4300 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4301 MGMT_STATUS_INVALID_PARAMS);
4303 if (!hdev_is_powered(hdev))
4304 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4305 MGMT_STATUS_NOT_POWERED);
4307 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4308 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4309 MGMT_STATUS_REJECTED);
4313 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4314 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Already in the requested state: answer with current settings. */
4319 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4320 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4325 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4332 hci_req_init(&req, hdev);
4334 write_fast_connectable(&req, cp->val);
4336 err = hci_req_run(&req, fast_connectable_complete);
4338 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4339 MGMT_STATUS_FAILED);
4340 mgmt_pending_remove(cmd);
4344 hci_dev_unlock(hdev);
/* Queue a Write Scan Enable command reflecting the current
 * connectable/discoverable state (plus whitelist) into the request.
 */
4349 static void set_bredr_scan(struct hci_request *req)
4351 struct hci_dev *hdev = req->hdev;
4354 /* Ensure that fast connectable is disabled. This function will
4355 * not do anything if the page scan parameters are already what
4358 write_fast_connectable(req, false);
/* Page scan when connectable or whitelist non-empty; inquiry scan
 * additionally when discoverable. */
4360 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
4361 !list_empty(&hdev->whitelist))
4363 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4364 scan |= SCAN_INQUIRY;
4367 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request-completion callback for Set BR/EDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag and report the error;
 * on success respond with settings and broadcast them.
 */
4370 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4372 struct pending_cmd *cmd;
4374 BT_DBG("status 0x%02x", status);
4378 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4383 u8 mgmt_err = mgmt_status(status);
4385 /* We need to restore the flag if related HCI commands
4388 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4390 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4392 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4393 new_settings(hdev, cmd->sk);
4396 mgmt_pending_remove(cmd);
4399 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BREDR: enable/disable BR/EDR on a dual-mode
 * controller. Requires LE enabled; disabling while powered is
 * rejected. When powered, enabling updates scan mode and advertising
 * data, completing in set_bredr_complete(). (Interior lines elided.)
 */
4402 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4404 struct mgmt_mode *cp = data;
4405 struct pending_cmd *cmd;
4406 struct hci_request req;
4409 BT_DBG("request for %s", hdev->name);
4411 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4412 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4413 MGMT_STATUS_NOT_SUPPORTED);
4415 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4416 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4417 MGMT_STATUS_REJECTED);
4419 if (cp->val != 0x00 && cp->val != 0x01)
4420 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4421 MGMT_STATUS_INVALID_PARAMS);
4425 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4426 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: toggle the flag and clear BR/EDR-only settings when
 * disabling. */
4430 if (!hdev_is_powered(hdev)) {
4432 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4433 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4434 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4435 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4436 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4439 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4441 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4445 err = new_settings(hdev, sk);
4449 /* Reject disabling when powered on */
4451 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4452 MGMT_STATUS_REJECTED);
4456 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4457 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4462 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4468 /* We need to flip the bit already here so that update_adv_data
4469 * generates the correct flags.
4471 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4473 hci_req_init(&req, hdev);
4475 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
4476 !list_empty(&hdev->whitelist))
4477 set_bredr_scan(&req);
4479 /* Since only the advertising data flags will change, there
4480 * is no need to update the scan response data.
4482 update_adv_data(&req);
4484 err = hci_req_run(&req, set_bredr_complete);
4486 mgmt_pending_remove(cmd);
4489 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SECURE_CONN: set Secure Connections mode
 * (0x00 off, 0x01 on, 0x02 SC-only). Powered off toggles flags only;
 * powered on sends HCI Write Secure Connections Host Support and sets
 * HCI_SC_ONLY per the requested mode. (Interior lines elided.)
 */
4493 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4494 void *data, u16 len)
4496 struct mgmt_mode *cp = data;
4497 struct pending_cmd *cmd;
4501 BT_DBG("request for %s", hdev->name);
4503 status = mgmt_bredr_support(hdev);
4505 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Controller must support SC unless the debugfs force flag is set. */
4508 if (!lmp_sc_capable(hdev) &&
4509 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4510 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4511 MGMT_STATUS_NOT_SUPPORTED);
4513 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4514 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4515 MGMT_STATUS_INVALID_PARAMS);
4519 if (!hdev_is_powered(hdev)) {
4523 changed = !test_and_set_bit(HCI_SC_ENABLED,
4525 if (cp->val == 0x02)
4526 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4528 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4530 changed = test_and_clear_bit(HCI_SC_ENABLED,
4532 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4535 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4540 err = new_settings(hdev, sk);
4545 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4546 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No change in effective state: reply with current settings. */
4553 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4554 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4555 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4559 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4565 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4567 mgmt_pending_remove(cmd);
4571 if (cp->val == 0x02)
4572 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4574 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4577 hci_dev_unlock(hdev);
/* MGMT Set Debug Keys command handler.
 * cp->val: 0x00 = discard debug keys, 0x01 = keep stored debug keys
 * (HCI_KEEP_DEBUG_KEYS), 0x02 = additionally use SSP debug mode
 * (HCI_USE_DEBUG_KEYS).  If the use-flag changed while powered with
 * SSP enabled, HCI_OP_WRITE_SSP_DEBUG_MODE is sent to the controller.
 * NOTE(review): locking calls and some braces are elided from this
 * excerpt of the file.
 */
4581 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4582 void *data, u16 len)
4584 struct mgmt_mode *cp = data;
4585 bool changed, use_changed;
4588 BT_DBG("request for %s", hdev->name);
4590 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4591 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4592 MGMT_STATUS_INVALID_PARAMS);
4597 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4600 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4603 if (cp->val == 0x02)
4604 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4607 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
/* Propagate the new debug-mode setting to the controller */
4610 if (hdev_is_powered(hdev) && use_changed &&
4611 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4612 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4613 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4614 sizeof(mode), &mode);
4617 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4622 err = new_settings(hdev, sk);
4625 hci_dev_unlock(hdev);
/* MGMT Set Privacy command handler.
 * Enables/disables LE privacy.  Requires LE support, a valid privacy
 * value (0x00/0x01) and a powered-off controller (rejected otherwise
 * since the IRK/RPA state cannot be changed live).  On enable the
 * supplied IRK is copied into hdev->irk and HCI_RPA_EXPIRED is set so
 * a fresh RPA gets generated; on disable the IRK is zeroed.
 * NOTE(review): locking calls and some braces are elided from this
 * excerpt of the file.
 */
4629 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4632 struct mgmt_cp_set_privacy *cp = cp_data;
4636 BT_DBG("request for %s", hdev->name);
4638 if (!lmp_le_capable(hdev))
4639 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4640 MGMT_STATUS_NOT_SUPPORTED);
4642 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4643 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4644 MGMT_STATUS_INVALID_PARAMS);
4646 if (hdev_is_powered(hdev))
4647 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4648 MGMT_STATUS_REJECTED);
4652 /* If user space supports this command it is also expected to
4653 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4655 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4658 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4659 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4660 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4662 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4663 memset(hdev->irk, 0, sizeof(hdev->irk));
4664 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4667 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4672 err = new_settings(hdev, sk);
4675 hci_dev_unlock(hdev);
/* Validate a single IRK entry from a Load IRKs command: the address
 * type must be LE public, or LE random with the two most significant
 * address bits set (i.e. a static random address).
 * NOTE(review): the return statements are elided from this excerpt.
 */
4679 static bool irk_is_valid(struct mgmt_irk_info *irk)
4681 switch (irk->addr.type) {
4682 case BDADDR_LE_PUBLIC:
4685 case BDADDR_LE_RANDOM:
4686 /* Two most significant bits shall be set */
4687 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load IRKs command handler.
 * Bounds-checks irk_count against the maximum that fits in a u16
 * payload, verifies the exact expected payload length, validates each
 * entry, then replaces the whole IRK store: clears existing IRKs and
 * adds the new set.  Finally sets HCI_RPA_RESOLVING since a userspace
 * that loads IRKs is expected to handle RPA resolution.
 * NOTE(review): locking calls and some braces are elided from this
 * excerpt of the file.
 */
4695 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4698 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound prevents expected_len arithmetic from overflowing u16 */
4699 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4700 sizeof(struct mgmt_irk_info));
4701 u16 irk_count, expected_len;
4704 BT_DBG("request for %s", hdev->name);
4706 if (!lmp_le_capable(hdev))
4707 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4708 MGMT_STATUS_NOT_SUPPORTED);
4710 irk_count = __le16_to_cpu(cp->irk_count);
4711 if (irk_count > max_irk_count) {
4712 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4713 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4714 MGMT_STATUS_INVALID_PARAMS);
4717 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4718 if (expected_len != len) {
4719 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4721 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4722 MGMT_STATUS_INVALID_PARAMS);
4725 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry before touching the existing IRK store */
4727 for (i = 0; i < irk_count; i++) {
4728 struct mgmt_irk_info *key = &cp->irks[i];
4730 if (!irk_is_valid(key))
4731 return cmd_status(sk, hdev->id,
4733 MGMT_STATUS_INVALID_PARAMS);
4738 hci_smp_irks_clear(hdev);
4740 for (i = 0; i < irk_count; i++) {
4741 struct mgmt_irk_info *irk = &cp->irks[i];
4744 if (irk->addr.type == BDADDR_LE_PUBLIC)
4745 addr_type = ADDR_LE_DEV_PUBLIC;
4747 addr_type = ADDR_LE_DEV_RANDOM;
4749 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4753 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4755 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4757 hci_dev_unlock(hdev);
/* Validate a single LTK entry from a Load Long Term Keys command:
 * master must be 0x00/0x01 and the address must be LE public, or LE
 * random with the two most significant bits set (static random).
 * NOTE(review): the return statements are elided from this excerpt.
 */
4762 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4764 if (key->master != 0x00 && key->master != 0x01)
4767 switch (key->addr.type) {
4768 case BDADDR_LE_PUBLIC:
4771 case BDADDR_LE_RANDOM:
4772 /* Two most significant bits shall be set */
4773 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load Long Term Keys command handler.
 * Same overall shape as load_irks(): bounds-check key_count, verify
 * exact payload length, validate all entries up front, then clear and
 * repopulate the SMP LTK store via hci_add_ltk().  Key type maps the
 * MGMT authenticated/unauthenticated values onto SMP authentication
 * levels; master selects master vs. slave LTK role.
 * NOTE(review): locking calls, master/type selection lines and some
 * braces are elided from this excerpt of the file.
 */
4781 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4782 void *cp_data, u16 len)
4784 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound prevents expected_len arithmetic from overflowing u16 */
4785 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4786 sizeof(struct mgmt_ltk_info));
4787 u16 key_count, expected_len;
4790 BT_DBG("request for %s", hdev->name);
4792 if (!lmp_le_capable(hdev))
4793 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4794 MGMT_STATUS_NOT_SUPPORTED);
4796 key_count = __le16_to_cpu(cp->key_count);
4797 if (key_count > max_key_count) {
4798 BT_ERR("load_ltks: too big key_count value %u", key_count);
4799 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4800 MGMT_STATUS_INVALID_PARAMS);
4803 expected_len = sizeof(*cp) + key_count *
4804 sizeof(struct mgmt_ltk_info);
4805 if (expected_len != len) {
4806 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4808 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4809 MGMT_STATUS_INVALID_PARAMS);
4812 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before touching the existing key store */
4814 for (i = 0; i < key_count; i++) {
4815 struct mgmt_ltk_info *key = &cp->keys[i];
4817 if (!ltk_is_valid(key))
4818 return cmd_status(sk, hdev->id,
4819 MGMT_OP_LOAD_LONG_TERM_KEYS,
4820 MGMT_STATUS_INVALID_PARAMS);
4825 hci_smp_ltks_clear(hdev);
4827 for (i = 0; i < key_count; i++) {
4828 struct mgmt_ltk_info *key = &cp->keys[i];
4829 u8 type, addr_type, authenticated;
4831 if (key->addr.type == BDADDR_LE_PUBLIC)
4832 addr_type = ADDR_LE_DEV_PUBLIC;
4834 addr_type = ADDR_LE_DEV_RANDOM;
4839 type = SMP_LTK_SLAVE;
4841 switch (key->type) {
4842 case MGMT_LTK_UNAUTHENTICATED:
4843 authenticated = 0x00;
4845 case MGMT_LTK_AUTHENTICATED:
4846 authenticated = 0x01;
4852 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4853 authenticated, key->val, key->enc_size, key->ediv,
4857 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4860 hci_dev_unlock(hdev);
/* Context passed to get_conn_info_complete() via mgmt_pending_foreach():
 * identifies which connection the refresh was for and whether the TX
 * power read succeeded.
 * NOTE(review): a mgmt_status member is referenced by the users of this
 * struct but its declaration line is elided from this excerpt.
 */
4865 struct cmd_conn_lookup {
4866 struct hci_conn *conn;
4867 bool valid_tx_power;
/* Per-pending-command callback run for each queued Get Connection
 * Info request once the RSSI/TX-power refresh finished.  Skips entries
 * for other connections; otherwise builds the reply from the cached
 * conn values (or HCI_TX_POWER_INVALID when the TX power read failed),
 * sends it, drops the conn reference taken by get_conn_info() and
 * removes the pending command.
 */
4871 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4873 struct cmd_conn_lookup *match = data;
4874 struct mgmt_cp_get_conn_info *cp;
4875 struct mgmt_rp_get_conn_info rp;
4876 struct hci_conn *conn = cmd->user_data;
/* Only answer commands queued for this particular connection */
4878 if (conn != match->conn)
4881 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
4883 memset(&rp, 0, sizeof(rp));
4884 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4885 rp.addr.type = cp->addr.type;
4887 if (!match->mgmt_status) {
4888 rp.rssi = conn->rssi;
4890 if (match->valid_tx_power) {
4891 rp.tx_power = conn->tx_power;
4892 rp.max_tx_power = conn->max_tx_power;
4894 rp.tx_power = HCI_TX_POWER_INVALID;
4895 rp.max_tx_power = HCI_TX_POWER_INVALID;
4899 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4900 match->mgmt_status, &rp, sizeof(rp));
/* Balances the hci_conn_hold() done when the command was queued */
4902 hci_conn_drop(conn);
4904 mgmt_pending_remove(cmd);
/* HCI request completion callback for the RSSI/TX-power refresh
 * started by get_conn_info().  Recovers the connection handle from the
 * last-sent command (Read RSSI or Read TX Power share the same leading
 * handle field), looks up the hci_conn, then replies to every pending
 * Get Connection Info command for it via get_conn_info_complete().
 * NOTE(review): locking calls, early-return branches and some braces
 * are elided from this excerpt of the file.
 */
4907 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4909 struct hci_cp_read_rssi *cp;
4910 struct hci_conn *conn;
4911 struct cmd_conn_lookup match;
4914 BT_DBG("status 0x%02x", status);
4918 /* TX power data is valid in case request completed successfully,
4919 * otherwise we assume it's not valid. At the moment we assume that
4920 * either both or none of current and max values are valid to keep code
4923 match.valid_tx_power = !status;
4925 /* Commands sent in request are either Read RSSI or Read Transmit Power
4926 * Level so we check which one was last sent to retrieve connection
4927 * handle. Both commands have handle as first parameter so it's safe to
4928 * cast data on the same command struct.
4930 * First command sent is always Read RSSI and we fail only if it fails.
4931 * In other case we simply override error to indicate success as we
4932 * already remembered if TX power value is actually valid.
4934 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4936 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4941 BT_ERR("invalid sent_cmd in response");
4945 handle = __le16_to_cpu(cp->handle);
4946 conn = hci_conn_hash_lookup_handle(hdev, handle);
4948 BT_ERR("unknown handle (%d) in response", handle);
4953 match.mgmt_status = mgmt_status(status);
4955 /* Cache refresh is complete, now reply for mgmt request for given
4958 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4959 get_conn_info_complete, &match);
4962 hci_dev_unlock(hdev);
/* MGMT Get Connection Info command handler.
 * Returns cached RSSI / TX power / max TX power for a connected peer.
 * If the cache is older than a randomized age (between hdev's
 * conn_info_min_age and conn_info_max_age) or was never filled, an
 * HCI request reading RSSI (and TX power as needed) is issued and the
 * reply is deferred to conn_info_refresh_complete(); otherwise the
 * cached values are returned immediately.
 * NOTE(review): locking calls, error-path labels and some braces are
 * elided from this excerpt of the file.
 */
4965 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4968 struct mgmt_cp_get_conn_info *cp = data;
4969 struct mgmt_rp_get_conn_info rp;
4970 struct hci_conn *conn;
4971 unsigned long conn_info_age;
4974 BT_DBG("%s", hdev->name);
4976 memset(&rp, 0, sizeof(rp));
4977 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4978 rp.addr.type = cp->addr.type;
4980 if (!bdaddr_type_is_valid(cp->addr.type))
4981 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4982 MGMT_STATUS_INVALID_PARAMS,
4987 if (!hdev_is_powered(hdev)) {
4988 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4989 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* BR/EDR and LE connections live in different hash buckets */
4993 if (cp->addr.type == BDADDR_BREDR)
4994 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4997 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4999 if (!conn || conn->state != BT_CONNECTED) {
5000 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5001 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
5005 /* To avoid client trying to guess when to poll again for information we
5006 * calculate conn info age as random value between min/max set in hdev.
5008 conn_info_age = hdev->conn_info_min_age +
5009 prandom_u32_max(hdev->conn_info_max_age -
5010 hdev->conn_info_min_age);
5012 /* Query controller to refresh cached values if they are too old or were
5015 if (time_after(jiffies, conn->conn_info_timestamp +
5016 msecs_to_jiffies(conn_info_age)) ||
5017 !conn->conn_info_timestamp) {
5018 struct hci_request req;
5019 struct hci_cp_read_tx_power req_txp_cp;
5020 struct hci_cp_read_rssi req_rssi_cp;
5021 struct pending_cmd *cmd;
5023 hci_req_init(&req, hdev);
5024 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5025 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5028 /* For LE links TX power does not change thus we don't need to
5029 * query for it once value is known.
5031 if (!bdaddr_type_is_le(cp->addr.type) ||
5032 conn->tx_power == HCI_TX_POWER_INVALID) {
5033 req_txp_cp.handle = cpu_to_le16(conn->handle);
5034 req_txp_cp.type = 0x00;
5035 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5036 sizeof(req_txp_cp), &req_txp_cp);
5039 /* Max TX power needs to be read only once per connection */
5040 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5041 req_txp_cp.handle = cpu_to_le16(conn->handle);
5042 req_txp_cp.type = 0x01;
5043 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5044 sizeof(req_txp_cp), &req_txp_cp);
5047 err = hci_req_run(&req, conn_info_refresh_complete);
5051 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Keep the connection alive until the refresh completes */
5058 hci_conn_hold(conn);
5059 cmd->user_data = conn;
5061 conn->conn_info_timestamp = jiffies;
5063 /* Cache is valid, just reply with values cached in hci_conn */
5064 rp.rssi = conn->rssi;
5065 rp.tx_power = conn->tx_power;
5066 rp.max_tx_power = conn->max_tx_power;
5068 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5069 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5073 hci_dev_unlock(hdev);
/* HCI request completion callback for Get Clock Information.
 * Recovers which clock was read (hci_cp->which: piconet clock implies
 * a connection handle), finds the matching pending command by its
 * stored hci_conn, and replies with local clock plus, for a piconet
 * read, the peer clock and accuracy cached on the connection.
 * NOTE(review): locking calls, early-return branches and some braces
 * are elided from this excerpt of the file.
 */
5077 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5079 struct mgmt_cp_get_clock_info *cp;
5080 struct mgmt_rp_get_clock_info rp;
5081 struct hci_cp_read_clock *hci_cp;
5082 struct pending_cmd *cmd;
5083 struct hci_conn *conn;
5085 BT_DBG("%s status %u", hdev->name, status);
5089 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5093 if (hci_cp->which) {
5094 u16 handle = __le16_to_cpu(hci_cp->handle);
5095 conn = hci_conn_hash_lookup_handle(hdev, handle);
5100 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5106 memset(&rp, 0, sizeof(rp));
5107 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
5112 rp.local_clock = cpu_to_le32(hdev->clock);
5115 rp.piconet_clock = cpu_to_le32(conn->clock);
5116 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5120 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
5122 mgmt_pending_remove(cmd);
/* Balances the hci_conn_hold() done in get_clock_info() */
5124 hci_conn_drop(conn);
5127 hci_dev_unlock(hdev);
/* MGMT Get Clock Information command handler (BR/EDR only).
 * Always queues a Read Clock for the local clock; when a peer address
 * is given and that peer is connected, additionally queues a piconet
 * clock read (which = 0x01) for the connection handle and holds a
 * reference on the connection.  The reply is sent asynchronously from
 * get_clock_info_complete().
 * NOTE(review): locking calls, error-path labels and some braces are
 * elided from this excerpt of the file.
 */
5130 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5133 struct mgmt_cp_get_clock_info *cp = data;
5134 struct mgmt_rp_get_clock_info rp;
5135 struct hci_cp_read_clock hci_cp;
5136 struct pending_cmd *cmd;
5137 struct hci_request req;
5138 struct hci_conn *conn;
5141 BT_DBG("%s", hdev->name);
5143 memset(&rp, 0, sizeof(rp));
5144 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5145 rp.addr.type = cp->addr.type;
/* Clock information is defined only for BR/EDR */
5147 if (cp->addr.type != BDADDR_BREDR)
5148 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5149 MGMT_STATUS_INVALID_PARAMS,
5154 if (!hdev_is_powered(hdev)) {
5155 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5156 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Non-ANY address: the peer must be connected */
5160 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5161 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5163 if (!conn || conn->state != BT_CONNECTED) {
5164 err = cmd_complete(sk, hdev->id,
5165 MGMT_OP_GET_CLOCK_INFO,
5166 MGMT_STATUS_NOT_CONNECTED,
5174 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5180 hci_req_init(&req, hdev);
/* First read: local clock (hci_cp zeroed, which = 0x00) */
5182 memset(&hci_cp, 0, sizeof(hci_cp));
5183 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5186 hci_conn_hold(conn);
5187 cmd->user_data = conn;
5189 hci_cp.handle = cpu_to_le16(conn->handle);
5190 hci_cp.which = 0x01; /* Piconet clock */
5191 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5194 err = hci_req_run(&req, get_clock_info_complete);
5196 mgmt_pending_remove(cmd);
5199 hci_dev_unlock(hdev);
5203 /* Helper for Add/Remove Device commands */
/* Writes the requested page-scan setting unless BR/EDR is disabled,
 * the controller is off, or HCI_CONNECTABLE is set (in which case the
 * connectable setting owns page scanning).  Preserves inquiry scan
 * while discoverable.
 * NOTE(review): the early "return" statements after the guard
 * conditions are elided from this excerpt.
 */
5204 static void update_page_scan(struct hci_dev *hdev, u8 scan)
5206 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5209 if (!hdev_is_powered(hdev))
5212 /* If HCI_CONNECTABLE is set then Add/Remove Device should not
5213 * make any changes to page scanning.
5215 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
5218 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5219 scan |= SCAN_INQUIRY;
5221 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Emit a Device Added event to all mgmt sockets except @sk (the
 * requester, which gets a command response instead).
 */
5224 static void device_added(struct sock *sk, struct hci_dev *hdev,
5225 bdaddr_t *bdaddr, u8 type, u8 action)
5227 struct mgmt_ev_device_added ev;
5229 bacpy(&ev.addr.bdaddr, bdaddr);
5230 ev.addr.type = type;
5233 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT Add Device command handler.
 * For BR/EDR addresses only action 0x01 ("connect") is supported: the
 * device goes on the whitelist and page scan is enabled if the list
 * was previously empty.  For LE addresses the action selects the
 * auto-connect policy (ALWAYS vs. REPORT) stored in the connection
 * parameters.  A Device Added event is broadcast on success.
 * NOTE(review): locking calls, some braces/labels and the LE action
 * dispatch lines are elided from this excerpt of the file.
 */
5236 static int add_device(struct sock *sk, struct hci_dev *hdev,
5237 void *data, u16 len)
5239 struct mgmt_cp_add_device *cp = data;
5240 u8 auto_conn, addr_type;
5243 BT_DBG("%s", hdev->name);
5245 if (!bdaddr_type_is_valid(cp->addr.type) ||
5246 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5247 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5248 MGMT_STATUS_INVALID_PARAMS,
5249 &cp->addr, sizeof(cp->addr));
5251 if (cp->action != 0x00 && cp->action != 0x01)
5252 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5253 MGMT_STATUS_INVALID_PARAMS,
5254 &cp->addr, sizeof(cp->addr));
5258 if (cp->addr.type == BDADDR_BREDR) {
5261 /* Only "connect" action supported for now */
5262 if (cp->action != 0x01) {
5263 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5264 MGMT_STATUS_INVALID_PARAMS,
5265 &cp->addr, sizeof(cp->addr));
/* Enable page scan only when this is the first whitelist entry */
5269 update_scan = list_empty(&hdev->whitelist);
5271 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5277 update_page_scan(hdev, SCAN_PAGE);
5282 if (cp->addr.type == BDADDR_LE_PUBLIC)
5283 addr_type = ADDR_LE_DEV_PUBLIC;
5285 addr_type = ADDR_LE_DEV_RANDOM;
5288 auto_conn = HCI_AUTO_CONN_ALWAYS;
5290 auto_conn = HCI_AUTO_CONN_REPORT;
5292 /* If the connection parameters don't exist for this device,
5293 * they will be created and configured with defaults.
5295 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5297 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5299 &cp->addr, sizeof(cp->addr));
5304 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5306 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5307 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5310 hci_dev_unlock(hdev);
/* Emit a Device Removed event to all mgmt sockets except @sk (the
 * requester, which gets a command response instead).
 */
5314 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5315 bdaddr_t *bdaddr, u8 type)
5317 struct mgmt_ev_device_removed ev;
5319 bacpy(&ev.addr.bdaddr, bdaddr);
5320 ev.addr.type = type;
5322 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT Remove Device command handler.
 * With a concrete address: removes a BR/EDR entry from the whitelist
 * (disabling page scan if the list empties) or deletes the LE
 * connection parameters and their pending action, then rescans the
 * background-scan state.  With BDADDR_ANY (type must be 0): clears the
 * whole whitelist and every non-disabled LE conn-param entry,
 * broadcasting Device Removed for each.
 * NOTE(review): locking calls, some braces/labels and list-deletion
 * lines are elided from this excerpt of the file.
 */
5325 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5326 void *data, u16 len)
5328 struct mgmt_cp_remove_device *cp = data;
5331 BT_DBG("%s", hdev->name);
5335 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5336 struct hci_conn_params *params;
5339 if (!bdaddr_type_is_valid(cp->addr.type)) {
5340 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5341 MGMT_STATUS_INVALID_PARAMS,
5342 &cp->addr, sizeof(cp->addr));
5346 if (cp->addr.type == BDADDR_BREDR) {
5347 err = hci_bdaddr_list_del(&hdev->whitelist,
5351 err = cmd_complete(sk, hdev->id,
5352 MGMT_OP_REMOVE_DEVICE,
5353 MGMT_STATUS_INVALID_PARAMS,
5354 &cp->addr, sizeof(cp->addr));
/* Last whitelist entry gone: stop page scanning */
5358 if (list_empty(&hdev->whitelist))
5359 update_page_scan(hdev, SCAN_DISABLED);
5361 device_removed(sk, hdev, &cp->addr.bdaddr,
5366 if (cp->addr.type == BDADDR_LE_PUBLIC)
5367 addr_type = ADDR_LE_DEV_PUBLIC;
5369 addr_type = ADDR_LE_DEV_RANDOM;
5371 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5374 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5375 MGMT_STATUS_INVALID_PARAMS,
5376 &cp->addr, sizeof(cp->addr));
/* Disabled entries are internal state, not user-removable */
5380 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5381 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5382 MGMT_STATUS_INVALID_PARAMS,
5383 &cp->addr, sizeof(cp->addr));
5387 list_del(&params->action);
5388 list_del(&params->list);
5390 hci_update_background_scan(hdev);
5392 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
5394 struct hci_conn_params *p, *tmp;
5395 struct bdaddr_list *b, *btmp;
/* Wildcard removal requires addr.type == 0 */
5397 if (cp->addr.type) {
5398 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5399 MGMT_STATUS_INVALID_PARAMS,
5400 &cp->addr, sizeof(cp->addr));
5404 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5405 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5410 update_page_scan(hdev, SCAN_DISABLED);
5412 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5413 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5415 device_removed(sk, hdev, &p->addr, p->addr_type);
5416 list_del(&p->action);
5421 BT_DBG("All LE connection parameters were removed");
5423 hci_update_background_scan(hdev);
5427 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5428 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5431 hci_dev_unlock(hdev);
/* MGMT Load Connection Parameters command handler.
 * Bounds-checks param_count against the u16 payload limit and the
 * exact expected length, drops all disabled stored parameters, then
 * adds each valid entry (invalid address types or out-of-range
 * intervals are logged and skipped rather than failing the command).
 * NOTE(review): locking calls, "continue" statements and some braces
 * are elided from this excerpt of the file.
 */
5435 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5438 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound prevents expected_len arithmetic from overflowing u16 */
5439 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5440 sizeof(struct mgmt_conn_param));
5441 u16 param_count, expected_len;
5444 if (!lmp_le_capable(hdev))
5445 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5446 MGMT_STATUS_NOT_SUPPORTED);
5448 param_count = __le16_to_cpu(cp->param_count);
5449 if (param_count > max_param_count) {
5450 BT_ERR("load_conn_param: too big param_count value %u",
5452 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5453 MGMT_STATUS_INVALID_PARAMS);
5456 expected_len = sizeof(*cp) + param_count *
5457 sizeof(struct mgmt_conn_param);
5458 if (expected_len != len) {
5459 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5461 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5462 MGMT_STATUS_INVALID_PARAMS);
5465 BT_DBG("%s param_count %u", hdev->name, param_count);
5469 hci_conn_params_clear_disabled(hdev);
5471 for (i = 0; i < param_count; i++) {
5472 struct mgmt_conn_param *param = &cp->params[i];
5473 struct hci_conn_params *hci_param;
5474 u16 min, max, latency, timeout;
5477 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5480 if (param->addr.type == BDADDR_LE_PUBLIC) {
5481 addr_type = ADDR_LE_DEV_PUBLIC;
5482 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5483 addr_type = ADDR_LE_DEV_RANDOM;
5485 BT_ERR("Ignoring invalid connection parameters");
5489 min = le16_to_cpu(param->min_interval);
5490 max = le16_to_cpu(param->max_interval);
5491 latency = le16_to_cpu(param->latency);
5492 timeout = le16_to_cpu(param->timeout);
5494 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5495 min, max, latency, timeout);
5497 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5498 BT_ERR("Ignoring invalid connection parameters");
5502 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5505 BT_ERR("Failed to add connection parameters");
5509 hci_param->conn_min_interval = min;
5510 hci_param->conn_max_interval = max;
5511 hci_param->conn_latency = latency;
5512 hci_param->supervision_timeout = timeout;
5515 hci_dev_unlock(hdev);
5517 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* MGMT Set External Configuration command handler.
 * Only valid while powered off, with config 0x00/0x01, on controllers
 * with the HCI_QUIRK_EXTERNAL_CONFIG quirk.  Toggles
 * HCI_EXT_CONFIGURED; if that changes whether the controller counts
 * as configured, the index is re-announced (removed then re-added
 * under the other index list) and, when becoming configured, power-on
 * is scheduled via the req_workqueue.
 * NOTE(review): locking calls and some braces are elided from this
 * excerpt of the file.
 */
5520 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5521 void *data, u16 len)
5523 struct mgmt_cp_set_external_config *cp = data;
5527 BT_DBG("%s", hdev->name);
5529 if (hdev_is_powered(hdev))
5530 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5531 MGMT_STATUS_REJECTED);
5533 if (cp->config != 0x00 && cp->config != 0x01)
5534 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5535 MGMT_STATUS_INVALID_PARAMS);
5537 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5538 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5539 MGMT_STATUS_NOT_SUPPORTED);
5544 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5547 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5550 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5557 err = new_options(hdev, sk);
/* Re-announce the index if the configured state flipped */
5559 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5560 mgmt_index_removed(hdev);
5562 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5563 set_bit(HCI_CONFIG, &hdev->dev_flags);
5564 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5566 queue_work(hdev->req_workqueue, &hdev->power_on);
5568 set_bit(HCI_RAW, &hdev->flags);
5569 mgmt_index_added(hdev);
5574 hci_dev_unlock(hdev);
/* MGMT Set Public Address command handler.
 * Only valid while powered off, with a non-ANY address, on drivers
 * providing a set_bdaddr callback.  Stores the address in
 * hdev->public_addr; if this makes an unconfigured controller fully
 * configured, the index moves to the configured list and power-on is
 * scheduled so the address gets programmed.
 * NOTE(review): locking calls and some braces are elided from this
 * excerpt of the file.
 */
5578 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5579 void *data, u16 len)
5581 struct mgmt_cp_set_public_address *cp = data;
5585 BT_DBG("%s", hdev->name);
5587 if (hdev_is_powered(hdev))
5588 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5589 MGMT_STATUS_REJECTED);
5591 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5592 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5593 MGMT_STATUS_INVALID_PARAMS);
5595 if (!hdev->set_bdaddr)
5596 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5597 MGMT_STATUS_NOT_SUPPORTED);
5601 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5602 bacpy(&hdev->public_addr, &cp->bdaddr);
5604 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5611 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5612 err = new_options(hdev, sk);
5614 if (is_configured(hdev)) {
5615 mgmt_index_removed(hdev);
5617 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5619 set_bit(HCI_CONFIG, &hdev->dev_flags);
5620 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5622 queue_work(hdev->req_workqueue, &hdev->power_on);
5626 hci_dev_unlock(hdev);
/* Dispatch table for mgmt_control(), indexed by mgmt opcode.
 * Each entry carries the handler, whether the payload is
 * variable-length (var_len: len is then a minimum, otherwise exact),
 * and the expected/minimum payload size.  Slot order must match the
 * MGMT_OP_* numbering exactly; slot 0 is unused.
 * NOTE(review): the var_len/data_len member declaration lines are
 * elided from this excerpt of the file.
 */
5630 static const struct mgmt_handler {
5631 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5635 } mgmt_handlers[] = {
5636 { NULL }, /* 0x0000 (no command) */
5637 { read_version, false, MGMT_READ_VERSION_SIZE },
5638 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5639 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5640 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5641 { set_powered, false, MGMT_SETTING_SIZE },
5642 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5643 { set_connectable, false, MGMT_SETTING_SIZE },
5644 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5645 { set_pairable, false, MGMT_SETTING_SIZE },
5646 { set_link_security, false, MGMT_SETTING_SIZE },
5647 { set_ssp, false, MGMT_SETTING_SIZE },
5648 { set_hs, false, MGMT_SETTING_SIZE },
5649 { set_le, false, MGMT_SETTING_SIZE },
5650 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5651 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5652 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5653 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5654 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5655 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5656 { disconnect, false, MGMT_DISCONNECT_SIZE },
5657 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5658 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5659 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5660 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5661 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5662 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5663 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5664 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5665 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5666 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5667 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5668 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5669 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5670 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5671 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5672 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5673 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5674 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5675 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5676 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5677 { set_advertising, false, MGMT_SETTING_SIZE },
5678 { set_bredr, false, MGMT_SETTING_SIZE },
5679 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5680 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5681 { set_secure_conn, false, MGMT_SETTING_SIZE },
5682 { set_debug_keys, false, MGMT_SETTING_SIZE },
5683 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5684 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5685 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5686 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5687 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5688 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5689 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5690 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5691 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5692 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5693 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
/* Entry point for all mgmt commands arriving on an HCI control socket.
 * Copies the message from userspace, parses the mgmt header (opcode,
 * controller index, payload length), resolves and sanity-checks the
 * target hci_dev (rejecting devices in SETUP/CONFIG/USER_CHANNEL
 * state, and restricting unconfigured devices to the config opcodes),
 * validates that index-less vs. indexed opcodes match the presence of
 * an index, checks the payload length against mgmt_handlers[], and
 * dispatches to the handler.
 * NOTE(review): the trailing cleanup/"done" section of this function
 * (freeing buf, releasing hdev) is elided from this excerpt.
 */
5696 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
5700 struct mgmt_hdr *hdr;
5701 u16 opcode, index, len;
5702 struct hci_dev *hdev = NULL;
5703 const struct mgmt_handler *handler;
5706 BT_DBG("got %zu bytes", msglen);
5708 if (msglen < sizeof(*hdr))
5711 buf = kmalloc(msglen, GFP_KERNEL);
5715 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
5721 opcode = __le16_to_cpu(hdr->opcode);
5722 index = __le16_to_cpu(hdr->index);
5723 len = __le16_to_cpu(hdr->len);
/* Header-declared payload length must match the message exactly */
5725 if (len != msglen - sizeof(*hdr)) {
5730 if (index != MGMT_INDEX_NONE) {
5731 hdev = hci_dev_get(index);
5733 err = cmd_status(sk, index, opcode,
5734 MGMT_STATUS_INVALID_INDEX);
/* Devices still in setup/config or claimed by a user channel are
 * not addressable via mgmt
 */
5738 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5739 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5740 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5741 err = cmd_status(sk, index, opcode,
5742 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured devices accept only the configuration opcodes */
5746 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5747 opcode != MGMT_OP_READ_CONFIG_INFO &&
5748 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
5749 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
5750 err = cmd_status(sk, index, opcode,
5751 MGMT_STATUS_INVALID_INDEX);
5756 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
5757 mgmt_handlers[opcode].func == NULL) {
5758 BT_DBG("Unknown op %u", opcode);
5759 err = cmd_status(sk, index, opcode,
5760 MGMT_STATUS_UNKNOWN_COMMAND);
/* Index-less opcodes must not carry an index, and vice versa */
5764 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
5765 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5766 err = cmd_status(sk, index, opcode,
5767 MGMT_STATUS_INVALID_INDEX);
5771 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5772 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5773 err = cmd_status(sk, index, opcode,
5774 MGMT_STATUS_INVALID_INDEX);
5778 handler = &mgmt_handlers[opcode];
5780 if ((handler->var_len && len < handler->data_len) ||
5781 (!handler->var_len && len != handler->data_len)) {
5782 err = cmd_status(sk, index, opcode,
5783 MGMT_STATUS_INVALID_PARAMS);
5788 mgmt_init_hdev(sk, hdev);
5790 cp = buf + sizeof(*hdr);
5792 err = handler->func(sk, hdev, cp, len);
/* Announce a new controller index to mgmt listeners.  Only BR/EDR
 * (primary) controllers without the raw-device quirk are announced;
 * unconfigured ones use the Unconfigured Index Added event instead.
 */
5806 void mgmt_index_added(struct hci_dev *hdev)
5808 if (hdev->dev_type != HCI_BREDR)
5811 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5814 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5815 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5817 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce removal of a controller index.  Fails every pending mgmt
 * command with INVALID_INDEX first, then emits the matching (Unconf)
 * Index Removed event, subject to the same BR/EDR and raw-device
 * filters as mgmt_index_added().
 */
5820 void mgmt_index_removed(struct hci_dev *hdev)
5822 u8 status = MGMT_STATUS_INVALID_INDEX;
5824 if (hdev->dev_type != HCI_BREDR)
5827 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* opcode 0 == match all pending commands */
5830 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5832 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5833 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5835 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5838 /* This function requires the caller holds hdev->lock */
/* Re-queue every stored LE connection parameter entry onto the
 * pend_le_conns / pend_le_reports action list that matches its
 * auto_connect policy, then kick the background scan.  Needed after
 * power-on since AUTO_OFF may have left stale action list state.
 */
5839 static void restart_le_actions(struct hci_dev *hdev)
5841 struct hci_conn_params *p;
5843 list_for_each_entry(p, &hdev->le_conn_params, list) {
5844 /* Needed for AUTO_OFF case where might not "really"
5845 * have been powered off.
5847 list_del_init(&p->action);
5849 switch (p->auto_connect) {
5850 case HCI_AUTO_CONN_ALWAYS:
5851 list_add(&p->action, &hdev->pend_le_conns);
5853 case HCI_AUTO_CONN_REPORT:
5854 list_add(&p->action, &hdev->pend_le_reports);
5861 hci_update_background_scan(hdev);
/* HCI request completion callback for powered_update_hci(): restarts
 * LE auto-connect actions, answers all pending Set Powered commands
 * and broadcasts the new settings.
 */
5864 static void powered_complete(struct hci_dev *hdev, u8 status)
5866 struct cmd_lookup match = { NULL, hdev };
5868 BT_DBG("status 0x%02x", status);
5872 restart_le_actions(hdev);
5874 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5876 new_settings(hdev, match.sk);
5878 hci_dev_unlock(hdev);
/* Build and run the HCI request that synchronizes controller state
 * with the mgmt dev_flags after power-on: SSP mode, LE host support,
 * advertising/scan-response data, auth (link security) setting and
 * BR/EDR scan enable.  Returns the hci_req_run() result; 0 means the
 * request was queued and powered_complete() will fire.
 * NOTE(review): several declaration/assignment lines are elided from
 * this excerpt of the file.
 */
5884 static int powered_update_hci(struct hci_dev *hdev)
5886 struct hci_request req;
5889 hci_req_init(&req, hdev);
/* Enable SSP on the controller if the host flag says so but the
 * controller's host feature bit does not yet
 */
5891 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
5892 !lmp_host_ssp_capable(hdev)) {
5895 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5898 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5899 lmp_bredr_capable(hdev)) {
5900 struct hci_cp_write_le_host_supported cp;
5903 cp.simul = lmp_le_br_capable(hdev);
5905 /* Check first if we already have the right
5906 * host state (host features set)
5908 if (cp.le != lmp_host_le_capable(hdev) ||
5909 cp.simul != lmp_host_le_br_capable(hdev))
5910 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5914 if (lmp_le_capable(hdev)) {
5915 /* Make sure the controller has a good default for
5916 * advertising data. This also applies to the case
5917 * where BR/EDR was toggled during the AUTO_OFF phase.
5919 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5920 update_adv_data(&req);
5921 update_scan_rsp_data(&req);
5924 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5925 enable_advertising(&req);
/* Sync the controller's auth-enable with the mgmt link security flag */
5928 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5929 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5930 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5931 sizeof(link_sec), &link_sec);
5933 if (lmp_bredr_capable(hdev)) {
5934 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5935 set_bredr_scan(&req);
5941 return hci_req_run(&req, powered_complete);
/* Notify the management interface of a power state change. When powering
 * on, controller sync is delegated to powered_update_hci() (its completion
 * callback answers the pending commands). When powering off, all pending
 * SET_POWERED commands are completed, every other pending command is
 * failed with MGMT_STATUS_NOT_POWERED, a zeroed Class Of Device event is
 * sent if the class was non-zero, and New Settings is emitted.
 */
5944 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5946 struct cmd_lookup match = { NULL, hdev };
5947 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5948 u8 zero_cod[] = { 0, 0, 0 };
5951 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
5955 if (powered_update_hci(hdev) == 0)
5958 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
5963 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5964 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
5966 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
5967 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
5968 zero_cod, sizeof(zero_cod), NULL);
5971 err = new_settings(hdev, match.sk);
/* Fail a pending MGMT_OP_SET_POWERED command: map -ERFKILL to
 * MGMT_STATUS_RFKILLED, anything else to MGMT_STATUS_FAILED, send the
 * command status and remove the pending entry.
 */
5979 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5981 struct pending_cmd *cmd;
5984 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5988 if (err == -ERFKILL)
5989 status = MGMT_STATUS_RFKILLED;
5991 status = MGMT_STATUS_FAILED;
5993 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5995 mgmt_pending_remove(cmd);
/* Handle expiry of the discoverable timeout: clear both discoverable
 * flags, restore plain page scan for BR/EDR, refresh LE advertising data,
 * reset the stored timeout and emit New Settings.
 */
5998 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6000 struct hci_request req;
6004 /* When discoverable timeout triggers, then just make sure
6005 * the limited discoverable flag is cleared. Even in the case
6006 * of a timeout triggered from general discoverable, it is
6007 * safe to unconditionally clear the flag.
6009 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6010 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6012 hci_req_init(&req, hdev);
6013 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* SCAN_PAGE only: drops inquiry scan, ending BR/EDR discoverability. */
6014 u8 scan = SCAN_PAGE;
6015 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6016 sizeof(scan), &scan);
6019 update_adv_data(&req);
6020 hci_req_run(&req, NULL);
6022 hdev->discov_timeout = 0;
6024 new_settings(hdev, NULL);
6026 hci_dev_unlock(hdev);
/* Sync the HCI_DISCOVERABLE flag with a controller-reported state change
 * that was not initiated via mgmt. If the flag actually changed, refresh
 * the LE advertising data (connectable changes affect the adv flags) and
 * emit New Settings.
 */
6029 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
6033 /* Nothing needed here if there's a pending command since that
6034 * command's request completion callback takes care of everything
6037 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
6040 /* Powering off may clear the scan mode - don't let that interfere */
6041 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6045 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6047 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6048 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6052 struct hci_request req;
6054 /* In case this change in discoverable was triggered by
6055 * a disabling of connectable there could be a need to
6056 * update the advertising flags.
6058 hci_req_init(&req, hdev);
6059 update_adv_data(&req);
6060 hci_req_run(&req, NULL);
6062 new_settings(hdev, NULL);
/* Sync the HCI_CONNECTABLE flag with a controller-reported scan-mode
 * change that was not initiated via mgmt, emitting New Settings if the
 * flag actually changed.
 */
6066 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
6070 /* Nothing needed here if there's a pending command since that
6071 * command's request completion callback takes care of everything
6074 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
6077 /* Powering off may clear the scan mode - don't let that interfere */
6078 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6081 /* If something else than mgmt changed the page scan state we
6082 * can't differentiate this from a change triggered by adding
6083 * the first element to the whitelist. Therefore, avoid
6084 * incorrectly setting HCI_CONNECTABLE.
6086 if (connectable && !list_empty(&hdev->whitelist))
6090 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
6092 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
6095 new_settings(hdev, NULL);
/* A Write Scan Enable command failed: fail the pending connectable
 * command if page scan was requested, and the pending discoverable
 * command if inquiry scan was requested, each with the mapped status.
 */
6098 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
6100 u8 mgmt_err = mgmt_status(status);
6102 if (scan & SCAN_PAGE)
6103 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
6104 cmd_status_rsp, &mgmt_err);
6106 if (scan & SCAN_INQUIRY)
6107 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
6108 cmd_status_rsp, &mgmt_err);
/* Emit a New Link Key event for a BR/EDR link key, with store_hint taken
 * from the persistent argument.
 */
6111 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6114 struct mgmt_ev_new_link_key ev;
6116 memset(&ev, 0, sizeof(ev));
6118 ev.store_hint = persistent;
6119 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6120 ev.key.addr.type = BDADDR_BREDR;
6121 ev.key.type = key->type;
6122 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6123 ev.key.pin_len = key->pin_len;
6125 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long term key's authenticated flag onto the corresponding
 * mgmt LTK type constant.
 */
6128 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6130 if (ltk->authenticated)
6131 return MGMT_LTK_AUTHENTICATED;
6133 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event. The store hint is forced to zero for
 * non-static random addresses (top two bits != 0b11) since such addresses
 * change and the key cannot be re-associated; otherwise the caller's
 * persistent value is used.
 */
6136 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6138 struct mgmt_ev_new_long_term_key ev;
6140 memset(&ev, 0, sizeof(ev));
6142 /* Devices using resolvable or non-resolvable random addresses
6143 * without providing an identity resolving key don't require
6144 * to store long term keys. Their addresses will change the
6147 * Only when a remote device provides an identity address
6148 * make sure the long term key is stored. If the remote
6149 * identity is known, the long term keys are internally
6150 * mapped to the identity address. So allow static random
6151 * and public addresses here.
6153 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6154 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6155 ev.store_hint = 0x00;
6157 ev.store_hint = persistent;
6159 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6160 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6161 ev.key.type = mgmt_ltk_type(key);
6162 ev.key.enc_size = key->enc_size;
6163 ev.key.ediv = key->ediv;
6164 ev.key.rand = key->rand;
6166 if (key->type == SMP_LTK)
6169 memcpy(ev.key.val, key->val, sizeof(key->val));
6171 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event. Storing is only suggested (store_hint = 1) when
 * the device is actually using a resolvable private address (irk->rpa is
 * non-zero); identity-addressed devices don't need their IRK stored.
 */
6174 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6176 struct mgmt_ev_new_irk ev;
6178 memset(&ev, 0, sizeof(ev));
6180 /* For identity resolving keys from devices that are already
6181 * using a public address or static random address, do not
6182 * ask for storing this key. The identity resolving key really
6183 * is only mandatory for devices using resolvable random
6186 * Storing all identity resolving keys has the downside that
6187 * they will be also loaded on next boot of the system. More
6188 * identity resolving keys, means more time during scanning is
6189 * needed to actually resolve these addresses.
6191 if (bacmp(&irk->rpa, BDADDR_ANY))
6192 ev.store_hint = 0x01;
6194 ev.store_hint = 0x00;
6196 bacpy(&ev.rpa, &irk->rpa);
6197 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6198 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6199 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6201 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK (signature resolving key) event. Same store-hint rule
 * as mgmt_new_ltk(): suppressed for non-static random addresses.
 */
6204 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6207 struct mgmt_ev_new_csrk ev;
6209 memset(&ev, 0, sizeof(ev));
6211 /* Devices using resolvable or non-resolvable random addresses
6212 * without providing an identity resolving key don't require
6213 * to store signature resolving keys. Their addresses will change
6214 * the next time around.
6216 * Only when a remote device provides an identity address
6217 * make sure the signature resolving key is stored. So allow
6218 * static random and public addresses here.
6220 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6221 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6222 ev.store_hint = 0x00;
6224 ev.store_hint = persistent;
6226 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6227 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6228 ev.key.master = csrk->master;
6229 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6231 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event for an LE device. Bails out early
 * for non-identity addresses since parameters can only be stored against
 * an identity address. All 16-bit fields are converted to little endian.
 */
6234 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6235 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6236 u16 max_interval, u16 latency, u16 timeout)
6238 struct mgmt_ev_new_conn_param ev;
6240 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6243 memset(&ev, 0, sizeof(ev));
6244 bacpy(&ev.addr.bdaddr, bdaddr);
6245 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6246 ev.store_hint = store_hint;
6247 ev.min_interval = cpu_to_le16(min_interval);
6248 ev.max_interval = cpu_to_le16(max_interval);
6249 ev.latency = cpu_to_le16(latency);
6250 ev.timeout = cpu_to_le16(timeout);
6252 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR TLV field (length byte, type byte, data) at offset
 * eir_len in the buffer and return the new length.
 * NOTE(review): caller must guarantee the buffer has room — no bounds
 * check is visible here.
 */
6255 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6258 eir[eir_len++] = sizeof(type) + data_len;
6259 eir[eir_len++] = type;
6260 memcpy(&eir[eir_len], data, data_len);
6261 eir_len += data_len;
/* Emit a Device Connected event carrying the connection flags plus an EIR
 * blob built from the remote name (if any) and class of device (if
 * non-zero).
 */
6266 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6267 u8 addr_type, u32 flags, u8 *name, u8 name_len,
6271 struct mgmt_ev_device_connected *ev = (void *) buf;
6274 bacpy(&ev->addr.bdaddr, bdaddr);
6275 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6277 ev->flags = __cpu_to_le32(flags);
6280 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6283 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
6284 eir_len = eir_append_data(ev->eir, eir_len,
6285 EIR_CLASS_OF_DEV, dev_class, 3);
6287 ev->eir_len = cpu_to_le16(eir_len);
6289 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6290 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending MGMT_OP_DISCONNECT
 * with success, echoing the address from the original command, and hand
 * the command's socket back through the data pointer before removing the
 * pending entry.
 */
6293 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6295 struct mgmt_cp_disconnect *cp = cmd->param;
6296 struct sock **sk = data;
6297 struct mgmt_rp_disconnect rp;
6299 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6300 rp.addr.type = cp->addr.type;
6302 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6308 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: emit Device Unpaired for the address in
 * a pending unpair command, complete the command with success and remove
 * the pending entry.
 */
6311 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6313 struct hci_dev *hdev = data;
6314 struct mgmt_cp_unpair_device *cp = cmd->param;
6315 struct mgmt_rp_unpair_device rp;
6317 memset(&rp, 0, sizeof(rp));
6318 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6319 rp.addr.type = cp->addr.type;
6321 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6323 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6325 mgmt_pending_remove(cmd);
/* Handle a device disconnection: if a power-off is pending and this was
 * the last connection, expedite the queued power-off work; then (for
 * ACL/LE links established via mgmt) complete any pending disconnect
 * commands, emit Device Disconnected, and flush pending unpair commands.
 */
6328 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6329 u8 link_type, u8 addr_type, u8 reason,
6330 bool mgmt_connected)
6332 struct mgmt_ev_device_disconnected ev;
6333 struct pending_cmd *power_off;
6334 struct sock *sk = NULL;
6336 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6338 struct mgmt_mode *cp = power_off->param;
6340 /* The connection is still in hci_conn_hash so test for 1
6341 * instead of 0 to know if this is the last one.
6343 if (!cp->val && hci_conn_count(hdev) == 1) {
6344 cancel_delayed_work(&hdev->power_off);
6345 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6349 if (!mgmt_connected)
6352 if (link_type != ACL_LINK && link_type != LE_LINK)
6355 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6357 bacpy(&ev.addr.bdaddr, bdaddr);
6358 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6361 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6366 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A disconnect attempt failed: flush pending unpair commands, then if a
 * pending disconnect command matches this exact address and type,
 * complete it with the mapped failure status.
 */
6370 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6371 u8 link_type, u8 addr_type, u8 status)
6373 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6374 struct mgmt_cp_disconnect *cp;
6375 struct mgmt_rp_disconnect rp;
6376 struct pending_cmd *cmd;
6378 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6381 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6387 if (bacmp(bdaddr, &cp->addr.bdaddr))
6390 if (cp->addr.type != bdaddr_type)
6393 bacpy(&rp.addr.bdaddr, bdaddr);
6394 rp.addr.type = bdaddr_type;
6396 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6397 mgmt_status(status), &rp, sizeof(rp));
6399 mgmt_pending_remove(cmd);
/* A connection attempt failed: if a power-off is pending and this was the
 * last tracked connection, expedite the power-off work; then emit a
 * Connect Failed event with the mapped status.
 */
6402 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6403 u8 addr_type, u8 status)
6405 struct mgmt_ev_connect_failed ev;
6406 struct pending_cmd *power_off;
6408 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6410 struct mgmt_mode *cp = power_off->param;
6412 /* The connection is still in hci_conn_hash so test for 1
6413 * instead of 0 to know if this is the last one.
6415 if (!cp->val && hci_conn_count(hdev) == 1) {
6416 cancel_delayed_work(&hdev->power_off);
6417 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6421 bacpy(&ev.addr.bdaddr, bdaddr);
6422 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6423 ev.status = mgmt_status(status);
6425 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event for a BR/EDR device.
 * NOTE(review): the assignment of the secure parameter into the event is
 * on a dropped line in this listing.
 */
6428 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6430 struct mgmt_ev_pin_code_request ev;
6432 bacpy(&ev.addr.bdaddr, bdaddr);
6433 ev.addr.type = BDADDR_BREDR;
6436 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending MGMT_OP_PIN_CODE_REPLY with the mapped HCI status,
 * echoing the BR/EDR address, then remove the pending entry.
 */
6439 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6442 struct pending_cmd *cmd;
6443 struct mgmt_rp_pin_code_reply rp;
6445 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6449 bacpy(&rp.addr.bdaddr, bdaddr);
6450 rp.addr.type = BDADDR_BREDR;
6452 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6453 mgmt_status(status), &rp, sizeof(rp));
6455 mgmt_pending_remove(cmd);
/* Complete a pending MGMT_OP_PIN_CODE_NEG_REPLY with the mapped HCI
 * status; mirror image of mgmt_pin_code_reply_complete().
 */
6458 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6461 struct pending_cmd *cmd;
6462 struct mgmt_rp_pin_code_reply rp;
6464 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6468 bacpy(&rp.addr.bdaddr, bdaddr);
6469 rp.addr.type = BDADDR_BREDR;
6471 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6472 mgmt_status(status), &rp, sizeof(rp));
6474 mgmt_pending_remove(cmd);
/* Emit a User Confirm Request event carrying the numeric comparison value
 * (little endian) and the confirm hint; returns the mgmt_event() result.
 */
6477 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6478 u8 link_type, u8 addr_type, u32 value,
6481 struct mgmt_ev_user_confirm_request ev;
6483 BT_DBG("%s", hdev->name);
6485 bacpy(&ev.addr.bdaddr, bdaddr);
6486 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6487 ev.confirm_hint = confirm_hint;
6488 ev.value = cpu_to_le32(value);
6490 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event for the given address; returns the
 * mgmt_event() result.
 */
6494 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6495 u8 link_type, u8 addr_type)
6497 struct mgmt_ev_user_passkey_request ev;
6499 BT_DBG("%s", hdev->name);
6501 bacpy(&ev.addr.bdaddr, bdaddr);
6502 ev.addr.type = link_to_bdaddr(LE_LINK, addr_type);
6504 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the user confirm/passkey (neg) reply
 * commands: find the pending command for the given opcode, complete it
 * with the mapped status and the echoed address, and remove it.
 */
6508 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6509 u8 link_type, u8 addr_type, u8 status,
6512 struct pending_cmd *cmd;
6513 struct mgmt_rp_user_confirm_reply rp;
6516 cmd = mgmt_pending_find(opcode, hdev);
6520 bacpy(&rp.addr.bdaddr, bdaddr);
6521 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6522 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6525 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending MGMT_OP_USER_CONFIRM_REPLY. */
6530 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6531 u8 link_type, u8 addr_type, u8 status)
6533 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6534 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending MGMT_OP_USER_CONFIRM_NEG_REPLY. */
6537 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6538 u8 link_type, u8 addr_type, u8 status)
6540 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6542 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending MGMT_OP_USER_PASSKEY_REPLY. */
6545 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6546 u8 link_type, u8 addr_type, u8 status)
6548 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6549 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending MGMT_OP_USER_PASSKEY_NEG_REPLY. */
6552 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6553 u8 link_type, u8 addr_type, u8 status)
6555 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6557 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event carrying the displayed passkey (little
 * endian) and the entered-digits count; returns the mgmt_event() result.
 */
6560 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6561 u8 link_type, u8 addr_type, u32 passkey,
6564 struct mgmt_ev_passkey_notify ev;
6566 BT_DBG("%s", hdev->name);
6568 bacpy(&ev.addr.bdaddr, bdaddr);
6569 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6570 ev.passkey = __cpu_to_le32(passkey);
6571 ev.entered = entered;
6573 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an Authentication Failed event with the mapped HCI status. */
6576 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6577 u8 addr_type, u8 status)
6579 struct mgmt_ev_auth_failed ev;
6581 bacpy(&ev.addr.bdaddr, bdaddr);
6582 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6583 ev.status = mgmt_status(status);
6585 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Completion of a Write Auth Enable command: on failure, fail all pending
 * SET_LINK_SECURITY commands; on success, sync HCI_LINK_SECURITY from the
 * controller's HCI_AUTH flag, complete pending commands with the current
 * settings, and emit New Settings if the flag changed.
 */
6588 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6590 struct cmd_lookup match = { NULL, hdev };
6594 u8 mgmt_err = mgmt_status(status);
6595 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6596 cmd_status_rsp, &mgmt_err);
6600 if (test_bit(HCI_AUTH, &hdev->flags))
6601 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6604 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6607 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6611 new_settings(hdev, match.sk);
/* Queue a Write EIR command with all-zero data (and clear the cached copy
 * in hdev->eir), provided the controller supports extended inquiry.
 */
6617 static void clear_eir(struct hci_request *req)
6619 struct hci_dev *hdev = req->hdev;
6620 struct hci_cp_write_eir cp;
6622 if (!lmp_ext_inq_capable(hdev))
6625 memset(hdev->eir, 0, sizeof(hdev->eir));
6627 memset(&cp, 0, sizeof(cp));
6629 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of a Write SSP Mode command. On failure: roll back the
 * HCI_SSP_ENABLED/HCI_HS_ENABLED flags if enabling was attempted, emit
 * New Settings, and fail pending SET_SSP commands. On success: sync the
 * flags with the requested state, complete pending commands, emit New
 * Settings if changed, and follow up with a debug-keys mode write or an
 * EIR update as appropriate.
 */
6632 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6634 struct cmd_lookup match = { NULL, hdev };
6635 struct hci_request req;
6636 bool changed = false;
6639 u8 mgmt_err = mgmt_status(status);
6641 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6642 &hdev->dev_flags)) {
6643 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6644 new_settings(hdev, NULL);
6647 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6653 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6655 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6657 changed = test_and_clear_bit(HCI_HS_ENABLED,
6660 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6663 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6666 new_settings(hdev, match.sk);
6671 hci_req_init(&req, hdev);
6673 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6674 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6675 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6676 sizeof(enable), &enable);
6682 hci_req_run(&req, NULL);
/* Completion of a Secure Connections enable/disable. On failure: roll
 * back HCI_SC_ENABLED/HCI_SC_ONLY and fail pending SET_SECURE_CONN
 * commands. On success: sync the flags, complete pending commands and
 * emit New Settings if changed.
 */
6685 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6687 struct cmd_lookup match = { NULL, hdev };
6688 bool changed = false;
6691 u8 mgmt_err = mgmt_status(status);
6694 if (test_and_clear_bit(HCI_SC_ENABLED,
6696 new_settings(hdev, NULL);
6697 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6700 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6701 cmd_status_rsp, &mgmt_err);
6706 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6708 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6709 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6712 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6713 settings_rsp, &match);
6716 new_settings(hdev, match.sk);
/* mgmt_pending_foreach() callback: capture (and reference) the socket of
 * the first pending command seen into the cmd_lookup match.
 */
6722 static void sk_lookup(struct pending_cmd *cmd, void *data)
6724 struct cmd_lookup *match = data;
6726 if (match->sk == NULL) {
6727 match->sk = cmd->sk;
6728 sock_hold(match->sk);
/* Completion of a class-of-device update: locate the originating socket
 * among the pending dev-class/UUID commands and emit a Class Of Device
 * Changed event (skipping the originator).
 */
6732 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6735 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6737 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6738 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6739 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6742 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Completion of a local name change: store the new name, and emit a Local
 * Name Changed event (skipping the originator's socket) — unless the name
 * write was part of the power-on sequence, in which case no event is sent.
 */
6749 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6751 struct mgmt_cp_set_local_name ev;
6752 struct pending_cmd *cmd;
6757 memset(&ev, 0, sizeof(ev));
6758 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6759 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6761 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6763 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6765 /* If this is a HCI command related to powering on the
6766 * HCI dev don't send any mgmt signals.
6768 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6772 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6773 cmd ? cmd->sk : NULL);
/* Completion of Read Local OOB Data: on failure send a command status;
 * otherwise reply with the extended response (P-192 + P-256 hash and
 * randomizer) when Secure Connections is enabled and 256-bit data is
 * available, or the plain P-192 response otherwise. The pending command
 * is removed in all cases.
 */
6776 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6777 u8 *randomizer192, u8 *hash256,
6778 u8 *randomizer256, u8 status)
6780 struct pending_cmd *cmd;
6782 BT_DBG("%s status %u", hdev->name, status);
6784 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6789 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6790 mgmt_status(status));
6792 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
6793 hash256 && randomizer256) {
6794 struct mgmt_rp_read_local_oob_ext_data rp;
6796 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6797 memcpy(rp.randomizer192, randomizer192,
6798 sizeof(rp.randomizer192));
6800 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6801 memcpy(rp.randomizer256, randomizer256,
6802 sizeof(rp.randomizer256));
6804 cmd_complete(cmd->sk, hdev->id,
6805 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6808 struct mgmt_rp_read_local_oob_data rp;
6810 memcpy(rp.hash, hash192, sizeof(rp.hash));
6811 memcpy(rp.randomizer, randomizer192,
6812 sizeof(rp.randomizer));
6814 cmd_complete(cmd->sk, hdev->id,
6815 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6820 mgmt_pending_remove(cmd);
/* Emit a Device Found event for an inquiry/scan result. Events are
 * suppressed when no kernel-initiated discovery is active, except for LE
 * results wanted by passive-scan report entries. The EIR data, an
 * appended class-of-device field (if not already present) and any scan
 * response data are concatenated into the event payload.
 */
6823 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6824 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6825 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6828 struct mgmt_ev_device_found *ev = (void *) buf;
6831 /* Don't send events for a non-kernel initiated discovery. With
6832 * LE one exception is if we have pend_le_reports > 0 in which
6833 * case we're doing passive scanning and want these events.
6835 if (!hci_discovery_active(hdev)) {
6836 if (link_type == ACL_LINK)
6838 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
6842 /* Make sure that the buffer is big enough. The 5 extra bytes
6843 * are for the potential CoD field.
6845 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
6848 memset(buf, 0, sizeof(buf));
6850 bacpy(&ev->addr.bdaddr, bdaddr);
6851 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6853 ev->flags = cpu_to_le32(flags);
6856 memcpy(ev->eir, eir, eir_len);
6858 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
6859 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
6862 if (scan_rsp_len > 0)
6863 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
6865 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
6866 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
6868 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a Device Found event carrying only the remote device's name
 * (wrapped as an EIR complete-name field) — used when a name resolution
 * completes after the initial discovery result.
 */
6871 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6872 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6874 struct mgmt_ev_device_found *ev;
6875 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6878 ev = (struct mgmt_ev_device_found *) buf;
6880 memset(buf, 0, sizeof(buf));
6882 bacpy(&ev->addr.bdaddr, bdaddr);
6883 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6886 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6889 ev->eir_len = cpu_to_le16(eir_len);
6891 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Notify a discovery state transition: complete any pending start/stop
 * discovery command with the discovery type, then emit a Discovering
 * event with the new state.
 */
6894 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6896 struct mgmt_ev_discovering ev;
6897 struct pending_cmd *cmd;
6899 BT_DBG("%s discovering %u", hdev->name, discovering)
6902 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6904 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6907 u8 type = hdev->discovery.type;
6909 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6911 mgmt_pending_remove(cmd);
6914 memset(&ev, 0, sizeof(ev));
6915 ev.type = hdev->discovery.type;
6916 ev.discovering = discovering;
6918 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Debug-only completion callback for the re-enable advertising request. */
6921 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6923 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable LE advertising (e.g. after it was implicitly stopped) if the
 * HCI_ADVERTISING setting is still active.
 */
6926 void mgmt_reenable_advertising(struct hci_dev *hdev)
6928 struct hci_request req;
6930 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6933 hci_req_init(&req, hdev);
6934 enable_advertising(&req);
6935 hci_req_run(&req, adv_enable_complete);