2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 4
/* Opcodes of all mgmt commands this implementation handles; the list is
 * reported verbatim to user space by read_commands() below.
 * NOTE(review): source extraction dropped lines in this array (several
 * opcodes and the closing brace are not visible in this view).
 */
39 static const u16 mgmt_commands[] = {
40 MGMT_OP_READ_INDEX_LIST,
43 MGMT_OP_SET_DISCOVERABLE,
44 MGMT_OP_SET_CONNECTABLE,
45 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_LINK_SECURITY,
51 MGMT_OP_SET_DEV_CLASS,
52 MGMT_OP_SET_LOCAL_NAME,
55 MGMT_OP_LOAD_LINK_KEYS,
56 MGMT_OP_LOAD_LONG_TERM_KEYS,
58 MGMT_OP_GET_CONNECTIONS,
59 MGMT_OP_PIN_CODE_REPLY,
60 MGMT_OP_PIN_CODE_NEG_REPLY,
61 MGMT_OP_SET_IO_CAPABILITY,
63 MGMT_OP_CANCEL_PAIR_DEVICE,
64 MGMT_OP_UNPAIR_DEVICE,
65 MGMT_OP_USER_CONFIRM_REPLY,
66 MGMT_OP_USER_CONFIRM_NEG_REPLY,
67 MGMT_OP_USER_PASSKEY_REPLY,
68 MGMT_OP_USER_PASSKEY_NEG_REPLY,
69 MGMT_OP_READ_LOCAL_OOB_DATA,
70 MGMT_OP_ADD_REMOTE_OOB_DATA,
71 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
72 MGMT_OP_START_DISCOVERY,
73 MGMT_OP_STOP_DISCOVERY,
76 MGMT_OP_UNBLOCK_DEVICE,
77 MGMT_OP_SET_DEVICE_ID,
78 MGMT_OP_SET_ADVERTISING,
80 MGMT_OP_SET_STATIC_ADDRESS,
81 MGMT_OP_SET_SCAN_PARAMS,
/* Events that may be generated towards mgmt sockets; also reported by
 * read_commands().  NOTE(review): lines elided here as well.
 */
84 static const u16 mgmt_events[] = {
85 MGMT_EV_CONTROLLER_ERROR,
87 MGMT_EV_INDEX_REMOVED,
89 MGMT_EV_CLASS_OF_DEV_CHANGED,
90 MGMT_EV_LOCAL_NAME_CHANGED,
92 MGMT_EV_NEW_LONG_TERM_KEY,
93 MGMT_EV_DEVICE_CONNECTED,
94 MGMT_EV_DEVICE_DISCONNECTED,
95 MGMT_EV_CONNECT_FAILED,
96 MGMT_EV_PIN_CODE_REQUEST,
97 MGMT_EV_USER_CONFIRM_REQUEST,
98 MGMT_EV_USER_PASSKEY_REQUEST,
100 MGMT_EV_DEVICE_FOUND,
102 MGMT_EV_DEVICE_BLOCKED,
103 MGMT_EV_DEVICE_UNBLOCKED,
104 MGMT_EV_DEVICE_UNPAIRED,
105 MGMT_EV_PASSKEY_NOTIFY,
/* Lifetime of cached discovery data: 2 seconds, in jiffies. */
108 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* "Powered" from mgmt's point of view: the interface is up AND we are
 * not inside the automatic power-off grace period.
 */
110 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
111 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
/* NOTE(review): the struct pending_cmd declaration is truncated by the
 * extraction; only its list linkage member is visible here.
 */
114 struct list_head list;
122 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code (see the Bluetooth Core
 * Specification error code list); each entry is the corresponding
 * MGMT_STATUS_* value returned to user space.  Out-of-range codes are
 * handled by mgmt_status() below.
 */
123 static u8 mgmt_status_table[] = {
125 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
126 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
127 MGMT_STATUS_FAILED, /* Hardware Failure */
128 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
129 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
130 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
131 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
132 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
133 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
134 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
135 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
136 MGMT_STATUS_BUSY, /* Command Disallowed */
137 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
138 MGMT_STATUS_REJECTED, /* Rejected Security */
139 MGMT_STATUS_REJECTED, /* Rejected Personal */
140 MGMT_STATUS_TIMEOUT, /* Host Timeout */
141 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
142 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
143 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
144 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
145 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
146 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
147 MGMT_STATUS_BUSY, /* Repeated Attempts */
148 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
149 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
150 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
151 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
152 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
153 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
154 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
155 MGMT_STATUS_FAILED, /* Unspecified Error */
156 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
157 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
158 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
159 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
160 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
161 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
162 MGMT_STATUS_FAILED, /* Unit Link Key Used */
163 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
164 MGMT_STATUS_TIMEOUT, /* Instant Passed */
165 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
166 MGMT_STATUS_FAILED, /* Transaction Collision */
167 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
168 MGMT_STATUS_REJECTED, /* QoS Rejected */
169 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
170 MGMT_STATUS_REJECTED, /* Insufficient Security */
171 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
172 MGMT_STATUS_BUSY, /* Role Switch Pending */
173 MGMT_STATUS_FAILED, /* Slot Violation */
174 MGMT_STATUS_FAILED, /* Role Switch Failed */
175 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
176 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
177 MGMT_STATUS_BUSY, /* Host Busy Pairing */
178 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
179 MGMT_STATUS_BUSY, /* Controller Busy */
180 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
181 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
182 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
183 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
184 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
187 static u8 mgmt_status(u8 hci_status)
189 if (hci_status < ARRAY_SIZE(mgmt_status_table))
190 return mgmt_status_table[hci_status];
192 return MGMT_STATUS_FAILED;
/* Queue an MGMT_EV_CMD_STATUS event for command @cmd on socket @sk.
 * NOTE(review): extraction dropped lines here (skb NULL check, the
 * ev->status assignment, error handling and the final return).
 */
195 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
198 struct mgmt_hdr *hdr;
199 struct mgmt_ev_cmd_status *ev;
202 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
204 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
208 hdr = (void *) skb_put(skb, sizeof(*hdr));
210 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
211 hdr->index = cpu_to_le16(index);
212 hdr->len = cpu_to_le16(sizeof(*ev));
214 ev = (void *) skb_put(skb, sizeof(*ev));
216 ev->opcode = cpu_to_le16(cmd);
218 err = sock_queue_rcv_skb(sk, skb);
/* Queue an MGMT_EV_CMD_COMPLETE event carrying the @rp_len-byte
 * response payload @rp back to socket @sk.
 */
225 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
226 void *rp, size_t rp_len)
229 struct mgmt_hdr *hdr;
230 struct mgmt_ev_cmd_complete *ev;
233 BT_DBG("sock %p", sk);
235 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
239 hdr = (void *) skb_put(skb, sizeof(*hdr));
241 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
242 hdr->index = cpu_to_le16(index);
243 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
245 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
246 ev->opcode = cpu_to_le16(cmd);
250 memcpy(ev->data, rp, rp_len);
252 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the interface version and
 * revision.  Index-less command (MGMT_INDEX_NONE).
 */
259 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
262 struct mgmt_rp_read_version rp;
264 BT_DBG("sock %p", sk);
266 rp.version = MGMT_VERSION;
267 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
269 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: reply with the supported command and
 * event opcode lists (mgmt_commands[]/mgmt_events[] above), serialized
 * as little-endian u16s.  NOTE(review): kmalloc NULL check, kfree and
 * return are among the lines elided by the extraction.
 */
273 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
276 struct mgmt_rp_read_commands *rp;
277 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
278 const u16 num_events = ARRAY_SIZE(mgmt_events);
283 BT_DBG("sock %p", sk);
285 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
287 rp = kmalloc(rp_size, GFP_KERNEL);
291 rp->num_commands = __constant_cpu_to_le16(num_commands);
292 rp->num_events = __constant_cpu_to_le16(num_events);
294 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
295 put_unaligned_le16(mgmt_commands[i], opcode);
297 for (i = 0; i < num_events; i++, opcode++)
298 put_unaligned_le16(mgmt_events[i], opcode);
300 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all BR/EDR
 * controllers, skipping devices still in setup or bound to a user
 * channel.  Walks hci_dev_list twice (count, then fill) under
 * hci_dev_list_lock; the second pass may find fewer entries, hence the
 * recomputed rp_len before replying.
 */
307 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
310 struct mgmt_rp_read_index_list *rp;
316 BT_DBG("sock %p", sk);
318 read_lock(&hci_dev_list_lock);
321 list_for_each_entry(d, &hci_dev_list, list) {
322 if (d->dev_type == HCI_BREDR)
326 rp_len = sizeof(*rp) + (2 * count);
327 rp = kmalloc(rp_len, GFP_ATOMIC);
329 read_unlock(&hci_dev_list_lock);
334 list_for_each_entry(d, &hci_dev_list, list) {
335 if (test_bit(HCI_SETUP, &d->dev_flags))
338 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
341 if (d->dev_type == HCI_BREDR) {
342 rp->index[count++] = cpu_to_le16(d->id);
343 BT_DBG("Added hci%u", d->id);
347 rp->num_controllers = cpu_to_le16(count);
348 rp_len = sizeof(*rp) + (2 * count);
350 read_unlock(&hci_dev_list_lock);
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the MGMT_SETTING_* bitmask of settings this controller could
 * support, derived from its LMP feature bits (BR/EDR, SSP, LE).
 */
360 static u32 get_supported_settings(struct hci_dev *hdev)
364 settings |= MGMT_SETTING_POWERED;
365 settings |= MGMT_SETTING_PAIRABLE;
367 if (lmp_bredr_capable(hdev)) {
368 settings |= MGMT_SETTING_CONNECTABLE;
369 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
370 settings |= MGMT_SETTING_FAST_CONNECTABLE;
371 settings |= MGMT_SETTING_DISCOVERABLE;
372 settings |= MGMT_SETTING_BREDR;
373 settings |= MGMT_SETTING_LINK_SECURITY;
375 if (lmp_ssp_capable(hdev)) {
376 settings |= MGMT_SETTING_SSP;
377 settings |= MGMT_SETTING_HS;
381 if (lmp_le_capable(hdev)) {
382 settings |= MGMT_SETTING_LE;
383 settings |= MGMT_SETTING_ADVERTISING;
/* Build the MGMT_SETTING_* bitmask of settings currently in effect,
 * derived from the hdev dev_flags bits.
 */
389 static u32 get_current_settings(struct hci_dev *hdev)
393 if (hdev_is_powered(hdev))
394 settings |= MGMT_SETTING_POWERED;
396 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
397 settings |= MGMT_SETTING_CONNECTABLE;
399 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
400 settings |= MGMT_SETTING_FAST_CONNECTABLE;
402 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
403 settings |= MGMT_SETTING_DISCOVERABLE;
405 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
406 settings |= MGMT_SETTING_PAIRABLE;
408 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
409 settings |= MGMT_SETTING_BREDR;
411 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
412 settings |= MGMT_SETTING_LE;
414 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
415 settings |= MGMT_SETTING_LINK_SECURITY;
417 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
418 settings |= MGMT_SETTING_SSP;
420 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
421 settings |= MGMT_SETTING_HS;
423 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
424 settings |= MGMT_SETTING_ADVERTISING;
429 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR_UUID16_ALL/SOME field listing the registered 16-bit
 * UUIDs to @data, without exceeding @len bytes.  Returns a pointer just
 * past the written data.  The PnP Info service class is skipped (it is
 * advertised via the Device ID field instead).  uuids_start[0] is the
 * running EIR field length, uuids_start[1] the field type; the type is
 * downgraded to ..._SOME when the list is truncated for space.
 */
431 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
433 u8 *ptr = data, *uuids_start = NULL;
434 struct bt_uuid *uuid;
439 list_for_each_entry(uuid, &hdev->uuids, list) {
442 if (uuid->size != 16)
445 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
449 if (uuid16 == PNP_INFO_SVCLASS_ID)
455 uuids_start[1] = EIR_UUID16_ALL;
459 /* Stop if not enough space to put next UUID */
460 if ((ptr - data) + sizeof(u16) > len) {
461 uuids_start[1] = EIR_UUID16_SOME;
465 *ptr++ = (uuid16 & 0x00ff);
466 *ptr++ = (uuid16 & 0xff00) >> 8;
467 uuids_start[0] += sizeof(uuid16);
/* Same as create_uuid16_list() but for 32-bit UUIDs
 * (EIR_UUID32_ALL/SOME).
 */
473 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
475 u8 *ptr = data, *uuids_start = NULL;
476 struct bt_uuid *uuid;
481 list_for_each_entry(uuid, &hdev->uuids, list) {
482 if (uuid->size != 32)
488 uuids_start[1] = EIR_UUID32_ALL;
492 /* Stop if not enough space to put next UUID */
493 if ((ptr - data) + sizeof(u32) > len) {
494 uuids_start[1] = EIR_UUID32_SOME;
498 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
500 uuids_start[0] += sizeof(u32);
/* Same as create_uuid16_list() but for full 128-bit UUIDs
 * (EIR_UUID128_ALL/SOME).
 */
506 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
508 u8 *ptr = data, *uuids_start = NULL;
509 struct bt_uuid *uuid;
514 list_for_each_entry(uuid, &hdev->uuids, list) {
515 if (uuid->size != 128)
521 uuids_start[1] = EIR_UUID128_ALL;
525 /* Stop if not enough space to put next UUID */
526 if ((ptr - data) + 16 > len) {
527 uuids_start[1] = EIR_UUID128_SOME;
531 memcpy(ptr, uuid->uuid, 16);
533 uuids_start[0] += 16;
/* Fill @ptr with LE scan response data (the local name, shortened if it
 * does not fit) and return the number of bytes written.
 */
539 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
544 name_len = strlen(hdev->dev_name);
546 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
548 if (name_len > max_len) {
550 ptr[1] = EIR_NAME_SHORT;
552 ptr[1] = EIR_NAME_COMPLETE;
554 ptr[0] = name_len + 1;
556 memcpy(ptr + 2, hdev->dev_name, name_len);
558 ad_len += (name_len + 2);
559 ptr += (name_len + 2);
/* Queue an HCI LE Set Scan Response Data command if the data changed.
 * NOTE(review): this caches into hdev->adv_data exactly like update_ad()
 * below — verify it should not use a dedicated scan-response buffer
 * instead; sharing one cache would defeat the no-change short-circuit.
 */
565 static void update_scan_rsp_data(struct hci_request *req)
567 struct hci_dev *hdev = req->hdev;
568 struct hci_cp_le_set_scan_rsp_data cp;
571 if (!lmp_le_capable(hdev))
574 memset(&cp, 0, sizeof(cp));
576 len = create_scan_rsp_data(hdev, cp.data);
578 if (hdev->adv_data_len == len &&
579 memcmp(cp.data, hdev->adv_data, len) == 0)
582 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
583 hdev->adv_data_len = len;
587 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Fill @ptr with LE advertising data (flags byte plus optional TX power)
 * and return the number of bytes written.
 */
590 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
592 u8 ad_len = 0, flags = 0;
594 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
595 flags |= LE_AD_GENERAL;
597 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
598 if (lmp_le_br_capable(hdev))
599 flags |= LE_AD_SIM_LE_BREDR_CTRL;
600 if (lmp_host_le_br_capable(hdev))
601 flags |= LE_AD_SIM_LE_BREDR_HOST;
603 flags |= LE_AD_NO_BREDR;
607 BT_DBG("adv flags 0x%02x", flags);
617 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
619 ptr[1] = EIR_TX_POWER;
620 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI LE Set Advertising Data command if the advertising data
 * changed since the cached copy in hdev->adv_data.
 */
629 static void update_ad(struct hci_request *req)
631 struct hci_dev *hdev = req->hdev;
632 struct hci_cp_le_set_adv_data cp;
635 if (!lmp_le_capable(hdev))
638 memset(&cp, 0, sizeof(cp));
640 len = create_adv_data(hdev, cp.data);
642 if (hdev->adv_data_len == len &&
643 memcmp(cp.data, hdev->adv_data, len) == 0)
646 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
647 hdev->adv_data_len = len;
651 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Build the Extended Inquiry Response payload in @data: local name,
 * optional TX power and Device ID fields, then the 16/32/128-bit UUID
 * lists (bounded by the remaining EIR space).
 */
654 static void create_eir(struct hci_dev *hdev, u8 *data)
659 name_len = strlen(hdev->dev_name);
665 ptr[1] = EIR_NAME_SHORT;
667 ptr[1] = EIR_NAME_COMPLETE;
669 /* EIR Data length */
670 ptr[0] = name_len + 1;
672 memcpy(ptr + 2, hdev->dev_name, name_len);
674 ptr += (name_len + 2);
677 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
679 ptr[1] = EIR_TX_POWER;
680 ptr[2] = (u8) hdev->inq_tx_power;
685 if (hdev->devid_source > 0) {
687 ptr[1] = EIR_DEVICE_ID;
689 put_unaligned_le16(hdev->devid_source, ptr + 2)
690 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
691 put_unaligned_le16(hdev->devid_product, ptr + 6);
692 put_unaligned_le16(hdev->devid_version, ptr + 8);
697 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
698 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
699 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command if the controller supports extended
 * inquiry, SSP is enabled, the service cache is off, and the EIR data
 * actually changed since the cached copy in hdev->eir.
 */
702 static void update_eir(struct hci_request *req)
704 struct hci_dev *hdev = req->hdev;
705 struct hci_cp_write_eir cp;
707 if (!hdev_is_powered(hdev))
710 if (!lmp_ext_inq_capable(hdev))
713 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
716 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
719 memset(&cp, 0, sizeof(cp));
721 create_eir(hdev, cp.data);
723 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
726 memcpy(hdev->eir, cp.data, sizeof(cp.data));
728 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hints of all registered UUIDs; used as
 * the service-class byte of the Class of Device.
 */
731 static u8 get_service_classes(struct hci_dev *hdev)
733 struct bt_uuid *uuid;
736 list_for_each_entry(uuid, &hdev->uuids, list)
737 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command if the 3-byte CoD
 * (minor, major, service classes) differs from the current one.
 */
742 static void update_class(struct hci_request *req)
744 struct hci_dev *hdev = req->hdev;
747 BT_DBG("%s", hdev->name);
749 if (!hdev_is_powered(hdev))
752 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
755 cod[0] = hdev->minor_class;
756 cod[1] = hdev->major_class;
757 cod[2] = get_service_classes(hdev);
759 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
762 if (memcmp(cod, hdev->dev_class, 3) == 0)
765 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Delayed work run when the service cache expires: clears the cache
 * flag and pushes the now-current class/EIR to the controller.
 */
768 static void service_cache_off(struct work_struct *work)
770 struct hci_dev *hdev = container_of(work, struct hci_dev,
772 struct hci_request req;
774 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
777 hci_req_init(&req, hdev);
784 hci_dev_unlock(hdev);
786 hci_req_run(&req, NULL);
/* First-time mgmt takeover of @hdev: set HCI_MGMT once, hook up the
 * service-cache work, and clear the implicit pairable bit so user space
 * must opt in explicitly.
 */
789 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
791 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
794 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
796 /* Non-mgmt controlled devices get this bit set
797 * implicitly so that pairing works for them, however
798 * for mgmt we require user-space to explicitly enable
801 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: reply with address, version, manufacturer,
 * settings masks, class of device and names for @hdev.
 */
804 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
805 void *data, u16 data_len)
807 struct mgmt_rp_read_info rp;
809 BT_DBG("sock %p %s", sk, hdev->name);
813 memset(&rp, 0, sizeof(rp));
815 bacpy(&rp.bdaddr, &hdev->bdaddr);
817 rp.version = hdev->hci_ver;
818 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
820 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
821 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
823 memcpy(rp.dev_class, hdev->dev_class, 3);
825 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
826 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
828 hci_dev_unlock(hdev);
830 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Free a pending command and its owned resources.
 * NOTE(review): body elided by extraction.
 */
834 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending_cmd for @opcode on @hdev, copy @len bytes of
 * @data as its parameter blob, and link it on hdev->mgmt_pending.
 * NOTE(review): kmalloc failure paths and the sock reference handling
 * are among the lines elided here.
 */
841 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
842 struct hci_dev *hdev, void *data,
845 struct pending_cmd *cmd;
847 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
851 cmd->opcode = opcode;
852 cmd->index = hdev->id;
854 cmd->param = kmalloc(len, GFP_KERNEL);
861 memcpy(cmd->param, data, len);
866 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command, or only on those matching
 * @opcode when opcode > 0.  Safe against @cb removing entries.
 */
871 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
872 void (*cb)(struct pending_cmd *cmd,
876 struct pending_cmd *cmd, *tmp;
878 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
879 if (opcode > 0 && cmd->opcode != opcode)
/* Find the first pending command with the given opcode, if any. */
886 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
888 struct pending_cmd *cmd;
890 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
891 if (cmd->opcode == opcode)
/* Unlink a pending command from its list and free it. */
898 static void mgmt_pending_remove(struct pending_cmd *cmd)
900 list_del(&cmd->list);
901 mgmt_pending_free(cmd);
/* Reply to a settings-changing command with the (little-endian) current
 * settings bitmask.
 */
904 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
906 __le32 settings = cpu_to_le32(get_current_settings(hdev));
908 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler.  Rejects non-boolean values and a
 * duplicate in-flight request; short-circuits when the requested state
 * already holds; otherwise queues power_on/power_off work and leaves a
 * pending command to be completed from the power-change path.  The
 * HCI_AUTO_OFF special case cancels the delayed auto power-off instead.
 */
912 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
915 struct mgmt_mode *cp = data;
916 struct pending_cmd *cmd;
919 BT_DBG("request for %s", hdev->name);
921 if (cp->val != 0x00 && cp->val != 0x01)
922 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
923 MGMT_STATUS_INVALID_PARAMS);
927 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
928 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
933 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
934 cancel_delayed_work(&hdev->power_off);
937 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
939 err = mgmt_powered(hdev, 1);
944 if (!!cp->val == hdev_is_powered(hdev)) {
945 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
949 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
956 queue_work(hdev->req_workqueue, &hdev->power_on);
958 queue_work(hdev->req_workqueue, &hdev->power_off.work);
963 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all control sockets except @skip_sk.
 * Uses hdev's index when present, MGMT_INDEX_NONE otherwise.
 */
967 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
968 struct sock *skip_sk)
971 struct mgmt_hdr *hdr;
973 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
977 hdr = (void *) skb_put(skb, sizeof(*hdr));
978 hdr->opcode = cpu_to_le16(event);
980 hdr->index = cpu_to_le16(hdev->id);
982 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
983 hdr->len = cpu_to_le16(data_len);
986 memcpy(skb_put(skb, data_len), data, data_len);
989 __net_timestamp(skb);
991 hci_send_to_control(skb, skip_sk);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask. */
997 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1001 ev = cpu_to_le32(get_current_settings(hdev));
1003 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* NOTE(review): struct cmd_lookup is truncated by the extraction; only
 * the hdev member is visible here (a sock pointer is used below).
 */
1008 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each matching pending command
 * with the current settings, remember the first requester's socket in
 * the cmd_lookup (taking a reference), and free the command.
 */
1012 static void settings_rsp(struct pending_cmd *cmd, void *data)
1014 struct cmd_lookup *match = data;
1016 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1018 list_del(&cmd->list);
1020 if (match->sk == NULL) {
1021 match->sk = cmd->sk;
1022 sock_hold(match->sk);
1025 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail each matching pending command
 * with the status pointed to by @data and remove it.
 */
1028 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1032 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1033 mgmt_pending_remove(cmd);
1036 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1038 if (!lmp_bredr_capable(hdev))
1039 return MGMT_STATUS_NOT_SUPPORTED;
1040 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1041 return MGMT_STATUS_REJECTED;
1043 return MGMT_STATUS_SUCCESS;
1046 static u8 mgmt_le_support(struct hci_dev *hdev)
1048 if (!lmp_le_capable(hdev))
1049 return MGMT_STATUS_NOT_SUPPORTED;
1050 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1051 return MGMT_STATUS_REJECTED;
1053 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for MGMT_OP_SET_DISCOVERABLE: on
 * failure report the mapped error and clear the limited-discoverable
 * flag; on success flip HCI_DISCOVERABLE, arm the discoverable timeout
 * if one was requested, answer the pending command, broadcast
 * New Settings when the flag actually changed, and refresh the class
 * of device (limited-discoverable bit lives in the CoD).
 */
1056 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1058 struct pending_cmd *cmd;
1059 struct mgmt_mode *cp;
1060 struct hci_request req;
1063 BT_DBG("status 0x%02x", status);
1067 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1072 u8 mgmt_err = mgmt_status(status);
1073 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1074 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1080 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1083 if (hdev->discov_timeout > 0) {
1084 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1085 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1089 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1093 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1096 new_settings(hdev, cmd->sk);
1098 /* When the discoverable mode gets changed, make sure
1099 * that class of device has the limited discoverable
1100 * bit correctly set.
1102 hci_req_init(&req, hdev);
1104 hci_req_run(&req, NULL);
1107 mgmt_pending_remove(cmd);
1110 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.  val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable.  Validates the val/timeout combination
 * (off forbids a timeout, limited requires one), handles the
 * powered-off case purely in dev_flags, short-circuits when only the
 * timeout changes, and otherwise builds an HCI request writing the
 * IAC LAP list and scan enable, completed by
 * set_discoverable_complete().
 */
1113 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1116 struct mgmt_cp_set_discoverable *cp = data;
1117 struct pending_cmd *cmd;
1118 struct hci_request req;
1123 BT_DBG("request for %s", hdev->name);
1125 status = mgmt_bredr_support(hdev);
1127 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1130 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1131 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1132 MGMT_STATUS_INVALID_PARAMS);
1134 timeout = __le16_to_cpu(cp->timeout);
1136 /* Disabling discoverable requires that no timeout is set,
1137 * and enabling limited discoverable requires a timeout.
1139 if ((cp->val == 0x00 && timeout > 0) ||
1140 (cp->val == 0x02 && timeout == 0))
1141 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1142 MGMT_STATUS_INVALID_PARAMS);
1146 if (!hdev_is_powered(hdev) && timeout > 0) {
1147 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1148 MGMT_STATUS_NOT_POWERED);
1152 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1153 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1154 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1159 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1160 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1161 MGMT_STATUS_REJECTED);
1165 if (!hdev_is_powered(hdev)) {
1166 bool changed = false;
1168 /* Setting limited discoverable when powered off is
1169 * not a valid operation since it requires a timeout
1170 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1172 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1173 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1177 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1182 err = new_settings(hdev, sk);
1187 /* If the current mode is the same, then just update the timeout
1188 * value with the new value. And if only the timeout gets updated,
1189 * then no need for any HCI transactions.
1191 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1192 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1193 &hdev->dev_flags)) {
1194 cancel_delayed_work(&hdev->discov_off);
1195 hdev->discov_timeout = timeout;
1197 if (cp->val && hdev->discov_timeout > 0) {
1198 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1199 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1203 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1207 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1213 /* Cancel any potential discoverable timeout that might be
1214 * still active and store new timeout value. The arming of
1215 * the timeout happens in the complete handler.
1217 cancel_delayed_work(&hdev->discov_off);
1218 hdev->discov_timeout = timeout;
1220 hci_req_init(&req, hdev);
1225 struct hci_cp_write_current_iac_lap hci_cp;
1227 if (cp->val == 0x02) {
1228 /* Limited discoverable mode */
1229 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
/* LIAC (0x9e8b00) + GIAC (0x9e8b33), little-endian LAPs. */
1232 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1233 hci_cp.iac_lap[1] = 0x8b;
1234 hci_cp.iac_lap[2] = 0x9e;
1235 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1236 hci_cp.iac_lap[4] = 0x8b;
1237 hci_cp.iac_lap[5] = 0x9e;
1239 /* General discoverable mode */
1240 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
/* GIAC (0x9e8b33) only. */
1243 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1244 hci_cp.iac_lap[1] = 0x8b;
1245 hci_cp.iac_lap[2] = 0x9e;
1248 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1249 (hci_cp.num_iac * 3) + 1, &hci_cp);
1251 scan |= SCAN_INQUIRY;
1253 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1256 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1258 err = hci_req_run(&req, set_discoverable_complete);
1260 mgmt_pending_remove(cmd);
1263 hci_dev_unlock(hdev);
/* Queue page-scan activity/type commands implementing "fast
 * connectable": interlaced scan at a 160 ms interval when @enable,
 * standard scan at the 1.28 s default otherwise.  Commands are only
 * queued when the values actually change; requires >= BT 1.2.
 */
1267 static void write_fast_connectable(struct hci_request *req, bool enable)
1269 struct hci_dev *hdev = req->hdev;
1270 struct hci_cp_write_page_scan_activity acp;
1273 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1277 type = PAGE_SCAN_TYPE_INTERLACED;
1279 /* 160 msec page scan interval */
1280 acp.interval = __constant_cpu_to_le16(0x0100);
1282 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1284 /* default 1.28 sec page scan */
1285 acp.interval = __constant_cpu_to_le16(0x0800);
1288 acp.window = __constant_cpu_to_le16(0x0012);
1290 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1291 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1292 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1295 if (hdev->page_scan_type != type)
1296 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* Choose the LE advertising type from the (possibly still pending)
 * connectable setting: connectable -> ADV_IND, else ADV_NONCONN_IND.
 */
1299 static u8 get_adv_type(struct hci_dev *hdev)
1301 struct pending_cmd *cmd;
1304 /* If there's a pending mgmt command the flag will not yet have
1305 * it's final value, so check for this first.
1307 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1309 struct mgmt_mode *cp = cmd->param;
1310 connectable = !!cp->val;
1312 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1315 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
/* Queue LE Set Advertising Parameters (1.28 s interval, all channels,
 * public address when one is set, random otherwise) followed by
 * Set Advertising Enable.
 */
1318 static void enable_advertising(struct hci_request *req)
1320 struct hci_dev *hdev = req->hdev;
1321 struct hci_cp_le_set_adv_param cp;
1324 memset(&cp, 0, sizeof(cp));
1325 cp.min_interval = __constant_cpu_to_le16(0x0800);
1326 cp.max_interval = __constant_cpu_to_le16(0x0800);
1327 cp.type = get_adv_type(hdev);
1328 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1329 cp.own_address_type = ADDR_LE_DEV_PUBLIC;
1331 cp.own_address_type = ADDR_LE_DEV_RANDOM;
1332 cp.channel_map = 0x07;
1334 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1336 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue LE Set Advertising Enable with advertising turned off. */
1339 static void disable_advertising(struct hci_request *req)
1343 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* HCI request completion handler for MGMT_OP_SET_CONNECTABLE: on
 * failure report the mapped error; on success flip HCI_CONNECTABLE,
 * answer the pending command and broadcast New Settings when the flag
 * actually changed.
 */
1346 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1348 struct pending_cmd *cmd;
1349 struct mgmt_mode *cp;
1352 BT_DBG("status 0x%02x", status);
1356 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1361 u8 mgmt_err = mgmt_status(status);
1362 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1368 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1370 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1372 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1375 new_settings(hdev, cmd->sk);
1378 mgmt_pending_remove(cmd);
1381 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler.  Requires BR/EDR or LE to be
 * enabled and a boolean val.  Powered-off requests only toggle
 * dev_flags (disabling connectable also clears discoverable);
 * otherwise an HCI request updates page scan (and, if advertising,
 * re-arms advertising so the adv type tracks connectable), completed
 * by set_connectable_complete().  Disabling connectable also cancels a
 * running discoverable timeout.
 */
1384 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1387 struct mgmt_mode *cp = data;
1388 struct pending_cmd *cmd;
1389 struct hci_request req;
1393 BT_DBG("request for %s", hdev->name);
1395 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1396 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1397 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1398 MGMT_STATUS_REJECTED);
1400 if (cp->val != 0x00 && cp->val != 0x01)
1401 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1402 MGMT_STATUS_INVALID_PARAMS);
1406 if (!hdev_is_powered(hdev)) {
1407 bool changed = false;
1409 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1413 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1415 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1416 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1419 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1424 err = new_settings(hdev, sk);
1429 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1430 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1431 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1436 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1442 hci_req_init(&req, hdev);
1444 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
1445 cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1451 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1452 hdev->discov_timeout > 0)
1453 cancel_delayed_work(&hdev->discov_off);
1456 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1459 /* If we're going from non-connectable to connectable or
1460 * vice-versa when fast connectable is enabled ensure that fast
1461 * connectable gets disabled. write_fast_connectable won't do
1462 * anything if the page scan parameters are already what they
1465 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1466 write_fast_connectable(&req, false);
1468 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1469 hci_conn_num(hdev, LE_LINK) == 0) {
1470 disable_advertising(&req);
1471 enable_advertising(&req);
1474 err = hci_req_run(&req, set_connectable_complete);
1476 mgmt_pending_remove(cmd);
1477 if (err == -ENODATA)
1478 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE,
1484 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: toggles the HCI_PAIRABLE flag in
 * hdev->dev_flags and, when the value actually changed, emits a New
 * Settings event to other mgmt sockets.
 * NOTE(review): extraction dropped lines here (locking, if/else around
 * the test_and_set/clear calls, final return) — verify against the
 * full source.
 */
1488 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1491 struct mgmt_mode *cp = data;
1495 BT_DBG("request for %s", hdev->name);
/* Mode commands only accept 0x00 (off) or 0x01 (on). */
1497 if (cp->val != 0x00 && cp->val != 0x01)
1498 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1499 MGMT_STATUS_INVALID_PARAMS);
/* test_and_* returns the previous bit value, so "changed" is true only
 * when the flag transitions. */
1504 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1506 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1508 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
/* Broadcast only when the setting transitioned. */
1513 err = new_settings(hdev, sk);
1516 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: enables/disables BR/EDR link-level
 * security (HCI Authentication Enable). Powered-off controllers only get
 * the dev_flags bit flipped; powered controllers get an actual
 * HCI_OP_WRITE_AUTH_ENABLE command.
 * NOTE(review): extraction dropped lines (locking, some assignments,
 * returns) — verify against the full source.
 */
1520 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1523 struct mgmt_mode *cp = data;
1524 struct pending_cmd *cmd;
1528 BT_DBG("request for %s", hdev->name);
/* Link security is a BR/EDR-only feature. */
1530 status = mgmt_bredr_support(hdev);
1532 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1535 if (cp->val != 0x00 && cp->val != 0x01)
1536 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1537 MGMT_STATUS_INVALID_PARAMS);
/* Not powered: just record the desired state in dev_flags. */
1541 if (!hdev_is_powered(hdev)) {
1542 bool changed = false;
1544 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1545 &hdev->dev_flags)) {
1546 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1550 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1555 err = new_settings(hdev, sk);
/* Only one Set Link Security command may be in flight at a time. */
1560 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1561 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Already in the requested state: short-circuit with a settings reply. */
1568 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1569 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1573 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1579 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
/* Sending failed: drop the pending command we just queued. */
1581 mgmt_pending_remove(cmd);
1586 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: enables/disables Secure Simple Pairing.
 * Requires BR/EDR and LMP SSP support. Disabling SSP also clears the
 * High Speed flag since HS depends on SSP.
 * NOTE(review): extraction dropped lines (locking, if/else structure,
 * returns) — verify against the full source.
 */
1590 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1592 struct mgmt_mode *cp = data;
1593 struct pending_cmd *cmd;
1597 BT_DBG("request for %s", hdev->name);
1599 status = mgmt_bredr_support(hdev);
1601 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
/* The controller itself must advertise SSP capability. */
1603 if (!lmp_ssp_capable(hdev))
1604 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1605 MGMT_STATUS_NOT_SUPPORTED);
1607 if (cp->val != 0x00 && cp->val != 0x01)
1608 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1609 MGMT_STATUS_INVALID_PARAMS);
/* Not powered: update flags only; HS is cleared alongside SSP. */
1613 if (!hdev_is_powered(hdev)) {
1617 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1620 changed = test_and_clear_bit(HCI_SSP_ENABLED,
1623 changed = test_and_clear_bit(HCI_HS_ENABLED,
1626 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1629 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1634 err = new_settings(hdev, sk);
/* Serialize against in-flight SSP/HS commands. */
1639 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1640 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1641 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
/* No-op if already in the requested state. */
1646 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1647 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1651 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1657 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1659 mgmt_pending_remove(cmd);
1664 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: toggles the High Speed (AMP) setting. HS is a
 * pure host-side flag here — no HCI command is sent — but it requires
 * BR/EDR, SSP capability, and SSP currently enabled.
 * NOTE(review): extraction dropped lines (locking, if/else structure,
 * returns) — verify against the full source.
 */
1668 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1670 struct mgmt_mode *cp = data;
1675 BT_DBG("request for %s", hdev->name);
1677 status = mgmt_bredr_support(hdev);
1679 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1681 if (!lmp_ssp_capable(hdev))
1682 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1683 MGMT_STATUS_NOT_SUPPORTED);
/* HS depends on SSP being enabled first. */
1685 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1686 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1687 MGMT_STATUS_REJECTED);
1689 if (cp->val != 0x00 && cp->val != 0x01)
1690 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1691 MGMT_STATUS_INVALID_PARAMS);
1696 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
/* NOTE(review): this powered check appears inside the disable path in
 * the full source — the branch context was lost in extraction. */
1698 if (hdev_is_powered(hdev)) {
1699 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1700 MGMT_STATUS_REJECTED);
1704 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1707 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1712 err = new_settings(hdev, sk);
1715 hci_dev_unlock(hdev);
/* Request-completion callback for Set LE: reports status to all pending
 * MGMT_OP_SET_LE commands, broadcasts New Settings on success, and — when
 * LE ended up enabled — refreshes the advertising/scan-response data so
 * the controller has sane defaults.
 * NOTE(review): extraction dropped lines (status branch, locking) —
 * verify against the full source.
 */
1719 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1721 struct cmd_lookup match = { NULL, hdev };
/* Failure path: convert HCI status and fail every pending SET_LE cmd. */
1724 u8 mgmt_err = mgmt_status(status);
1726 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
/* Success path: complete pending commands and notify everyone. */
1731 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1733 new_settings(hdev, match.sk);
1738 /* Make sure the controller has a good default for
1739 * advertising data. Restrict the update to when LE
1740 * has actually been enabled. During power on, the
1741 * update in powered_update_hci will take care of it.
1743 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1744 struct hci_request req;
1748 hci_req_init(&req, hdev);
1750 update_scan_rsp_data(&req);
/* Fire-and-forget: no completion callback needed for this refresh. */
1751 hci_req_run(&req, NULL);
1753 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: enables/disables Low Energy support via
 * HCI_OP_WRITE_LE_HOST_SUPPORTED. Disabling LE also tears down
 * advertising first. LE-only (no BR/EDR) devices cannot toggle LE off.
 * NOTE(review): extraction dropped lines (locking, val assignment,
 * returns) — verify against the full source.
 */
1757 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1759 struct mgmt_mode *cp = data;
1760 struct hci_cp_write_le_host_supported hci_cp;
1761 struct pending_cmd *cmd;
1762 struct hci_request req;
1766 BT_DBG("request for %s", hdev->name);
1768 if (!lmp_le_capable(hdev))
1769 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1770 MGMT_STATUS_NOT_SUPPORTED);
1772 if (cp->val != 0x00 && cp->val != 0x01)
1773 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1774 MGMT_STATUS_INVALID_PARAMS);
1776 /* LE-only devices do not allow toggling LE on/off */
1777 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1778 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1779 MGMT_STATUS_REJECTED);
1784 enabled = lmp_host_le_capable(hdev);
/* Flag-only path: powered off, or the host already matches. */
1786 if (!hdev_is_powered(hdev) || val == enabled) {
1787 bool changed = false;
1789 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1790 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Disabling LE implies advertising must go away too. */
1794 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1795 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1799 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1804 err = new_settings(hdev, sk);
/* Serialize against in-flight LE/advertising commands. */
1809 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1810 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1811 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1816 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1822 hci_req_init(&req, hdev);
1824 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE+BR/EDR only if the controller supports it. */
1828 hci_cp.simul = lmp_le_br_capable(hdev);
1830 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1831 disable_advertising(&req);
1834 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1837 err = hci_req_run(&req, le_enable_complete);
1839 mgmt_pending_remove(cmd);
1842 hci_dev_unlock(hdev);
1846 /* This is a helper function to test for pending mgmt commands that can
1847 * cause CoD or EIR HCI commands. We can only allow one such pending
1848 * mgmt command at a time since otherwise we cannot easily track what
1849 * the current values are, will be, and based on that calculate if a new
1850 * HCI command needs to be sent and if yes with what value.
/* Returns true if any pending mgmt command could modify the Class of
 * Device or EIR data (see the comment block above). Caller must hold
 * the relevant locks for mgmt_pending traversal.
 * NOTE(review): the "return true"/"return false" tail was dropped by
 * extraction — verify against the full source.
 */
1852 static bool pending_eir_or_class(struct hci_dev *hdev)
1854 struct pending_cmd *cmd;
1856 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1857 switch (cmd->opcode) {
/* Any of these opcodes may trigger CoD/EIR HCI traffic. */
1858 case MGMT_OP_ADD_UUID:
1859 case MGMT_OP_REMOVE_UUID:
1860 case MGMT_OP_SET_DEV_CLASS:
1861 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used by get_uuid_size() to detect 16/32-bit
 * short-form UUIDs. */
1869 static const u8 bluetooth_base_uuid[] = {
1870 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1871 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its low 12 bytes don't match the Bluetooth
 * Base UUID it is a true 128-bit UUID; otherwise the 32-bit value at
 * offset 12 decides between 16- and 32-bit short forms.
 * NOTE(review): the return statements were dropped by extraction —
 * verify against the full source.
 */
1874 static u8 get_uuid_size(const u8 *uuid)
/* Non-base-UUID prefix => full 128-bit UUID. */
1878 if (memcmp(uuid, bluetooth_base_uuid, 12))
1881 val = get_unaligned_le32(&uuid[12]);
/* Common completion helper for Add UUID / Remove UUID / Set Device Class:
 * finds the pending command for @mgmt_op and completes it with the
 * current device class as response payload.
 */
1888 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1890 struct pending_cmd *cmd;
1894 cmd = mgmt_pending_find(mgmt_op, hdev);
/* Response carries the 3-byte Class of Device. */
1898 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1899 hdev->dev_class, 3);
1901 mgmt_pending_remove(cmd);
1904 hci_dev_unlock(hdev);
/* hci_request completion callback for Add UUID; defers to the shared
 * class-command completion helper. */
1907 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1909 BT_DBG("status 0x%02x", status);
1911 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: records a service UUID in hdev->uuids and
 * kicks off an HCI request to refresh class/EIR. If the request produces
 * no HCI traffic (-ENODATA), complete immediately with the current class.
 * NOTE(review): extraction dropped lines (locking, allocation-failure
 * handling, the class/EIR update calls, returns) — verify against the
 * full source.
 */
1914 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1916 struct mgmt_cp_add_uuid *cp = data;
1917 struct pending_cmd *cmd;
1918 struct hci_request req;
1919 struct bt_uuid *uuid;
1922 BT_DBG("request for %s", hdev->name);
/* Only one CoD/EIR-affecting command may be pending at a time. */
1926 if (pending_eir_or_class(hdev)) {
1927 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1932 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1938 memcpy(uuid->uuid, cp->uuid, 16);
1939 uuid->svc_hint = cp->svc_hint;
1940 uuid->size = get_uuid_size(cp->uuid);
1942 list_add_tail(&uuid->list, &hdev->uuids);
1944 hci_req_init(&req, hdev);
1949 err = hci_req_run(&req, add_uuid_complete);
1951 if (err != -ENODATA)
/* Nothing to send to the controller: reply right away. */
1954 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1955 hdev->dev_class, 3);
/* HCI traffic in flight: park the command until completion. */
1959 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1968 hci_dev_unlock(hdev);
/* Re-arm the service cache: on a powered device, set HCI_SERVICE_CACHE
 * and schedule the delayed service_cache work. Return value semantics
 * not fully visible here (tail dropped by extraction).
 */
1972 static bool enable_service_cache(struct hci_dev *hdev)
1974 if (!hdev_is_powered(hdev))
/* Only schedule the work on the 0->1 transition of the flag. */
1977 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1978 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_request completion callback for Remove UUID; defers to the shared
 * class-command completion helper. */
1986 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1988 BT_DBG("status 0x%02x", status);
1990 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: removes one UUID (or, for the all-zero
 * wildcard, clears the whole list and re-enables the service cache),
 * then refreshes class/EIR via an HCI request.
 * NOTE(review): extraction dropped lines (locking, found-counter,
 * update calls, returns) — verify against the full source.
 */
1993 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1996 struct mgmt_cp_remove_uuid *cp = data;
1997 struct pending_cmd *cmd;
1998 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as a wildcard meaning "remove everything". */
1999 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2000 struct hci_request req;
2003 BT_DBG("request for %s", hdev->name);
2007 if (pending_eir_or_class(hdev)) {
2008 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard: clear all UUIDs; if the service cache got re-armed we can
 * reply immediately without touching the controller. */
2013 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2014 err = hci_uuids_clear(hdev);
2016 if (enable_service_cache(hdev)) {
2017 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2018 0, hdev->dev_class, 3);
/* Specific UUID: unlink every matching entry. */
2027 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2028 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2031 list_del(&match->list);
/* No entry matched the requested UUID. */
2037 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2038 MGMT_STATUS_INVALID_PARAMS);
2043 hci_req_init(&req, hdev);
2048 err = hci_req_run(&req, remove_uuid_complete);
2050 if (err != -ENODATA)
2053 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2054 hdev->dev_class, 3);
2058 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2067 hci_dev_unlock(hdev);
/* hci_request completion callback for Set Device Class; defers to the
 * shared class-command completion helper. */
2071 static void set_class_complete(struct hci_dev *hdev, u8 status)
2073 BT_DBG("status 0x%02x", status);
2075 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: stores the major/minor device class and,
 * on a powered BR/EDR controller, pushes it to the controller (flushing
 * the service cache first if it was active).
 * NOTE(review): extraction dropped lines (locking, update_class/eir
 * calls, returns) — verify against the full source.
 */
2078 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2081 struct mgmt_cp_set_dev_class *cp = data;
2082 struct pending_cmd *cmd;
2083 struct hci_request req;
2086 BT_DBG("request for %s", hdev->name);
2088 if (!lmp_bredr_capable(hdev))
2089 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2090 MGMT_STATUS_NOT_SUPPORTED);
2094 if (pending_eir_or_class(hdev)) {
2095 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2100 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2101 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2102 MGMT_STATUS_INVALID_PARAMS);
2106 hdev->major_class = cp->major;
2107 hdev->minor_class = cp->minor;
/* Powered off: remember the class, reply with current CoD. */
2109 if (!hdev_is_powered(hdev)) {
2110 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2111 hdev->dev_class, 3);
2115 hci_req_init(&req, hdev);
/* Unlock around cancel_delayed_work_sync to avoid deadlocking with the
 * service_cache work item, which takes the dev lock itself. */
2117 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2118 hci_dev_unlock(hdev);
2119 cancel_delayed_work_sync(&hdev->service_cache);
2126 err = hci_req_run(&req, set_class_complete);
2128 if (err != -ENODATA)
2131 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2132 hdev->dev_class, 3);
2136 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2145 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validates the variable-length key list,
 * replaces the device's stored BR/EDR link keys wholesale, and sets or
 * clears the debug-keys policy flag.
 * NOTE(review): extraction dropped lines (locking, debug_keys branch,
 * return) — verify against the full source.
 */
2149 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2152 struct mgmt_cp_load_link_keys *cp = data;
2153 u16 key_count, expected_len;
2156 BT_DBG("request for %s", hdev->name);
2158 if (!lmp_bredr_capable(hdev))
2159 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2160 MGMT_STATUS_NOT_SUPPORTED);
2162 key_count = __le16_to_cpu(cp->key_count);
/* The payload must be exactly header + key_count entries. */
2164 expected_len = sizeof(*cp) + key_count *
2165 sizeof(struct mgmt_link_key_info);
2166 if (expected_len != len) {
2167 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2169 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2170 MGMT_STATUS_INVALID_PARAMS);
2173 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2174 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2175 MGMT_STATUS_INVALID_PARAMS);
2177 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every address type before mutating any state. */
2180 for (i = 0; i < key_count; i++) {
2181 struct mgmt_link_key_info *key = &cp->keys[i];
2183 if (key->addr.type != BDADDR_BREDR)
2184 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2185 MGMT_STATUS_INVALID_PARAMS);
/* Full replacement: drop old keys, then store the new set. */
2190 hci_link_keys_clear(hdev);
2193 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2195 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2197 for (i = 0; i < key_count; i++) {
2198 struct mgmt_link_key_info *key = &cp->keys[i];
2200 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2201 key->type, key->pin_len);
2204 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2206 hci_dev_unlock(hdev);
/* Emit a Device Unpaired mgmt event for @bdaddr/@addr_type, skipping
 * delivery to @skip_sk (the socket that initiated the unpair). */
2211 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2212 u8 addr_type, struct sock *skip_sk)
2214 struct mgmt_ev_device_unpaired ev;
2216 bacpy(&ev.addr.bdaddr, bdaddr);
2217 ev.addr.type = addr_type;
2219 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: deletes the link key (BR/EDR) or LTK
 * (LE) for a device and optionally disconnects it. When a disconnect is
 * issued, completion is deferred until the HCI disconnect finishes.
 * NOTE(review): extraction dropped lines (locking, null-conn branch,
 * returns) — verify against the full source.
 */
2223 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2226 struct mgmt_cp_unpair_device *cp = data;
2227 struct mgmt_rp_unpair_device rp;
2228 struct hci_cp_disconnect dc;
2229 struct pending_cmd *cmd;
2230 struct hci_conn *conn;
/* Response always echoes the target address. */
2233 memset(&rp, 0, sizeof(rp));
2234 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2235 rp.addr.type = cp->addr.type;
2237 if (!bdaddr_type_is_valid(cp->addr.type))
2238 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2239 MGMT_STATUS_INVALID_PARAMS,
2242 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2243 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2244 MGMT_STATUS_INVALID_PARAMS,
2249 if (!hdev_is_powered(hdev)) {
2250 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2251 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Remove the stored pairing material by transport type. */
2255 if (cp->addr.type == BDADDR_BREDR)
2256 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2258 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
2261 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2262 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
/* Optionally look up a live connection to tear down as well. */
2266 if (cp->disconnect) {
2267 if (cp->addr.type == BDADDR_BREDR)
2268 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2271 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No connection to drop: complete now and broadcast the unpair. */
2278 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2280 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2284 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2291 dc.handle = cpu_to_le16(conn->handle);
2292 dc.reason = 0x13; /* Remote User Terminated Connection */
2293 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2295 mgmt_pending_remove(cmd);
2298 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: finds the ACL or LE connection for the
 * given address and issues an HCI Disconnect with reason "remote user
 * terminated"; completion is deferred to the disconnect event.
 * NOTE(review): extraction dropped lines (locking, returns) — verify
 * against the full source.
 */
2302 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2305 struct mgmt_cp_disconnect *cp = data;
2306 struct mgmt_rp_disconnect rp;
2307 struct hci_cp_disconnect dc;
2308 struct pending_cmd *cmd;
2309 struct hci_conn *conn;
2314 memset(&rp, 0, sizeof(rp));
2315 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2316 rp.addr.type = cp->addr.type;
2318 if (!bdaddr_type_is_valid(cp->addr.type))
2319 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2320 MGMT_STATUS_INVALID_PARAMS,
2325 if (!test_bit(HCI_UP, &hdev->flags)) {
2326 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2327 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be pending at a time. */
2331 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2332 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2333 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2337 if (cp->addr.type == BDADDR_BREDR)
2338 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2341 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED connections are not actually established. */
2343 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2344 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2345 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2349 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2355 dc.handle = cpu_to_le16(conn->handle);
2356 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2358 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2360 mgmt_pending_remove(cmd);
2363 hci_dev_unlock(hdev);
/* Map an HCI link type + LE address type to the mgmt BDADDR_* address
 * type used on the management interface.
 * NOTE(review): the outer switch case labels (LE_LINK etc.) were dropped
 * by extraction — verify against the full source.
 */
2367 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2369 switch (link_type) {
2371 switch (addr_type) {
2372 case ADDR_LE_DEV_PUBLIC:
2373 return BDADDR_LE_PUBLIC;
2376 /* Fallback to LE Random address type */
2377 return BDADDR_LE_RANDOM;
2381 /* Fallback to BR/EDR type */
2382 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: builds a list of mgmt-visible
 * connections (SCO/eSCO links are skipped) and returns it to the caller.
 * NOTE(review): extraction dropped lines (locking, counter handling,
 * kfree/return) — verify against the full source.
 */
2386 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2389 struct mgmt_rp_get_connections *rp;
2399 if (!hdev_is_powered(hdev)) {
2400 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2401 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the response buffer. */
2406 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2407 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2411 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2412 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, filtering SCO/eSCO links. */
2419 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2420 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2422 bacpy(&rp->addr[i].bdaddr, &c->dst);
2423 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2424 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2429 rp->conn_count = cpu_to_le16(i);
2431 /* Recalculate length in case of filtered SCO connections, etc */
2432 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2434 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2440 hci_dev_unlock(hdev);
/* Queue a pending PIN Code Neg Reply mgmt command and send the matching
 * HCI negative reply for the given address. The pending entry is removed
 * again if the HCI send fails. */
2444 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2445 struct mgmt_cp_pin_code_neg_reply *cp)
2447 struct pending_cmd *cmd;
2450 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
/* HCI negative reply carries only the peer bdaddr. */
2455 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2456 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2458 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forwards a user-supplied PIN to the
 * controller. For a high-security pairing a non-16-byte PIN is rejected
 * by converting the reply into a negative reply.
 * NOTE(review): extraction dropped lines (locking, returns) — verify
 * against the full source.
 */
2463 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2466 struct hci_conn *conn;
2467 struct mgmt_cp_pin_code_reply *cp = data;
2468 struct hci_cp_pin_code_reply reply;
2469 struct pending_cmd *cmd;
2476 if (!hdev_is_powered(hdev)) {
2477 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2478 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only exists on BR/EDR (ACL) links. */
2482 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2484 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2485 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a full 16-byte PIN; otherwise NAK the request. */
2489 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2490 struct mgmt_cp_pin_code_neg_reply ncp;
2492 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2494 BT_ERR("PIN code is not 16 bytes long");
2496 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2498 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2499 MGMT_STATUS_INVALID_PARAMS);
2504 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2510 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2511 reply.pin_len = cp->pin_len;
2512 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2514 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2516 mgmt_pending_remove(cmd);
2519 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: stores the IO capability used for
 * subsequent pairings. Pure host-side state; always succeeds. */
2523 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2526 struct mgmt_cp_set_io_capability *cp = data;
2532 hdev->io_capability = cp->io_capability;
2534 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2535 hdev->io_capability);
2537 hci_dev_unlock(hdev);
2539 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending Pair Device command whose user_data points at @conn,
 * or NULL if no pairing is in progress for that connection (tail with
 * the return statements not visible in this extract). */
2543 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2545 struct hci_dev *hdev = conn->hdev;
2546 struct pending_cmd *cmd;
2548 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2549 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2552 if (cmd->user_data != conn)
/* Finish a Pair Device command: send the result to the requester,
 * detach all pairing callbacks from the connection, drop our connection
 * reference, and release the pending command. */
2561 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2563 struct mgmt_rp_pair_device rp;
2564 struct hci_conn *conn = cmd->user_data;
2566 bacpy(&rp.addr.bdaddr, &conn->dst);
2567 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2569 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2572 /* So we don't get further callbacks for this connection */
2573 conn->connect_cfm_cb = NULL;
2574 conn->security_cfm_cb = NULL;
2575 conn->disconn_cfm_cb = NULL;
/* Balances the reference taken when the pairing was started. */
2577 hci_conn_drop(conn);
2579 mgmt_pending_remove(cmd);
/* Connection/security callback used for BR/EDR pairing: translate the
 * HCI status and complete the matching pending Pair Device command. */
2582 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2584 struct pending_cmd *cmd;
2586 BT_DBG("status %u", status);
2588 cmd = find_pairing(conn);
2590 BT_DBG("Unable to find a pending command");
2592 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback for pairing: completes the pending Pair
 * Device command on error. (A guard that ignores status==0 appears to
 * have been dropped by extraction — on LE, a successful connection alone
 * does not mean pairing finished.) */
2595 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2597 struct pending_cmd *cmd;
2599 BT_DBG("status %u", status);
2604 cmd = find_pairing(conn);
2606 BT_DBG("Unable to find a pending command");
2608 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiates an ACL or LE connection with
 * dedicated-bonding authentication and hooks pairing callbacks onto the
 * connection; completion comes via pairing_complete().
 * NOTE(review): extraction dropped lines (locking, IS_ERR check,
 * returns) — verify against the full source.
 */
2611 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2614 struct mgmt_cp_pair_device *cp = data;
2615 struct mgmt_rp_pair_device rp;
2616 struct pending_cmd *cmd;
2617 u8 sec_level, auth_type;
2618 struct hci_conn *conn;
2623 memset(&rp, 0, sizeof(rp));
2624 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2625 rp.addr.type = cp->addr.type;
2627 if (!bdaddr_type_is_valid(cp->addr.type))
2628 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2629 MGMT_STATUS_INVALID_PARAMS,
2634 if (!hdev_is_powered(hdev)) {
2635 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2636 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 = NoInputNoOutput: MITM protection is impossible. */
2640 sec_level = BT_SECURITY_MEDIUM;
2641 if (cp->io_cap == 0x03)
2642 auth_type = HCI_AT_DEDICATED_BONDING;
2644 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2646 if (cp->addr.type == BDADDR_BREDR)
2647 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2648 cp->addr.type, sec_level, auth_type);
2650 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2651 cp->addr.type, sec_level, auth_type);
/* hci_connect failure: map the error to a mgmt status. */
2656 if (PTR_ERR(conn) == -EBUSY)
2657 status = MGMT_STATUS_BUSY;
2659 status = MGMT_STATUS_CONNECT_FAILED;
2661 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* Another pairing is already using this connection. */
2667 if (conn->connect_cfm_cb) {
2668 hci_conn_drop(conn);
2669 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2670 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2674 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2677 hci_conn_drop(conn);
2681 /* For LE, just connecting isn't a proof that the pairing finished */
2682 if (cp->addr.type == BDADDR_BREDR)
2683 conn->connect_cfm_cb = pairing_complete_cb;
2685 conn->connect_cfm_cb = le_connect_complete_cb;
2687 conn->security_cfm_cb = pairing_complete_cb;
2688 conn->disconn_cfm_cb = pairing_complete_cb;
2689 conn->io_capability = cp->io_cap;
2690 cmd->user_data = conn;
/* Already connected and secure enough: finish immediately. */
2692 if (conn->state == BT_CONNECTED &&
2693 hci_conn_security(conn, sec_level, auth_type))
2694 pairing_complete(cmd, 0);
2699 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: aborts the in-progress Pair Device
 * command (if its target matches the supplied address) with status
 * "cancelled".
 * NOTE(review): extraction dropped lines (locking, returns) — verify
 * against the full source.
 */
2703 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2706 struct mgmt_addr_info *addr = data;
2707 struct pending_cmd *cmd;
2708 struct hci_conn *conn;
2715 if (!hdev_is_powered(hdev)) {
2716 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2717 MGMT_STATUS_NOT_POWERED);
/* There must be an outstanding Pair Device to cancel. */
2721 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2723 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2724 MGMT_STATUS_INVALID_PARAMS);
2728 conn = cmd->user_data;
/* The cancel must target the same peer as the pending pairing. */
2730 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2731 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2732 MGMT_STATUS_INVALID_PARAMS);
2736 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2738 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2739 addr, sizeof(*addr));
2741 hci_dev_unlock(hdev);
/* Shared backend for all user confirm/passkey (neg-)reply commands:
 * routes LE replies through SMP and BR/EDR replies to the given HCI
 * opcode, queueing a pending mgmt command for the HCI case.
 * NOTE(review): extraction dropped lines (locking, returns) — verify
 * against the full source.
 */
2745 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2746 struct mgmt_addr_info *addr, u16 mgmt_op,
2747 u16 hci_op, __le32 passkey)
2749 struct pending_cmd *cmd;
2750 struct hci_conn *conn;
2755 if (!hdev_is_powered(hdev)) {
2756 err = cmd_complete(sk, hdev->id, mgmt_op,
2757 MGMT_STATUS_NOT_POWERED, addr,
/* Pick the connection on the transport implied by the address type. */
2762 if (addr->type == BDADDR_BREDR)
2763 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2765 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2768 err = cmd_complete(sk, hdev->id, mgmt_op,
2769 MGMT_STATUS_NOT_CONNECTED, addr,
2774 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2775 /* Continue with pairing via SMP */
2776 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
/* SMP replies complete synchronously — report success/failure now. */
2779 err = cmd_complete(sk, hdev->id, mgmt_op,
2780 MGMT_STATUS_SUCCESS, addr,
2783 err = cmd_complete(sk, hdev->id, mgmt_op,
2784 MGMT_STATUS_FAILED, addr,
2790 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2796 /* Continue with pairing via HCI */
/* Passkey replies carry bdaddr + passkey; all others just the bdaddr. */
2797 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2798 struct hci_cp_user_passkey_reply cp;
2800 bacpy(&cp.bdaddr, &addr->bdaddr);
2801 cp.passkey = passkey;
2802 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2804 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2808 mgmt_pending_remove(cmd);
2811 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler — thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode and no passkey. */
2815 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2816 void *data, u16 len)
2818 struct mgmt_cp_pin_code_neg_reply *cp = data;
2822 return user_pairing_resp(sk, hdev, &cp->addr,
2823 MGMT_OP_PIN_CODE_NEG_REPLY,
2824 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler — validates the fixed-size payload
 * then delegates to user_pairing_resp(). */
2827 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2830 struct mgmt_cp_user_confirm_reply *cp = data;
/* This command has no variable-length part; reject anything else. */
2834 if (len != sizeof(*cp))
2835 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2836 MGMT_STATUS_INVALID_PARAMS);
2838 return user_pairing_resp(sk, hdev, &cp->addr,
2839 MGMT_OP_USER_CONFIRM_REPLY,
2840 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler — thin wrapper delegating to
 * user_pairing_resp() with no passkey. */
2843 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2844 void *data, u16 len)
2846 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2850 return user_pairing_resp(sk, hdev, &cp->addr,
2851 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2852 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler — thin wrapper forwarding the
 * user-entered passkey to user_pairing_resp(). */
2855 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2858 struct mgmt_cp_user_passkey_reply *cp = data;
2862 return user_pairing_resp(sk, hdev, &cp->addr,
2863 MGMT_OP_USER_PASSKEY_REPLY,
2864 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler — thin wrapper delegating to
 * user_pairing_resp() with no passkey. */
2867 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2868 void *data, u16 len)
2870 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2874 return user_pairing_resp(sk, hdev, &cp->addr,
2875 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2876 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (copying hdev->dev_name) to the
 * given request. */
2879 static void update_name(struct hci_request *req)
2881 struct hci_dev *hdev = req->hdev;
2882 struct hci_cp_write_local_name cp;
2884 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2886 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-completion callback for Set Local Name: reports the result of
 * the HCI name update back to the pending mgmt command.
 * NOTE(review): extraction dropped lines (locking, early-exit, response
 * payload) — verify against the full source.
 */
2889 static void set_name_complete(struct hci_dev *hdev, u8 status)
2891 struct mgmt_cp_set_local_name *cp;
2892 struct pending_cmd *cmd;
2894 BT_DBG("status 0x%02x", status);
2898 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* Failure -> cmd_status; success -> cmd_complete with the name data. */
2905 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2906 mgmt_status(status));
2908 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2911 mgmt_pending_remove(cmd);
2914 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: updates the friendly and short device
 * names. Powered devices get an HCI request (local name for BR/EDR,
 * scan-response data for LE); unpowered devices just store the name and
 * emit a Local Name Changed event.
 * NOTE(review): extraction dropped lines (locking, update_name/update_eir
 * calls inside the BR/EDR branch, returns) — verify against the full
 * source.
 */
2917 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2920 struct mgmt_cp_set_local_name *cp = data;
2921 struct pending_cmd *cmd;
2922 struct hci_request req;
2929 /* If the old values are the same as the new ones just return a
2930 * direct command complete event.
2932 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2933 !memcmp(hdev->short_name, cp->short_name,
2934 sizeof(hdev->short_name))) {
2935 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name takes effect immediately; it needs no HCI command. */
2940 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2942 if (!hdev_is_powered(hdev)) {
2943 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2945 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2950 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2956 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2962 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2964 hci_req_init(&req, hdev);
2966 if (lmp_bredr_capable(hdev)) {
2971 /* The name is stored in the scan response data and so
2972 * no need to udpate the advertising data here.
2974 if (lmp_le_capable(hdev))
2975 update_scan_rsp_data(&req);
2977 err = hci_req_run(&req, set_name_complete);
2979 mgmt_pending_remove(cmd);
2982 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requests the local SSP OOB
 * hash/randomizer from the controller. Requires power, SSP capability,
 * and no other read already pending.
 * NOTE(review): extraction dropped lines (locking, returns) — verify
 * against the full source.
 */
2986 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2987 void *data, u16 data_len)
2989 struct pending_cmd *cmd;
2992 BT_DBG("%s", hdev->name);
2996 if (!hdev_is_powered(hdev)) {
2997 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2998 MGMT_STATUS_NOT_POWERED);
/* OOB data is an SSP feature. */
3002 if (!lmp_ssp_capable(hdev)) {
3003 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3004 MGMT_STATUS_NOT_SUPPORTED);
3008 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3009 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3014 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
/* The HCI command carries no parameters; reply comes asynchronously. */
3020 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3022 mgmt_pending_remove(cmd);
3025 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: stores a remote device's OOB
 * hash/randomizer for later SSP pairing and replies synchronously. */
3029 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3030 void *data, u16 len)
3032 struct mgmt_cp_add_remote_oob_data *cp = data;
3036 BT_DBG("%s ", hdev->name);
3040 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
/* Map the kernel error to a generic mgmt failure status. */
3043 status = MGMT_STATUS_FAILED;
3045 status = MGMT_STATUS_SUCCESS;
3047 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3048 &cp->addr, sizeof(cp->addr));
3050 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: deletes stored remote OOB data
 * for the given address and replies synchronously. */
3054 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3055 void *data, u16 len)
3057 struct mgmt_cp_remove_remote_oob_data *cp = data;
3061 BT_DBG("%s", hdev->name);
3065 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
/* A lookup miss means the address had no stored OOB data. */
3067 status = MGMT_STATUS_INVALID_PARAMS;
3069 status = MGMT_STATUS_SUCCESS;
3071 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3072 status, &cp->addr, sizeof(cp->addr));
3074 hci_dev_unlock(hdev);
/* Fail a pending Start Discovery command: reset discovery state to
 * STOPPED and complete the pending command with the HCI status, echoing
 * the requested discovery type in the response. */
3078 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3080 struct pending_cmd *cmd;
3084 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3086 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3090 type = hdev->discovery.type;
3092 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3093 &type, sizeof(type));
3094 mgmt_pending_remove(cmd);
/* Request-completion callback for Start Discovery: on failure, tear down
 * via mgmt_start_discovery_failed(); on success, mark the device as
 * FINDING and arm the LE scan-disable timer for LE/interleaved modes.
 * NOTE(review): extraction dropped lines (status check, LE timeout
 * constant, break statements) — verify against the full source.
 */
3099 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3101 BT_DBG("status %d", status);
3105 mgmt_start_discovery_failed(hdev, status);
3106 hci_dev_unlock(hdev);
3111 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3112 hci_dev_unlock(hdev);
/* LE-based discovery is time-boxed; BR/EDR inquiry self-terminates. */
3114 switch (hdev->discovery.type) {
3115 case DISCOV_TYPE_LE:
3116 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3120 case DISCOV_TYPE_INTERLEAVED:
3121 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3122 DISCOV_INTERLEAVED_TIMEOUT);
3125 case DISCOV_TYPE_BREDR:
3129 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3133 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3134 void *data, u16 len)
3136 struct mgmt_cp_start_discovery *cp = data;
3137 struct pending_cmd *cmd;
3138 struct hci_cp_le_set_scan_param param_cp;
3139 struct hci_cp_le_set_scan_enable enable_cp;
3140 struct hci_cp_inquiry inq_cp;
3141 struct hci_request req;
3142 /* General inquiry access code (GIAC) */
3143 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3147 BT_DBG("%s", hdev->name);
3151 if (!hdev_is_powered(hdev)) {
3152 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3153 MGMT_STATUS_NOT_POWERED);
3157 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3158 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3163 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3164 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3169 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3175 hdev->discovery.type = cp->type;
3177 hci_req_init(&req, hdev);
3179 switch (hdev->discovery.type) {
3180 case DISCOV_TYPE_BREDR:
3181 status = mgmt_bredr_support(hdev);
3183 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3185 mgmt_pending_remove(cmd);
3189 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3190 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3192 mgmt_pending_remove(cmd);
3196 hci_inquiry_cache_flush(hdev);
3198 memset(&inq_cp, 0, sizeof(inq_cp));
3199 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3200 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3201 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3204 case DISCOV_TYPE_LE:
3205 case DISCOV_TYPE_INTERLEAVED:
3206 status = mgmt_le_support(hdev);
3208 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3210 mgmt_pending_remove(cmd);
3214 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3215 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3216 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3217 MGMT_STATUS_NOT_SUPPORTED);
3218 mgmt_pending_remove(cmd);
3222 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3223 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3224 MGMT_STATUS_REJECTED);
3225 mgmt_pending_remove(cmd);
3229 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
3230 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3232 mgmt_pending_remove(cmd);
3236 memset(¶m_cp, 0, sizeof(param_cp));
3237 param_cp.type = LE_SCAN_ACTIVE;
3238 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3239 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3240 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
3241 param_cp.own_address_type = ADDR_LE_DEV_PUBLIC;
3243 param_cp.own_address_type = ADDR_LE_DEV_RANDOM;
3244 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3247 memset(&enable_cp, 0, sizeof(enable_cp));
3248 enable_cp.enable = LE_SCAN_ENABLE;
3249 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3250 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3255 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3256 MGMT_STATUS_INVALID_PARAMS);
3257 mgmt_pending_remove(cmd);
3261 err = hci_req_run(&req, start_discovery_complete);
3263 mgmt_pending_remove(cmd);
3265 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3268 hci_dev_unlock(hdev);
3272 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3274 struct pending_cmd *cmd;
3277 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3281 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3282 &hdev->discovery.type, sizeof(hdev->discovery.type));
3283 mgmt_pending_remove(cmd);
3288 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3290 BT_DBG("status %d", status);
3295 mgmt_stop_discovery_failed(hdev, status);
3299 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3302 hci_dev_unlock(hdev);
3305 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3308 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3309 struct pending_cmd *cmd;
3310 struct hci_cp_remote_name_req_cancel cp;
3311 struct inquiry_entry *e;
3312 struct hci_request req;
3313 struct hci_cp_le_set_scan_enable enable_cp;
3316 BT_DBG("%s", hdev->name);
3320 if (!hci_discovery_active(hdev)) {
3321 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3322 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3323 sizeof(mgmt_cp->type));
3327 if (hdev->discovery.type != mgmt_cp->type) {
3328 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3329 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3330 sizeof(mgmt_cp->type));
3334 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3340 hci_req_init(&req, hdev);
3342 switch (hdev->discovery.state) {
3343 case DISCOVERY_FINDING:
3344 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3345 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3347 cancel_delayed_work(&hdev->le_scan_disable);
3349 memset(&enable_cp, 0, sizeof(enable_cp));
3350 enable_cp.enable = LE_SCAN_DISABLE;
3351 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
3352 sizeof(enable_cp), &enable_cp);
3357 case DISCOVERY_RESOLVING:
3358 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3361 mgmt_pending_remove(cmd);
3362 err = cmd_complete(sk, hdev->id,
3363 MGMT_OP_STOP_DISCOVERY, 0,
3365 sizeof(mgmt_cp->type));
3366 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3370 bacpy(&cp.bdaddr, &e->data.bdaddr);
3371 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3377 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3379 mgmt_pending_remove(cmd);
3380 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3381 MGMT_STATUS_FAILED, &mgmt_cp->type,
3382 sizeof(mgmt_cp->type));
3386 err = hci_req_run(&req, stop_discovery_complete);
3388 mgmt_pending_remove(cmd);
3390 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3393 hci_dev_unlock(hdev);
3397 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3400 struct mgmt_cp_confirm_name *cp = data;
3401 struct inquiry_entry *e;
3404 BT_DBG("%s", hdev->name);
3408 if (!hci_discovery_active(hdev)) {
3409 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3410 MGMT_STATUS_FAILED);
3414 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3416 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3417 MGMT_STATUS_INVALID_PARAMS);
3421 if (cp->name_known) {
3422 e->name_state = NAME_KNOWN;
3425 e->name_state = NAME_NEEDED;
3426 hci_inquiry_cache_update_resolve(hdev, e);
3429 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3433 hci_dev_unlock(hdev);
3437 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3440 struct mgmt_cp_block_device *cp = data;
3444 BT_DBG("%s", hdev->name);
3446 if (!bdaddr_type_is_valid(cp->addr.type))
3447 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3448 MGMT_STATUS_INVALID_PARAMS,
3449 &cp->addr, sizeof(cp->addr));
3453 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3455 status = MGMT_STATUS_FAILED;
3457 status = MGMT_STATUS_SUCCESS;
3459 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3460 &cp->addr, sizeof(cp->addr));
3462 hci_dev_unlock(hdev);
3467 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3470 struct mgmt_cp_unblock_device *cp = data;
3474 BT_DBG("%s", hdev->name);
3476 if (!bdaddr_type_is_valid(cp->addr.type))
3477 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3478 MGMT_STATUS_INVALID_PARAMS,
3479 &cp->addr, sizeof(cp->addr));
3483 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3485 status = MGMT_STATUS_INVALID_PARAMS;
3487 status = MGMT_STATUS_SUCCESS;
3489 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3490 &cp->addr, sizeof(cp->addr));
3492 hci_dev_unlock(hdev);
3497 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3500 struct mgmt_cp_set_device_id *cp = data;
3501 struct hci_request req;
3505 BT_DBG("%s", hdev->name);
3507 source = __le16_to_cpu(cp->source);
3509 if (source > 0x0002)
3510 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3511 MGMT_STATUS_INVALID_PARAMS);
3515 hdev->devid_source = source;
3516 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3517 hdev->devid_product = __le16_to_cpu(cp->product);
3518 hdev->devid_version = __le16_to_cpu(cp->version);
3520 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3522 hci_req_init(&req, hdev);
3524 hci_req_run(&req, NULL);
3526 hci_dev_unlock(hdev);
3531 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3533 struct cmd_lookup match = { NULL, hdev };
3536 u8 mgmt_err = mgmt_status(status);
3538 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3539 cmd_status_rsp, &mgmt_err);
3543 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3546 new_settings(hdev, match.sk);
3552 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3555 struct mgmt_mode *cp = data;
3556 struct pending_cmd *cmd;
3557 struct hci_request req;
3558 u8 val, enabled, status;
3561 BT_DBG("request for %s", hdev->name);
3563 status = mgmt_le_support(hdev);
3565 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3568 if (cp->val != 0x00 && cp->val != 0x01)
3569 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3570 MGMT_STATUS_INVALID_PARAMS);
3575 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3577 /* The following conditions are ones which mean that we should
3578 * not do any HCI communication but directly send a mgmt
3579 * response to user space (after toggling the flag if
3582 if (!hdev_is_powered(hdev) || val == enabled ||
3583 hci_conn_num(hdev, LE_LINK) > 0) {
3584 bool changed = false;
3586 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3587 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3591 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3596 err = new_settings(hdev, sk);
3601 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3602 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3603 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3608 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3614 hci_req_init(&req, hdev);
3617 enable_advertising(&req);
3619 disable_advertising(&req);
3621 err = hci_req_run(&req, set_advertising_complete);
3623 mgmt_pending_remove(cmd);
3626 hci_dev_unlock(hdev);
3630 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3631 void *data, u16 len)
3633 struct mgmt_cp_set_static_address *cp = data;
3636 BT_DBG("%s", hdev->name);
3638 if (!lmp_le_capable(hdev))
3639 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3640 MGMT_STATUS_NOT_SUPPORTED);
3642 if (hdev_is_powered(hdev))
3643 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3644 MGMT_STATUS_REJECTED);
3646 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3647 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3648 return cmd_status(sk, hdev->id,
3649 MGMT_OP_SET_STATIC_ADDRESS,
3650 MGMT_STATUS_INVALID_PARAMS);
3652 /* Two most significant bits shall be set */
3653 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3654 return cmd_status(sk, hdev->id,
3655 MGMT_OP_SET_STATIC_ADDRESS,
3656 MGMT_STATUS_INVALID_PARAMS);
3661 bacpy(&hdev->static_addr, &cp->bdaddr);
3663 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3665 hci_dev_unlock(hdev);
3670 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3671 void *data, u16 len)
3673 struct mgmt_cp_set_scan_params *cp = data;
3674 __u16 interval, window;
3677 BT_DBG("%s", hdev->name);
3679 if (!lmp_le_capable(hdev))
3680 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3681 MGMT_STATUS_NOT_SUPPORTED);
3683 interval = __le16_to_cpu(cp->interval);
3685 if (interval < 0x0004 || interval > 0x4000)
3686 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3687 MGMT_STATUS_INVALID_PARAMS);
3689 window = __le16_to_cpu(cp->window);
3691 if (window < 0x0004 || window > 0x4000)
3692 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3693 MGMT_STATUS_INVALID_PARAMS);
3695 if (window > interval)
3696 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3697 MGMT_STATUS_INVALID_PARAMS);
3701 hdev->le_scan_interval = interval;
3702 hdev->le_scan_window = window;
3704 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3706 hci_dev_unlock(hdev);
3711 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3713 struct pending_cmd *cmd;
3715 BT_DBG("status 0x%02x", status);
3719 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3724 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3725 mgmt_status(status));
3727 struct mgmt_mode *cp = cmd->param;
3730 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3732 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3734 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3735 new_settings(hdev, cmd->sk);
3738 mgmt_pending_remove(cmd);
3741 hci_dev_unlock(hdev);
3744 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3745 void *data, u16 len)
3747 struct mgmt_mode *cp = data;
3748 struct pending_cmd *cmd;
3749 struct hci_request req;
3752 BT_DBG("%s", hdev->name);
3754 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
3755 hdev->hci_ver < BLUETOOTH_VER_1_2)
3756 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3757 MGMT_STATUS_NOT_SUPPORTED);
3759 if (cp->val != 0x00 && cp->val != 0x01)
3760 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3761 MGMT_STATUS_INVALID_PARAMS);
3763 if (!hdev_is_powered(hdev))
3764 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3765 MGMT_STATUS_NOT_POWERED);
3767 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3768 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3769 MGMT_STATUS_REJECTED);
3773 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3774 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3779 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3780 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3785 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3792 hci_req_init(&req, hdev);
3794 write_fast_connectable(&req, cp->val);
3796 err = hci_req_run(&req, fast_connectable_complete);
3798 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3799 MGMT_STATUS_FAILED);
3800 mgmt_pending_remove(cmd);
3804 hci_dev_unlock(hdev);
3809 static void set_bredr_scan(struct hci_request *req)
3811 struct hci_dev *hdev = req->hdev;
3814 /* Ensure that fast connectable is disabled. This function will
3815 * not do anything if the page scan parameters are already what
3818 write_fast_connectable(req, false);
3820 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3822 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3823 scan |= SCAN_INQUIRY;
3826 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3829 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3831 struct pending_cmd *cmd;
3833 BT_DBG("status 0x%02x", status);
3837 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3842 u8 mgmt_err = mgmt_status(status);
3844 /* We need to restore the flag if related HCI commands
3847 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3849 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
3851 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
3852 new_settings(hdev, cmd->sk);
3855 mgmt_pending_remove(cmd);
3858 hci_dev_unlock(hdev);
3861 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3863 struct mgmt_mode *cp = data;
3864 struct pending_cmd *cmd;
3865 struct hci_request req;
3868 BT_DBG("request for %s", hdev->name);
3870 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
3871 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3872 MGMT_STATUS_NOT_SUPPORTED);
3874 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3875 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3876 MGMT_STATUS_REJECTED);
3878 if (cp->val != 0x00 && cp->val != 0x01)
3879 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3880 MGMT_STATUS_INVALID_PARAMS);
3884 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3885 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3889 if (!hdev_is_powered(hdev)) {
3891 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
3892 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
3893 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3894 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3895 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
3898 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3900 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3904 err = new_settings(hdev, sk);
3908 /* Reject disabling when powered on */
3910 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3911 MGMT_STATUS_REJECTED);
3915 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
3916 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3921 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
3927 /* We need to flip the bit already here so that update_ad
3928 * generates the correct flags.
3930 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3932 hci_req_init(&req, hdev);
3934 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3935 set_bredr_scan(&req);
3937 /* Since only the advertising data flags will change, there
3938 * is no need to update the scan response data.
3942 err = hci_req_run(&req, set_bredr_complete);
3944 mgmt_pending_remove(cmd);
3947 hci_dev_unlock(hdev);
3951 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3953 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3955 if (key->master != 0x00 && key->master != 0x01)
3957 if (!bdaddr_type_is_le(key->addr.type))
3962 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3963 void *cp_data, u16 len)
3965 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3966 u16 key_count, expected_len;
3969 BT_DBG("request for %s", hdev->name);
3971 if (!lmp_le_capable(hdev))
3972 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3973 MGMT_STATUS_NOT_SUPPORTED);
3975 key_count = __le16_to_cpu(cp->key_count);
3977 expected_len = sizeof(*cp) + key_count *
3978 sizeof(struct mgmt_ltk_info);
3979 if (expected_len != len) {
3980 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3982 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3983 MGMT_STATUS_INVALID_PARAMS);
3986 BT_DBG("%s key_count %u", hdev->name, key_count);
3988 for (i = 0; i < key_count; i++) {
3989 struct mgmt_ltk_info *key = &cp->keys[i];
3991 if (!ltk_is_valid(key))
3992 return cmd_status(sk, hdev->id,
3993 MGMT_OP_LOAD_LONG_TERM_KEYS,
3994 MGMT_STATUS_INVALID_PARAMS);
3999 hci_smp_ltks_clear(hdev);
4001 for (i = 0; i < key_count; i++) {
4002 struct mgmt_ltk_info *key = &cp->keys[i];
4005 if (key->addr.type == BDADDR_LE_PUBLIC)
4006 addr_type = ADDR_LE_DEV_PUBLIC;
4008 addr_type = ADDR_LE_DEV_RANDOM;
4013 type = HCI_SMP_LTK_SLAVE;
4015 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
4016 type, 0, key->authenticated, key->val,
4017 key->enc_size, key->ediv, key->rand);
4020 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4023 hci_dev_unlock(hdev);
4028 static const struct mgmt_handler {
4029 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4033 } mgmt_handlers[] = {
4034 { NULL }, /* 0x0000 (no command) */
4035 { read_version, false, MGMT_READ_VERSION_SIZE },
4036 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4037 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4038 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4039 { set_powered, false, MGMT_SETTING_SIZE },
4040 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4041 { set_connectable, false, MGMT_SETTING_SIZE },
4042 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4043 { set_pairable, false, MGMT_SETTING_SIZE },
4044 { set_link_security, false, MGMT_SETTING_SIZE },
4045 { set_ssp, false, MGMT_SETTING_SIZE },
4046 { set_hs, false, MGMT_SETTING_SIZE },
4047 { set_le, false, MGMT_SETTING_SIZE },
4048 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4049 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4050 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4051 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4052 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4053 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4054 { disconnect, false, MGMT_DISCONNECT_SIZE },
4055 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4056 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4057 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4058 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4059 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4060 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4061 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4062 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4063 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4064 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4065 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4066 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4067 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4068 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4069 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4070 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4071 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4072 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4073 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4074 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4075 { set_advertising, false, MGMT_SETTING_SIZE },
4076 { set_bredr, false, MGMT_SETTING_SIZE },
4077 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4078 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
4082 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4086 struct mgmt_hdr *hdr;
4087 u16 opcode, index, len;
4088 struct hci_dev *hdev = NULL;
4089 const struct mgmt_handler *handler;
4092 BT_DBG("got %zu bytes", msglen);
4094 if (msglen < sizeof(*hdr))
4097 buf = kmalloc(msglen, GFP_KERNEL);
4101 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4107 opcode = __le16_to_cpu(hdr->opcode);
4108 index = __le16_to_cpu(hdr->index);
4109 len = __le16_to_cpu(hdr->len);
4111 if (len != msglen - sizeof(*hdr)) {
4116 if (index != MGMT_INDEX_NONE) {
4117 hdev = hci_dev_get(index);
4119 err = cmd_status(sk, index, opcode,
4120 MGMT_STATUS_INVALID_INDEX);
4124 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4125 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4126 err = cmd_status(sk, index, opcode,
4127 MGMT_STATUS_INVALID_INDEX);
4132 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4133 mgmt_handlers[opcode].func == NULL) {
4134 BT_DBG("Unknown op %u", opcode);
4135 err = cmd_status(sk, index, opcode,
4136 MGMT_STATUS_UNKNOWN_COMMAND);
4140 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4141 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4142 err = cmd_status(sk, index, opcode,
4143 MGMT_STATUS_INVALID_INDEX);
4147 handler = &mgmt_handlers[opcode];
4149 if ((handler->var_len && len < handler->data_len) ||
4150 (!handler->var_len && len != handler->data_len)) {
4151 err = cmd_status(sk, index, opcode,
4152 MGMT_STATUS_INVALID_PARAMS);
4157 mgmt_init_hdev(sk, hdev);
4159 cp = buf + sizeof(*hdr);
4161 err = handler->func(sk, hdev, cp, len);
4175 void mgmt_index_added(struct hci_dev *hdev)
4177 if (hdev->dev_type != HCI_BREDR)
4180 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4183 void mgmt_index_removed(struct hci_dev *hdev)
4185 u8 status = MGMT_STATUS_INVALID_INDEX;
4187 if (hdev->dev_type != HCI_BREDR)
4190 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4192 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4195 static void powered_complete(struct hci_dev *hdev, u8 status)
4197 struct cmd_lookup match = { NULL, hdev };
4199 BT_DBG("status 0x%02x", status);
4203 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4205 new_settings(hdev, match.sk);
4207 hci_dev_unlock(hdev);
4213 static int powered_update_hci(struct hci_dev *hdev)
4215 struct hci_request req;
4218 hci_req_init(&req, hdev);
4220 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4221 !lmp_host_ssp_capable(hdev)) {
4224 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4227 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4228 lmp_bredr_capable(hdev)) {
4229 struct hci_cp_write_le_host_supported cp;
4232 cp.simul = lmp_le_br_capable(hdev);
4234 /* Check first if we already have the right
4235 * host state (host features set)
4237 if (cp.le != lmp_host_le_capable(hdev) ||
4238 cp.simul != lmp_host_le_br_capable(hdev))
4239 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4243 if (lmp_le_capable(hdev)) {
4244 /* Set random address to static address if configured */
4245 if (bacmp(&hdev->static_addr, BDADDR_ANY))
4246 hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
4247 &hdev->static_addr);
4249 /* Make sure the controller has a good default for
4250 * advertising data. This also applies to the case
4251 * where BR/EDR was toggled during the AUTO_OFF phase.
4253 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4255 update_scan_rsp_data(&req);
4258 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4259 enable_advertising(&req);
4262 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4263 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4264 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4265 sizeof(link_sec), &link_sec);
4267 if (lmp_bredr_capable(hdev)) {
4268 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4269 set_bredr_scan(&req);
4275 return hci_req_run(&req, powered_complete);
4278 int mgmt_powered(struct hci_dev *hdev, u8 powered)
4280 struct cmd_lookup match = { NULL, hdev };
4281 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4282 u8 zero_cod[] = { 0, 0, 0 };
4285 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4289 if (powered_update_hci(hdev) == 0)
4292 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4297 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4298 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4300 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4301 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4302 zero_cod, sizeof(zero_cod), NULL);
4305 err = new_settings(hdev, match.sk);
4313 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4315 struct pending_cmd *cmd;
4318 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4322 if (err == -ERFKILL)
4323 status = MGMT_STATUS_RFKILLED;
4325 status = MGMT_STATUS_FAILED;
4327 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4329 mgmt_pending_remove(cmd);
4332 void mgmt_discoverable_timeout(struct hci_dev *hdev)
4334 struct hci_request req;
4335 u8 scan = SCAN_PAGE;
4339 /* When discoverable timeout triggers, then just make sure
4340 * the limited discoverable flag is cleared. Even in the case
4341 * of a timeout triggered from general discoverable, it is
4342 * safe to unconditionally clear the flag.
4344 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4346 hci_req_init(&req, hdev);
4347 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
4349 hci_req_run(&req, NULL);
4351 hdev->discov_timeout = 0;
4353 hci_dev_unlock(hdev);
4356 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4360 /* Nothing needed here if there's a pending command since that
4361 * commands request completion callback takes care of everything
4364 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4368 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4370 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4373 new_settings(hdev, NULL);
4376 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4380 /* Nothing needed here if there's a pending command since that
4381 * commands request completion callback takes care of everything
4384 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4388 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4390 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4393 new_settings(hdev, NULL);
4396 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4398 u8 mgmt_err = mgmt_status(status);
4400 if (scan & SCAN_PAGE)
4401 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4402 cmd_status_rsp, &mgmt_err);
4404 if (scan & SCAN_INQUIRY)
4405 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4406 cmd_status_rsp, &mgmt_err);
4409 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4412 struct mgmt_ev_new_link_key ev;
4414 memset(&ev, 0, sizeof(ev));
4416 ev.store_hint = persistent;
4417 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4418 ev.key.addr.type = BDADDR_BREDR;
4419 ev.key.type = key->type;
4420 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4421 ev.key.pin_len = key->pin_len;
4423 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4426 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4428 struct mgmt_ev_new_long_term_key ev;
4430 memset(&ev, 0, sizeof(ev));
4432 ev.store_hint = persistent;
4433 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4434 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
4435 ev.key.authenticated = key->authenticated;
4436 ev.key.enc_size = key->enc_size;
4437 ev.key.ediv = key->ediv;
4439 if (key->type == HCI_SMP_LTK)
4442 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4443 memcpy(ev.key.val, key->val, sizeof(key->val));
4445 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
4448 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4451 eir[eir_len++] = sizeof(type) + data_len;
4452 eir[eir_len++] = type;
4453 memcpy(&eir[eir_len], data, data_len);
4454 eir_len += data_len;
4459 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4460 u8 addr_type, u32 flags, u8 *name, u8 name_len,
4464 struct mgmt_ev_device_connected *ev = (void *) buf;
4467 bacpy(&ev->addr.bdaddr, bdaddr);
4468 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4470 ev->flags = __cpu_to_le32(flags);
4473 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
4476 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
4477 eir_len = eir_append_data(ev->eir, eir_len,
4478 EIR_CLASS_OF_DEV, dev_class, 3);
4480 ev->eir_len = cpu_to_le16(eir_len);
4482 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
4483 sizeof(*ev) + eir_len, NULL);
4486 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4488 struct mgmt_cp_disconnect *cp = cmd->param;
4489 struct sock **sk = data;
4490 struct mgmt_rp_disconnect rp;
4492 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4493 rp.addr.type = cp->addr.type;
4495 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4501 mgmt_pending_remove(cmd);
4504 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
4506 struct hci_dev *hdev = data;
4507 struct mgmt_cp_unpair_device *cp = cmd->param;
4508 struct mgmt_rp_unpair_device rp;
4510 memset(&rp, 0, sizeof(rp));
4511 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4512 rp.addr.type = cp->addr.type;
4514 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
4516 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
4518 mgmt_pending_remove(cmd);
4521 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
4522 u8 link_type, u8 addr_type, u8 reason)
4524 struct mgmt_ev_device_disconnected ev;
4525 struct sock *sk = NULL;
4527 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
4529 bacpy(&ev.addr.bdaddr, bdaddr);
4530 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4533 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
4538 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Report a failed disconnect attempt.  Flushes pending unpair commands,
 * then completes the pending MGMT_OP_DISCONNECT command (if any) with the
 * mapped HCI status.
 * NOTE(review): lines 4549-4550 (unpair foreach data argument) and
 * 4552-4554 (presumably the `if (!cmd) return;` guard before cmd is
 * dereferenced) are missing from this chunk — do not assume cmd is
 * non-NULL without checking upstream.
 */
4542 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
4543 u8 link_type, u8 addr_type, u8 status)
4545 struct mgmt_rp_disconnect rp;
4546 struct pending_cmd *cmd;
4548 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4551 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
4555 bacpy(&rp.addr.bdaddr, bdaddr);
4556 rp.addr.type = link_to_bdaddr(link_type, addr_type);
4558 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
4559 mgmt_status(status), &rp, sizeof(rp));
4561 mgmt_pending_remove(cmd);
/* Broadcast a Connect Failed management event for @bdaddr with the HCI
 * status translated to a mgmt status code.  Sent to all mgmt sockets
 * (NULL skip socket).
 */
4564 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4565 u8 addr_type, u8 status)
4567 struct mgmt_ev_connect_failed ev;
4569 bacpy(&ev.addr.bdaddr, bdaddr);
4570 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4571 ev.status = mgmt_status(status);
4573 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a PIN Code Request event for a BR/EDR peer.
 * NOTE(review): lines 4582-4583 are missing — presumably the assignment
 * of @secure into ev.secure; the parameter is otherwise unused in the
 * visible code.  Confirm against upstream mgmt.c.
 */
4576 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4578 struct mgmt_ev_pin_code_request ev;
4580 bacpy(&ev.addr.bdaddr, bdaddr);
4581 ev.addr.type = BDADDR_BREDR;
4584 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending MGMT_OP_PIN_CODE_REPLY command once the controller
 * has answered, echoing the peer address (always BR/EDR) and the mapped
 * HCI status back to the issuer.
 * NOTE(review): lines 4588-4589 (remaining parameters — presumably
 * `u8 status`, which is used below) and 4594-4596 (presumably the
 * `if (!cmd) return;` guard) are missing from this chunk.
 */
4587 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4590 struct pending_cmd *cmd;
4591 struct mgmt_rp_pin_code_reply rp;
4593 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4597 bacpy(&rp.addr.bdaddr, bdaddr);
4598 rp.addr.type = BDADDR_BREDR;
4600 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
4601 mgmt_status(status), &rp, sizeof(rp));
4603 mgmt_pending_remove(cmd);
/* Complete a pending MGMT_OP_PIN_CODE_NEG_REPLY command — mirror image of
 * mgmt_pin_code_reply_complete() for the negative reply opcode.
 * NOTE(review): lines 4607-4608 (trailing parameters, presumably
 * `u8 status`) and 4613-4615 (presumably the `if (!cmd) return;` guard)
 * are missing from this chunk.
 */
4606 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4609 struct pending_cmd *cmd;
4610 struct mgmt_rp_pin_code_reply rp;
4612 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
4616 bacpy(&rp.addr.bdaddr, bdaddr);
4617 rp.addr.type = BDADDR_BREDR;
4619 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
4620 mgmt_status(status), &rp, sizeof(rp));
4622 mgmt_pending_remove(cmd);
/* Broadcast a User Confirm Request event so an agent can confirm the
 * pairing @value (numeric comparison).  Returns the mgmt_event() result.
 * NOTE(review): lines 4627-4628 (trailing parameters, presumably
 * `u8 confirm_hint` used below) and 4636-4637 (presumably the ev.value
 * assignment and the event's skip-socket argument) are missing.
 */
4625 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4626 u8 link_type, u8 addr_type, __le32 value,
4629 struct mgmt_ev_user_confirm_request ev;
4631 BT_DBG("%s", hdev->name);
4633 bacpy(&ev.addr.bdaddr, bdaddr);
4634 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4635 ev.confirm_hint = confirm_hint;
4638 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Broadcast a User Passkey Request event asking an agent to enter the
 * passkey for @bdaddr.  Returns the mgmt_event() result.
 * NOTE(review): line 4653 (the event's final skip-socket argument) is
 * not visible in this chunk.
 */
4642 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4643 u8 link_type, u8 addr_type)
4645 struct mgmt_ev_user_passkey_request ev;
4647 BT_DBG("%s", hdev->name);
4649 bacpy(&ev.addr.bdaddr, bdaddr);
4650 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4652 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey (neg) reply
 * opcodes: find the pending command for @opcode, complete it with the
 * peer address and mapped HCI status, drop it, and return the
 * cmd_complete() result.
 * NOTE(review): lines 4658-4659 (trailing parameter, presumably
 * `u16 opcode`), 4665-4667 (presumably `if (!cmd) return -ENOENT;`),
 * 4671-4672 (cmd_complete tail: &rp, sizeof(rp)) and the final
 * `return err;` are missing from this chunk.
 */
4656 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4657 u8 link_type, u8 addr_type, u8 status,
4660 struct pending_cmd *cmd;
4661 struct mgmt_rp_user_confirm_reply rp;
4664 cmd = mgmt_pending_find(opcode, hdev);
4668 bacpy(&rp.addr.bdaddr, bdaddr);
4669 rp.addr.type = link_to_bdaddr(link_type, addr_type);
4670 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
4673 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending MGMT_OP_USER_CONFIRM_REPLY. */
4678 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4679 u8 link_type, u8 addr_type, u8 status)
4681 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4682 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending MGMT_OP_USER_CONFIRM_NEG_REPLY.
 * NOTE(review): line 4689 (presumably `status,`) is missing here.
 */
4685 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4686 u8 link_type, u8 addr_type, u8 status)
4688 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4690 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending MGMT_OP_USER_PASSKEY_REPLY. */
4693 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4694 u8 link_type, u8 addr_type, u8 status)
4696 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4697 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending MGMT_OP_USER_PASSKEY_NEG_REPLY.
 * NOTE(review): line 4704 (presumably `status,`) is missing here.
 */
4700 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4701 u8 link_type, u8 addr_type, u8 status)
4703 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4705 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Broadcast a Passkey Notify event: the passkey to display (converted to
 * little endian on the wire) and how many digits the remote has entered.
 * Returns the mgmt_event() result.
 * NOTE(review): lines 4710-4711 (trailing parameter, presumably
 * `u8 entered` used below) are missing from this chunk.
 */
4708 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4709 u8 link_type, u8 addr_type, u32 passkey,
4712 struct mgmt_ev_passkey_notify ev;
4714 BT_DBG("%s", hdev->name);
4716 bacpy(&ev.addr.bdaddr, bdaddr);
4717 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4718 ev.passkey = __cpu_to_le32(passkey);
4719 ev.entered = entered;
4721 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Broadcast an Authentication Failed event with the mapped HCI status. */
4724 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4725 u8 addr_type, u8 status)
4727 struct mgmt_ev_auth_failed ev;
4729 bacpy(&ev.addr.bdaddr, bdaddr);
4730 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4731 ev.status = mgmt_status(status);
4733 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Handle completion of an authentication-enable change: on error, fail
 * any pending MGMT_OP_SET_LINK_SECURITY commands with the mapped status;
 * otherwise sync the HCI_LINK_SECURITY dev_flag with the controller's
 * HCI_AUTH flag, answer pending commands with the new settings, and emit
 * New Settings if the flag actually changed.
 * NOTE(review): the if/else skeleton is largely missing from this chunk
 * (lines 4739-4741, 4745-4747, 4750-4751, 4753-4754, 4756-4758,
 * 4760-4762), including the `bool changed` declaration, the
 * `&hdev->dev_flags` arguments and the `if (changed)` guard around
 * new_settings() — consult upstream mgmt.c before any edit.
 */
4736 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4738 struct cmd_lookup match = { NULL, hdev };
4742 u8 mgmt_err = mgmt_status(status);
4743 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4744 cmd_status_rsp, &mgmt_err);
4748 if (test_bit(HCI_AUTH, &hdev->flags))
4749 changed = !test_and_set_bit(HCI_LINK_SECURITY,
4752 changed = test_and_clear_bit(HCI_LINK_SECURITY,
4755 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4759 new_settings(hdev, match.sk);
/* Queue an HCI Write Extended Inquiry Response command with an all-zero
 * payload, clearing both the cached hdev->eir copy and the controller's
 * EIR data.  No-op if the controller lacks extended inquiry support.
 * NOTE(review): line 4771 (presumably `return;` for the capability
 * check) is missing from this chunk.
 */
4765 static void clear_eir(struct hci_request *req)
4767 struct hci_dev *hdev = req->hdev;
4768 struct hci_cp_write_eir cp;
4770 if (!lmp_ext_inq_capable(hdev))
4773 memset(hdev->eir, 0, sizeof(hdev->eir));
4775 memset(&cp, 0, sizeof(cp));
4777 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Handle completion of an SSP enable/disable request.
 * Error path: roll back HCI_SSP_ENABLED (and the dependent HCI_HS_ENABLED)
 * if the enable failed, announce New Settings, and fail pending
 * MGMT_OP_SET_SSP commands with the mapped status.
 * Success path: sync HCI_SSP_ENABLED/HCI_HS_ENABLED dev_flags with
 * @enable, answer pending commands, emit New Settings if anything
 * changed, then rebuild or clear the EIR data to match the new SSP state.
 * NOTE(review): many skeleton lines are missing from this chunk
 * (4785-4786, 4793-4794, 4796-4800, 4802, 4804, 4806-4807, 4809-4810,
 * 4812-4813, 4815-4818, 4820, 4822-4825, 4827) — including the if/else
 * structure, the `goto`/`return` glue, and the presumed update_eir()
 * call on the SSP-enabled branch.  Treat the visible ordering as
 * indicative only; consult upstream mgmt.c.
 */
4780 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4782 struct cmd_lookup match = { NULL, hdev };
4783 struct hci_request req;
4784 bool changed = false;
4787 u8 mgmt_err = mgmt_status(status);
4789 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4790 &hdev->dev_flags)) {
4791 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4792 new_settings(hdev, NULL);
4795 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4801 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4803 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4805 changed = test_and_clear_bit(HCI_HS_ENABLED,
4808 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4811 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4814 new_settings(hdev, match.sk);
4819 hci_req_init(&req, hdev);
4821 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4826 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup cookie, taking a reference with sock_hold()
 * so it outlives the pending command.  Later matches are ignored.
 */
4829 static void sk_lookup(struct pending_cmd *cmd, void *data)
4831 struct cmd_lookup *match = data;
4833 if (match->sk == NULL) {
4834 match->sk = cmd->sk;
4835 sock_hold(match->sk);
/* Handle completion of a class-of-device update.  Collects the socket of
 * whichever pending command (Set Dev Class / Add UUID / Remove UUID)
 * triggered the write, then broadcasts the 3-byte class in a Class Of
 * Device Changed event, skipping that socket.
 * NOTE(review): lines 4840-4841 (trailing parameter, presumably
 * `u8 status`), 4847-4848 (presumably the `if (!status)` guard), 4850
 * (event skip-socket argument) and 4852-4854 (presumably sock_put on
 * match.sk) are missing from this chunk.
 */
4839 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4842 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4844 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4845 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4846 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4849 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Handle completion of a local-name change: cache the new name in
 * hdev->dev_name and broadcast Local Name Changed (carrying both the full
 * and short name), skipping the issuing socket if there is a pending
 * MGMT_OP_SET_LOCAL_NAME command.  The event is suppressed while a
 * Set Powered command is pending, since power-on runs this HCI command
 * internally.
 * NOTE(review): lines 4860-4863 (presumably an early `if (status) return;`
 * guard), 4869 and 4871 (the `if (!cmd)` branching around the dev_name
 * copy), 4874 and 4876-4878 (comment close and `return;`) are missing
 * from this chunk.
 */
4856 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4858 struct mgmt_cp_set_local_name ev;
4859 struct pending_cmd *cmd;
4864 memset(&ev, 0, sizeof(ev));
4865 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4866 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4868 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4870 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4872 /* If this is a HCI command related to powering on the
4873 * HCI dev don't send any mgmt signals.
4875 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4879 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4880 cmd ? cmd->sk : NULL);
/* Complete a pending MGMT_OP_READ_LOCAL_OOB_DATA command: on failure send
 * a status-only response; on success return the controller's OOB hash and
 * randomizer.  The pending command is removed either way.
 * NOTE(review): lines 4891-4894 (presumably `if (!cmd) return;` and the
 * `if (status)` branch head), 4897/4899 (else-branch braces) and
 * 4905-4906 (closing braces) are missing from this chunk.
 */
4883 void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4884 u8 *randomizer, u8 status)
4886 struct pending_cmd *cmd;
4888 BT_DBG("%s status %u", hdev->name, status);
4890 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4895 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4896 mgmt_status(status));
4898 struct mgmt_rp_read_local_oob_data rp;
4900 memcpy(rp.hash, hash, sizeof(rp.hash));
4901 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4903 cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4904 0, &rp, sizeof(rp));
4907 mgmt_pending_remove(cmd);
/* Broadcast a Device Found event for a discovery result.  Builds the
 * event in a stack buffer, copies the caller's EIR data, and appends a
 * Class of Device EIR field (3 bytes + 2-byte header, hence the "5 bytes"
 * reserve) when @dev_class is given and the EIR doesn't already carry
 * one.  Results are dropped when discovery is not active or the EIR data
 * would overflow the buffer.
 * NOTE(review): lines 4913-4914 (the `buf` declaration that `ev` aliases
 * — its size bounds the 4922 check), 4916/4919-4920/4923-4924 (`return;`
 * glue), 4929-4930 and 4932 (the @rssi assignment and the @cfm_name/@ssp
 * conditions guarding the two flag lines), 4934-4935 (the `if (eir_len)`
 * guard before the memcpy) and 4940-4941 (eir_append_data tail args) are
 * missing from this chunk.
 */
4910 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4911 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4912 ssp, u8 *eir, u16 eir_len)
4915 struct mgmt_ev_device_found *ev = (void *) buf;
4918 if (!hci_discovery_active(hdev))
4921 /* Leave 5 bytes for a potential CoD field */
4922 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4925 memset(buf, 0, sizeof(buf));
4927 bacpy(&ev->addr.bdaddr, bdaddr);
4928 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4931 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4933 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4936 memcpy(ev->eir, eir, eir_len);
4938 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4939 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4942 ev->eir_len = cpu_to_le16(eir_len);
4943 ev_size = sizeof(*ev) + eir_len;
4945 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Broadcast a remote-name resolution result as a Device Found event whose
 * EIR payload carries a single Complete Local Name field.  The buffer
 * reserves HCI_MAX_NAME_LENGTH plus the 2-byte EIR header.
 * NOTE(review): lines 4961-4962 (presumably the @rssi assignment and the
 * comment/guard before the append) and 4964-4965 (eir_append_data tail:
 * name_len) are missing; @rssi is otherwise unused in the visible code.
 */
4948 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4949 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4951 struct mgmt_ev_device_found *ev;
4952 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4955 ev = (struct mgmt_ev_device_found *) buf;
4957 memset(buf, 0, sizeof(buf));
4959 bacpy(&ev->addr.bdaddr, bdaddr);
4960 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4963 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4966 ev->eir_len = cpu_to_le16(eir_len);
4968 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Notify the management interface that discovery started or stopped.
 * Completes the pending Start/Stop Discovery command (returning the
 * discovery type to its issuer), then broadcasts a Discovering event with
 * the current type and state.
 * NOTE(review): lines 4977-4978 (presumably the `if (discovering)` branch
 * selecting which opcode to look up), 4980 (`else`), 4983/4985 (the
 * `if (cmd)` guard and braces) and 4987/4989-4990 (cmd_complete tail:
 * sizeof(type)) are missing from this chunk.
 */
4971 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4973 struct mgmt_ev_discovering ev;
4974 struct pending_cmd *cmd;
4976 BT_DBG("%s discovering %u", hdev->name, discovering);
4979 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4981 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4984 u8 type = hdev->discovery.type;
4986 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4988 mgmt_pending_remove(cmd);
4991 memset(&ev, 0, sizeof(ev));
4992 ev.type = hdev->discovery.type;
4993 ev.discovering = discovering;
4995 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a Device Blocked event.  If a MGMT_OP_BLOCK_DEVICE command is
 * pending, its issuing socket is skipped (it gets a command response
 * instead).  Returns the mgmt_event() result.
 */
4998 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5000 struct pending_cmd *cmd;
5001 struct mgmt_ev_device_blocked ev;
5003 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5005 bacpy(&ev.addr.bdaddr, bdaddr);
5006 ev.addr.type = type;
5008 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5009 cmd ? cmd->sk : NULL);
/* Broadcast a Device Unblocked event — mirror image of
 * mgmt_device_blocked(), skipping the socket of any pending
 * MGMT_OP_UNBLOCK_DEVICE command.  Returns the mgmt_event() result.
 */
5012 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5014 struct pending_cmd *cmd;
5015 struct mgmt_ev_device_unblocked ev;
5017 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5019 bacpy(&ev.addr.bdaddr, bdaddr);
5020 ev.addr.type = type;
5022 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5023 cmd ? cmd->sk : NULL);
/* hci_req completion callback used when re-enabling advertising: if the
 * request failed, drop the HCI_ADVERTISING mgmt setting and tell user
 * space via New Settings.
 * NOTE(review): line 5031 (presumably the `if (status)` guard over the
 * two lines below) is missing from this chunk; the comment on 5030
 * implies the clear only happens on failure.
 */
5026 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5028 BT_DBG("%s status %u", hdev->name, status);
5030 /* Clear the advertising mgmt setting if we failed to re-enable it */
5032 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5033 new_settings(hdev, NULL);
5037 void mgmt_reenable_advertising(struct hci_dev *hdev)
5039 struct hci_request req;
5041 if (hci_conn_num(hdev, LE_LINK) > 0)
5044 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5047 hci_req_init(&req, hdev);
5048 enable_advertising(&req);
5050 /* If this fails we have no option but to let user space know
5051 * that we've disabled advertising.
5053 if (hci_req_run(&req, adv_enable_complete) < 0) {
5054 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5055 new_settings(hdev, NULL);