2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 4
/* Opcodes accepted by the mgmt interface. The list is reported
 * verbatim to user space by the Read Management Supported Commands
 * command, so entries must match what the handlers below implement.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
/* Events the mgmt interface may emit; reported to user space
 * alongside the supported command list above.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
108 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
110 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
111 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
114 struct list_head list;
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code, so the entry order must
 * track the HCI error code numbering of the Bluetooth core
 * specification exactly. Codes beyond the end of the table are
 * mapped to MGMT_STATUS_FAILED by mgmt_status() below.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_NOT_PAIRED,		/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
187 static u8 mgmt_status(u8 hci_status)
189 if (hci_status < ARRAY_SIZE(mgmt_status_table))
190 return mgmt_status_table[hci_status];
192 return MGMT_STATUS_FAILED;
/* Send an MGMT_EV_CMD_STATUS event to a single mgmt socket,
 * reporting @status for command @cmd on controller @index.
 * Returns 0 on success or a negative errno.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	/* Event is a fixed-size header plus the status payload */
	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));

	ev->opcode = cpu_to_le16(cmd);

	/* Deliver directly to the requesting socket's receive queue */
	err = sock_queue_rcv_skb(sk, skb);
/* Send an MGMT_EV_CMD_COMPLETE event for command @cmd with the
 * response parameters @rp (@rp_len bytes) appended after the event
 * header. Returns 0 on success or a negative errno.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;

	BT_DBG("sock %p", sk);

	/* Event header, completion event and variable-length response */
	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);

	memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the mgmt interface
 * version and revision implemented by this kernel.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = __constant_cpu_to_le16(MGMT_REVISION);

	/* Version query is not tied to any controller */
	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: reply with the little-endian lists
 * of supported command and event opcodes (mgmt_commands[] followed
 * by mgmt_events[]).
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);

	BT_DBG("sock %p", sk);

	/* Variable-size reply: header plus one u16 per opcode */
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);

	rp->num_commands = __constant_cpu_to_le16(num_commands);
	rp->num_events = __constant_cpu_to_le16(num_events);

	/* Commands first, then events, in one contiguous opcode array;
	 * put_unaligned is needed since rp->opcodes has no alignment
	 * guarantee.
	 */
	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all
 * registered BR/EDR controllers. Controllers still in setup or
 * bound to a user channel are skipped.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_rp_read_index_list *rp;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count eligible controllers to size the reply */
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating under the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
		read_unlock(&hci_dev_list_lock);

	/* Second pass: fill in the index array */
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);

	/* Recompute the length: entries may have been skipped above */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
360 static u32 get_supported_settings(struct hci_dev *hdev)
364 settings |= MGMT_SETTING_POWERED;
365 settings |= MGMT_SETTING_PAIRABLE;
367 if (lmp_bredr_capable(hdev)) {
368 settings |= MGMT_SETTING_CONNECTABLE;
369 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
370 settings |= MGMT_SETTING_FAST_CONNECTABLE;
371 settings |= MGMT_SETTING_DISCOVERABLE;
372 settings |= MGMT_SETTING_BREDR;
373 settings |= MGMT_SETTING_LINK_SECURITY;
375 if (lmp_ssp_capable(hdev)) {
376 settings |= MGMT_SETTING_SSP;
377 settings |= MGMT_SETTING_HS;
381 if (lmp_le_capable(hdev)) {
382 settings |= MGMT_SETTING_LE;
383 settings |= MGMT_SETTING_ADVERTISING;
389 static u32 get_current_settings(struct hci_dev *hdev)
393 if (hdev_is_powered(hdev))
394 settings |= MGMT_SETTING_POWERED;
396 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
397 settings |= MGMT_SETTING_CONNECTABLE;
399 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
400 settings |= MGMT_SETTING_FAST_CONNECTABLE;
402 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
403 settings |= MGMT_SETTING_DISCOVERABLE;
405 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
406 settings |= MGMT_SETTING_PAIRABLE;
408 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
409 settings |= MGMT_SETTING_BREDR;
411 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
412 settings |= MGMT_SETTING_LE;
414 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
415 settings |= MGMT_SETTING_LINK_SECURITY;
417 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
418 settings |= MGMT_SETTING_SSP;
420 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
421 settings |= MGMT_SETTING_HS;
423 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
424 settings |= MGMT_SETTING_ADVERTISING;
429 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing all registered 16-bit service UUIDs
 * to @data, writing at most @len bytes. The PnP Information service
 * is excluded. If space runs out the field tag is downgraded to
 * "incomplete list". Returns a pointer past the written data.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	list_for_each_entry(uuid, &hdev->uuids, list) {

		if (uuid->size != 16)

		/* 16-bit UUIDs are stored in the last bytes of the
		 * 128-bit base-UUID representation.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);

		/* Device ID information is advertised via its own
		 * EIR field, not the UUID list.
		 */
		if (uuid16 == PNP_INFO_SVCLASS_ID)

			uuids_start[1] = EIR_UUID16_ALL;

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing all registered 32-bit service UUIDs
 * to @data, writing at most @len bytes. Downgrades the field tag to
 * "incomplete list" when truncating. Returns a pointer past the
 * written data.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)

			uuids_start[1] = EIR_UUID32_ALL;

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;

		/* 32-bit value lives in the tail of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));

		uuids_start[0] += sizeof(u32);
/* Append an EIR field listing all registered 128-bit service UUIDs
 * to @data, writing at most @len bytes. Downgrades the field tag to
 * "incomplete list" when truncating. Returns a pointer past the
 * written data.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)

			uuids_start[1] = EIR_UUID128_ALL;

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;

		memcpy(ptr, uuid->uuid, 16);

		uuids_start[0] += 16;
/* Fill @ptr with LE scan response data carrying the local device
 * name, shortening it (EIR_NAME_SHORT) if it does not fit in the
 * remaining advertising space. Returns the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)

	name_len = strlen(hdev->dev_name);

		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			ptr[1] = EIR_NAME_SHORT;
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR length byte covers the tag plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
/* Queue an LE Set Scan Response Data command if the freshly built
 * data differs from what is already cached on the adapter. No-op
 * when LE is disabled.
 */
static void update_scan_rsp_data(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Fill @ptr with LE advertising data: the AD flags field (general
 * discoverable / BR-EDR-capability bits) and, when valid, the
 * advertising TX power. Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
	u8 ad_len = 0, flags = 0;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Advertise simultaneous LE + BR/EDR support where the
		 * controller and host are capable of it.
		 */
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
		flags |= LE_AD_NO_BREDR;

	BT_DBG("adv flags 0x%02x", flags);

	/* HCI_TX_POWER_INVALID means the power level is unknown and
	 * must not be advertised.
	 */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an LE Set Advertising Data command if the freshly built
 * data differs from what is already cached on the adapter. No-op
 * when LE is disabled.
 */
static void update_adv_data(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Fill @data with extended inquiry response fields: local name
 * (shortened if necessary), inquiry TX power, Device ID record and
 * the 16/32/128-bit service UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)

	name_len = strlen(hdev->dev_name);

		ptr[1] = EIR_NAME_SHORT;
		ptr[1] = EIR_NAME_COMPLETE;

	/* EIR Data length */
	ptr[0] = name_len + 1;

	memcpy(ptr + 2, hdev->dev_name, name_len);

	ptr += (name_len + 2);

	/* HCI_TX_POWER_INVALID marks an unknown level: omit the field */
	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

	/* Device ID record: source, vendor, product and version, all
	 * little-endian per the DI profile.
	 */
	if (hdev->devid_source > 0) {
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

	/* Service UUID lists consume whatever space remains */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
702 static void update_eir(struct hci_request *req)
704 struct hci_dev *hdev = req->hdev;
705 struct hci_cp_write_eir cp;
707 if (!hdev_is_powered(hdev))
710 if (!lmp_ext_inq_capable(hdev))
713 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
716 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
719 memset(&cp, 0, sizeof(cp));
721 create_eir(hdev, cp.data);
723 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
726 memcpy(hdev->eir, cp.data, sizeof(cp.data));
728 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
731 static u8 get_service_classes(struct hci_dev *hdev)
733 struct bt_uuid *uuid;
736 list_for_each_entry(uuid, &hdev->uuids, list)
737 val |= uuid->svc_hint;
/* Queue a Write Class of Device command built from the adapter's
 * minor/major class and the aggregated service-class hints, if it
 * differs from the current class. Skipped while powered off, while
 * BR/EDR is disabled, or while the service cache is active.
 */
static void update_class(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode is reflected in the class bits */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))

	/* Avoid the HCI round-trip when nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Delayed-work handler that expires the service cache: clears
 * HCI_SERVICE_CACHE and runs an HCI request so deferred updates
 * reach the controller.
 */
static void service_cache_off(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev,
	struct hci_request req;

	/* Nothing to do if the cache flag was already cleared */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))

	hci_req_init(&req, hdev);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
/* One-time per-adapter mgmt initialization, triggered the first
 * time a mgmt socket touches the adapter (guarded by the HCI_MGMT
 * flag).
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: reply with the controller's address,
 * HCI version/manufacturer, supported and current settings, class
 * of device and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command together with the resources it owns. */
static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending command for @opcode, copy @len bytes of the
 * request parameters into it and link it onto the adapter's
 * mgmt_pending list. Returns the new entry or NULL on allocation
 * failure.
 */
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	/* Keep a private copy of the request parameters; the caller's
	 * buffer is not valid beyond this call.
	 */
	cmd->param = kmalloc(len, GFP_KERNEL);

	memcpy(cmd->param, data, len);

	list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command of the adapter whose opcode
 * matches @opcode; an @opcode of 0 matches all pending commands.
 * Safe against the callback removing entries from the list.
 */
static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
889 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
891 struct pending_cmd *cmd;
893 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
894 if (cmd->opcode == opcode)
901 static void mgmt_pending_remove(struct pending_cmd *cmd)
903 list_del(&cmd->list);
904 mgmt_pending_free(cmd);
907 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
909 __le32 settings = cpu_to_le32(get_current_settings(hdev));
911 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler: power the adapter up or down.
 * Rejects anything but 0x00/0x01, refuses to race with a pending
 * power command, and defers the actual transition to the power_on /
 * power_off work items.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,

	/* Cancel the auto-off timer: user space is taking control */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
		err = mgmt_powered(hdev, 1);

	/* Already in the requested state: just confirm the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);

	queue_work(hdev->req_workqueue, &hdev->power_on);
	queue_work(hdev->req_workqueue, &hdev->power_off.work);

	hci_dev_unlock(hdev);
/* Broadcast a mgmt event with @data_len bytes of payload to all
 * mgmt control sockets except @skip_sk. An index of MGMT_INDEX_NONE
 * is used when no adapter is associated with the event.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
		hdr->index = cpu_to_le16(hdev->id);
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
1000 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1004 ev = cpu_to_le32(get_current_settings(hdev));
1006 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1011 struct hci_dev *hdev;
1015 static void settings_rsp(struct pending_cmd *cmd, void *data)
1017 struct cmd_lookup *match = data;
1019 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1021 list_del(&cmd->list);
1023 if (match->sk == NULL) {
1024 match->sk = cmd->sk;
1025 sock_hold(match->sk);
1028 mgmt_pending_free(cmd);
1031 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1035 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1036 mgmt_pending_remove(cmd);
1039 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1041 if (!lmp_bredr_capable(hdev))
1042 return MGMT_STATUS_NOT_SUPPORTED;
1043 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1044 return MGMT_STATUS_REJECTED;
1046 return MGMT_STATUS_SUCCESS;
1049 static u8 mgmt_le_support(struct hci_dev *hdev)
1051 if (!lmp_le_capable(hdev))
1052 return MGMT_STATUS_NOT_SUPPORTED;
1053 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1054 return MGMT_STATUS_REJECTED;
1056 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for Set Discoverable: update the
 * HCI_DISCOVERABLE flag according to the requested mode, arm the
 * discoverable timeout if one was given, notify the requester and
 * broadcast the new settings.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;

	BT_DBG("status 0x%02x", status);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);

		/* Controller rejected the request: report the error and
		 * make sure the limited flag does not stay behind.
		 */
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

		changed = !test_and_set_bit(HCI_DISCOVERABLE,

		/* Arm the timer that clears discoverable again */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,

		changed = test_and_clear_bit(HCI_DISCOVERABLE,

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);

	hci_req_run(&req, NULL);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. Modes: 0x00 off, 0x01 general,
 * 0x02 limited discoverable. Validates the mode/timeout combination,
 * updates flags directly when powered off, and otherwise queues the
 * IAC and scan-enable HCI commands, completing asynchronously in
 * set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("request for %s", hdev->name);

	/* Discoverable is a BR/EDR-only concept here */
	status = mgmt_bredr_support(hdev);
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* A timeout needs a running adapter to ever expire */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);

	/* Serialize against in-flight discoverable/connectable changes */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,

	/* Discoverable implies connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);

			err = new_settings(hdev, sk);

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	hci_req_init(&req, hdev);

		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;

			/* General discoverable mode */
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;

		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	err = hci_req_run(&req, set_discoverable_complete);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Queue the page-scan activity/type commands that implement fast
 * connectable: interlaced scan with a short interval when @enable,
 * standard scan with the default interval otherwise. Commands are
 * only queued when the values actually change; no-op on pre-1.2
 * controllers.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)

		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);

		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);

	acp.window = __constant_cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1302 static u8 get_adv_type(struct hci_dev *hdev)
1304 struct pending_cmd *cmd;
1307 /* If there's a pending mgmt command the flag will not yet have
1308 * it's final value, so check for this first.
1310 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1312 struct mgmt_mode *cp = cmd->param;
1313 connectable = !!cp->val;
1315 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1318 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1321 static void enable_advertising(struct hci_request *req)
1323 struct hci_dev *hdev = req->hdev;
1324 struct hci_cp_le_set_adv_param cp;
1327 memset(&cp, 0, sizeof(cp));
1328 cp.min_interval = __constant_cpu_to_le16(0x0800);
1329 cp.max_interval = __constant_cpu_to_le16(0x0800);
1330 cp.type = get_adv_type(hdev);
1331 cp.own_address_type = hdev->own_addr_type;
1332 cp.channel_map = 0x07;
1334 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1336 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1339 static void disable_advertising(struct hci_request *req)
1343 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* HCI request completion handler for Set Connectable: sync the
 * HCI_CONNECTABLE flag with the requested mode, notify the
 * requester and broadcast the new settings on change.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("status 0x%02x", status);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);

		/* Controller rejected the request: report the error */
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);

		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

		new_settings(hdev, cmd->sk);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Flag-only variant of Set Connectable used when no HCI traffic is
 * needed (e.g. the adapter is powered off): update the flags,
 * confirm to the requester and broadcast new settings on change.
 * Clearing connectable also clears discoverable.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
	bool changed = false;

	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))

		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		/* Non-connectable implies non-discoverable */
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);

		return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: toggle page scan (BR/EDR) and
 * refresh the LE advertising type. When powered off only the flags
 * are changed; otherwise the HCI commands are queued and the request
 * completes asynchronously in set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("request for %s", hdev->name);

	/* Needs at least one of BR/EDR or LE to be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);

	/* Serialize against in-flight discoverable/connectable changes */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	    cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {

		/* Dropping connectable also ends any pending
		 * discoverable period.
		 */
		if (test_bit(HCI_ISCAN, &hdev->flags) &&
		    hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so the advertising type reflects the
	 * new connectable state (only when no LE links are up).
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
		mgmt_pending_remove(cmd);
		/* -ENODATA: nothing to send, fall back to flag update */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,

	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PAIRABLE: flips the HCI_PAIRABLE dev_flag.
 * Pure flag change — no HCI command is sent; a settings response is
 * returned and new_settings() is broadcast when the flag changed.
 * NOTE(review): extract omits lines (if/else around the two
 * test_and_*_bit calls, 'changed' check before new_settings).
 */
1496 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1499 struct mgmt_mode *cp = data;
1503 BT_DBG("request for %s", hdev->name);
1505 if (cp->val != 0x00 && cp->val != 0x01)
1506 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1507 MGMT_STATUS_INVALID_PARAMS);
/* 'changed' is false when the flag already had the requested value */
1512 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1514 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1516 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1521 err = new_settings(hdev, sk);
1524 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LINK_SECURITY: enables/disables link-level
 * authentication. When powered, sends HCI Write Authentication Enable
 * via a pending command; when powered off, only the dev_flag changes.
 * NOTE(review): extract omits lines ('status' check, 'val' assignment,
 * goto labels, closing braces).
 */
1528 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1531 struct mgmt_mode *cp = data;
1532 struct pending_cmd *cmd;
1536 BT_DBG("request for %s", hdev->name);
/* Link security is a BR/EDR feature */
1538 status = mgmt_bredr_support(hdev);
1540 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1543 if (cp->val != 0x00 && cp->val != 0x01)
1544 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1545 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: just toggle the flag and report */
1549 if (!hdev_is_powered(hdev)) {
1550 bool changed = false;
1552 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1553 &hdev->dev_flags)) {
1554 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1558 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1563 err = new_settings(hdev, sk);
/* Only one SET_LINK_SECURITY may be in flight at a time */
1568 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1569 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: reply directly */
1576 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1577 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1581 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1587 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1589 mgmt_pending_remove(cmd);
1594 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SSP: enables/disables Secure Simple Pairing.
 * Disabling SSP also clears High Speed (HS depends on SSP). When
 * powered, sends HCI Write Simple Pairing Mode via a pending command.
 * NOTE(review): extract omits lines (the powered-off if/else ladder's
 * conditions, second arguments of the test_and_*_bit calls, labels).
 */
1598 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1600 struct mgmt_mode *cp = data;
1601 struct pending_cmd *cmd;
1605 BT_DBG("request for %s", hdev->name);
1607 status = mgmt_bredr_support(hdev);
1609 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1611 if (!lmp_ssp_capable(hdev))
1612 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1613 MGMT_STATUS_NOT_SUPPORTED);
1615 if (cp->val != 0x00 && cp->val != 0x01)
1616 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1617 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: adjust flags only; disabling SSP drags HS down too */
1621 if (!hdev_is_powered(hdev)) {
1625 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1628 changed = test_and_clear_bit(HCI_SSP_ENABLED,
1631 changed = test_and_clear_bit(HCI_HS_ENABLED,
1634 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1637 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1642 err = new_settings(hdev, sk);
/* Serialize against concurrent SSP/HS changes */
1647 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1648 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1649 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
/* Already in the requested state: reply without HCI traffic */
1654 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1655 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1659 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1665 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1667 mgmt_pending_remove(cmd);
1672 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_HS: toggles the High Speed (AMP) setting.
 * HS is a pure host-side flag here — no HCI command is sent — but it
 * requires SSP to be enabled, and disabling is rejected while powered
 * (per the visible powered check in the clear path).
 * NOTE(review): extract omits the if/else structure around the
 * set/clear branches and the final 'changed' check.
 */
1676 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1678 struct mgmt_mode *cp = data;
1683 BT_DBG("request for %s", hdev->name);
1685 status = mgmt_bredr_support(hdev);
1687 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1689 if (!lmp_ssp_capable(hdev))
1690 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1691 MGMT_STATUS_NOT_SUPPORTED);
/* HS is meaningless without SSP enabled */
1693 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1694 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1695 MGMT_STATUS_REJECTED);
1697 if (cp->val != 0x00 && cp->val != 0x01)
1698 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1699 MGMT_STATUS_INVALID_PARAMS);
1704 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1706 if (hdev_is_powered(hdev)) {
1707 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1708 MGMT_STATUS_REJECTED);
1712 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1715 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1720 err = new_settings(hdev, sk);
1723 hci_dev_unlock(hdev);
/* HCI request completion callback for set_le(). On failure, answers all
 * pending SET_LE commands with the mapped error; on success, sends the
 * settings responses, broadcasts new_settings(), and refreshes the
 * advertising/scan-response data when LE actually ended up enabled.
 * NOTE(review): extract omits the status if/else framing and braces.
 */
1727 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1729 struct cmd_lookup match = { NULL, hdev };
1732 u8 mgmt_err = mgmt_status(status);
/* Failure: report the error to every pending SET_LE command */
1734 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1739 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1741 new_settings(hdev, match.sk);
1746 /* Make sure the controller has a good default for
1747 * advertising data. Restrict the update to when LE
1748 * has actually been enabled. During power on, the
1749 * update in powered_update_hci will take care of it.
1751 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1752 struct hci_request req;
1756 hci_req_init(&req, hdev);
1757 update_adv_data(&req);
1758 update_scan_rsp_data(&req);
/* Best-effort: no completion callback needed for this refresh */
1759 hci_req_run(&req, NULL);
1761 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LE: enables/disables LE host support. When a
 * real change is needed on a powered controller, sends HCI Write LE
 * Host Supported (completed by le_enable_complete); otherwise only the
 * dev_flags are updated. Dual-mode only — LE-only devices are rejected.
 * NOTE(review): extract omits lines ('val' assignment, hci_cp.le
 * assignment, goto labels, closing braces).
 */
1765 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1767 struct mgmt_mode *cp = data;
1768 struct hci_cp_write_le_host_supported hci_cp;
1769 struct pending_cmd *cmd;
1770 struct hci_request req;
1774 BT_DBG("request for %s", hdev->name);
1776 if (!lmp_le_capable(hdev))
1777 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1778 MGMT_STATUS_NOT_SUPPORTED);
1780 if (cp->val != 0x00 && cp->val != 0x01)
1781 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1782 MGMT_STATUS_INVALID_PARAMS);
1784 /* LE-only devices do not allow toggling LE on/off */
1785 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1786 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1787 MGMT_STATUS_REJECTED);
/* 'enabled' reflects what the controller currently advertises */
1792 enabled = lmp_host_le_capable(hdev);
/* No HCI traffic needed: powered off, or already in requested state */
1794 if (!hdev_is_powered(hdev) || val == enabled) {
1795 bool changed = false;
1797 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1798 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Disabling LE implicitly turns advertising off too */
1802 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1803 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1807 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1812 err = new_settings(hdev, sk);
/* Serialize with other LE-state-affecting commands */
1817 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1818 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1819 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1824 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1830 hci_req_init(&req, hdev);
1832 memset(&hci_cp, 0, sizeof(hci_cp));
/* 'simul' advertises simultaneous LE + BR/EDR when supported;
 * hci_cp.le is presumably set on a line missing from this extract.
 */
1836 hci_cp.simul = lmp_le_br_capable(hdev);
1838 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1839 disable_advertising(&req);
1842 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1845 err = hci_req_run(&req, le_enable_complete);
1847 mgmt_pending_remove(cmd);
1850 hci_dev_unlock(hdev);
1854 /* This is a helper function to test for pending mgmt commands that can
1855 * cause CoD or EIR HCI commands. We can only allow one such pending
1856 * mgmt command at a time since otherwise we cannot easily track what
1857 * the current values are, will be, and based on that calculate if a new
1858 * HCI command needs to be sent and if yes with what value.
/* Returns true when any such command is pending on hdev.
 * NOTE(review): the switch's return statements and the function's
 * closing lines are missing from this extract.
 */
1860 static bool pending_eir_or_class(struct hci_dev *hdev)
1862 struct pending_cmd *cmd;
1864 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1865 switch (cmd->opcode) {
1866 case MGMT_OP_ADD_UUID:
1867 case MGMT_OP_REMOVE_UUID:
1868 case MGMT_OP_SET_DEV_CLASS:
1869 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used by get_uuid_size() to detect 16/32-bit
 * short forms embedded in a full 128-bit UUID.
 */
1877 static const u8 bluetooth_base_uuid[] = {
1878 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1879 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classifies a 128-bit UUID as 16, 32 or 128 bits wide. A UUID whose
 * low 12 bytes differ from the Bluetooth base UUID is a full 128-bit
 * UUID; otherwise the top 32-bit value decides between 16 and 32 bits.
 * NOTE(review): the return statements are missing from this extract.
 */
1882 static u8 get_uuid_size(const u8 *uuid)
1886 if (memcmp(uuid, bluetooth_base_uuid, 12))
1889 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for class/EIR-affecting mgmt commands
 * (Add/Remove UUID, Set Device Class). Finds the pending command for
 * mgmt_op and completes it with the mapped status plus the current
 * 3-byte Class of Device as response payload.
 */
1896 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1898 struct pending_cmd *cmd;
1902 cmd = mgmt_pending_find(mgmt_op, hdev);
1906 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1907 hdev->dev_class, 3);
1909 mgmt_pending_remove(cmd);
1912 hci_dev_unlock(hdev);
/* HCI request completion callback for add_uuid(). */
1915 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1917 BT_DBG("status 0x%02x", status);
1919 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for MGMT_OP_ADD_UUID: registers a service UUID, appends it to
 * hdev->uuids, and issues an HCI request (presumably CoD/EIR updates on
 * lines missing from this extract) completed by add_uuid_complete().
 * -ENODATA from hci_req_run means no HCI command was needed, so the
 * current device class is returned directly.
 */
1922 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1924 struct mgmt_cp_add_uuid *cp = data;
1925 struct pending_cmd *cmd;
1926 struct hci_request req;
1927 struct bt_uuid *uuid;
1930 BT_DBG("request for %s", hdev->name);
/* Only one class/EIR-affecting command may be pending at a time */
1934 if (pending_eir_or_class(hdev)) {
1935 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1940 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1946 memcpy(uuid->uuid, cp->uuid, 16);
1947 uuid->svc_hint = cp->svc_hint;
1948 uuid->size = get_uuid_size(cp->uuid);
1950 list_add_tail(&uuid->list, &hdev->uuids);
1952 hci_req_init(&req, hdev);
1957 err = hci_req_run(&req, add_uuid_complete);
1959 if (err != -ENODATA)
/* Nothing to send: answer immediately with the current CoD */
1962 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1963 hdev->dev_class, 3);
1967 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1976 hci_dev_unlock(hdev);
/* Re-arms the service cache: if the HCI_SERVICE_CACHE flag was not
 * already set, schedules the delayed service_cache work. Only relevant
 * while powered. NOTE(review): return statements are missing from this
 * extract; the bool presumably reports whether caching was (re)enabled.
 */
1980 static bool enable_service_cache(struct hci_dev *hdev)
1982 if (!hdev_is_powered(hdev))
1985 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1986 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion callback for remove_uuid(). */
1994 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1996 BT_DBG("status 0x%02x", status);
1998 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for MGMT_OP_REMOVE_UUID: removes one UUID (or all, when the
 * all-zero wildcard UUID is given) and issues an HCI request completed
 * by remove_uuid_complete(). An unknown UUID yields INVALID_PARAMS.
 * NOTE(review): extract omits lines (found-counter, continue/free in
 * the removal loop, goto labels, closing braces).
 */
2001 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2004 struct mgmt_cp_remove_uuid *cp = data;
2005 struct pending_cmd *cmd;
2006 struct bt_uuid *match, *tmp;
2007 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2008 struct hci_request req;
2011 BT_DBG("request for %s", hdev->name);
2015 if (pending_eir_or_class(hdev)) {
2016 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID acts as a wildcard: clear every stored UUID */
2021 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2022 err = hci_uuids_clear(hdev);
2024 if (enable_service_cache(hdev)) {
2025 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2026 0, hdev->dev_class, 3);
/* Otherwise remove only entries matching the given UUID */
2035 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2036 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2039 list_del(&match->list);
/* No matching UUID was found */
2045 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2046 MGMT_STATUS_INVALID_PARAMS);
2051 hci_req_init(&req, hdev);
2056 err = hci_req_run(&req, remove_uuid_complete);
2058 if (err != -ENODATA)
/* Nothing to send: answer immediately with the current CoD */
2061 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2062 hdev->dev_class, 3);
2066 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2075 hci_dev_unlock(hdev);
/* HCI request completion callback for set_dev_class(). */
2079 static void set_class_complete(struct hci_dev *hdev, u8 status)
2081 BT_DBG("status 0x%02x", status);
2083 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for MGMT_OP_SET_DEV_CLASS: sets the major/minor Class of
 * Device. Validates the reserved bits (low 2 of minor, high 3 of
 * major), stores the values, and — when powered — flushes any pending
 * service cache and issues an HCI request completed by
 * set_class_complete(). NOTE(review): extract omits lines (the actual
 * update_class/update_eir additions, re-lock after the unlock at 2126,
 * goto labels).
 */
2086 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2089 struct mgmt_cp_set_dev_class *cp = data;
2090 struct pending_cmd *cmd;
2091 struct hci_request req;
2094 BT_DBG("request for %s", hdev->name);
2096 if (!lmp_bredr_capable(hdev))
2097 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2098 MGMT_STATUS_NOT_SUPPORTED);
2102 if (pending_eir_or_class(hdev)) {
2103 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved CoD bits must be zero */
2108 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2109 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2110 MGMT_STATUS_INVALID_PARAMS);
2114 hdev->major_class = cp->major;
2115 hdev->minor_class = cp->minor;
/* Powered off: remember the values, apply them at power on */
2117 if (!hdev_is_powered(hdev)) {
2118 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2119 hdev->dev_class, 3);
2123 hci_req_init(&req, hdev);
/* Drop the lock to safely cancel the service-cache work (the work
 * item itself takes the dev lock).
 */
2125 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2126 hci_dev_unlock(hdev);
2127 cancel_delayed_work_sync(&hdev->service_cache);
2134 err = hci_req_run(&req, set_class_complete);
2136 if (err != -ENODATA)
/* Nothing to send: answer immediately with the current CoD */
2139 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2140 hdev->dev_class, 3);
2144 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2153 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replaces the stored BR/EDR link
 * keys with the list supplied by userspace. Validates total length
 * against key_count, validates each address type, clears the old keys,
 * and sets/clears HCI_DEBUG_KEYS from cp->debug_keys.
 */
2157 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2160 struct mgmt_cp_load_link_keys *cp = data;
2161 u16 key_count, expected_len;
2164 BT_DBG("request for %s", hdev->name);
2166 if (!lmp_bredr_capable(hdev))
2167 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2168 MGMT_STATUS_NOT_SUPPORTED);
2170 key_count = __le16_to_cpu(cp->key_count);
/* NOTE(review): expected_len is u16, so a large key_count can make
 * this multiplication wrap; verify against the upstream hardening
 * (later kernels cap key_count explicitly) — TODO confirm.
 */
2172 expected_len = sizeof(*cp) + key_count *
2173 sizeof(struct mgmt_link_key_info);
2174 if (expected_len != len) {
2175 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2177 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2178 MGMT_STATUS_INVALID_PARAMS);
2181 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2182 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2183 MGMT_STATUS_INVALID_PARAMS);
2185 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: validate every entry before touching stored keys */
2188 for (i = 0; i < key_count; i++) {
2189 struct mgmt_link_key_info *key = &cp->keys[i];
2191 if (key->addr.type != BDADDR_BREDR)
2192 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2193 MGMT_STATUS_INVALID_PARAMS);
2198 hci_link_keys_clear(hdev);
2201 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2203 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
/* Second pass: store the new keys */
2205 for (i = 0; i < key_count; i++) {
2206 struct mgmt_link_key_info *key = &cp->keys[i];
2208 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2209 key->type, key->pin_len);
2212 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2214 hci_dev_unlock(hdev);
/* Emits the MGMT_EV_DEVICE_UNPAIRED event for the given address,
 * skipping delivery to skip_sk (the socket that issued the command).
 */
2219 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2220 u8 addr_type, struct sock *skip_sk)
2222 struct mgmt_ev_device_unpaired ev;
2224 bacpy(&ev.addr.bdaddr, bdaddr);
2225 ev.addr.type = addr_type;
2227 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for MGMT_OP_UNPAIR_DEVICE: removes the stored link key (BR/
 * EDR) or LTK (LE) for the address. If cp->disconnect is set and a
 * connection exists, an HCI Disconnect is sent and completion is
 * deferred via a pending command; otherwise the reply (and the
 * DEVICE_UNPAIRED event) is sent immediately.
 * NOTE(review): extract omits lines (err check after key removal,
 * the !conn immediate-reply branch, goto labels, braces).
 */
2231 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2234 struct mgmt_cp_unpair_device *cp = data;
2235 struct mgmt_rp_unpair_device rp;
2236 struct hci_cp_disconnect dc;
2237 struct pending_cmd *cmd;
2238 struct hci_conn *conn;
/* Echo the target address back in the response */
2241 memset(&rp, 0, sizeof(rp));
2242 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2243 rp.addr.type = cp->addr.type;
2245 if (!bdaddr_type_is_valid(cp->addr.type))
2246 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2247 MGMT_STATUS_INVALID_PARAMS,
2250 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2251 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2252 MGMT_STATUS_INVALID_PARAMS,
2257 if (!hdev_is_powered(hdev)) {
2258 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2259 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Remove the pairing material for the matching transport */
2263 if (cp->addr.type == BDADDR_BREDR)
2264 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2266 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
/* Removal failed: the device was not paired */
2269 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2270 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2274 if (cp->disconnect) {
2275 if (cp->addr.type == BDADDR_BREDR)
2276 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2279 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No active connection: reply and notify immediately */
2286 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2288 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Defer the reply until the disconnect completes */
2292 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2299 dc.handle = cpu_to_le16(conn->handle);
2300 dc.reason = 0x13; /* Remote User Terminated Connection */
2301 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2303 mgmt_pending_remove(cmd);
2306 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_DISCONNECT: tears down the ACL or LE connection
 * to the given address by sending HCI Disconnect with reason "remote
 * user terminated". The mgmt reply is deferred via a pending command
 * until the disconnect completes. Only one DISCONNECT may be pending.
 */
2310 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2313 struct mgmt_cp_disconnect *cp = data;
2314 struct mgmt_rp_disconnect rp;
2315 struct hci_cp_disconnect dc;
2316 struct pending_cmd *cmd;
2317 struct hci_conn *conn;
/* Echo the target address back in the response */
2322 memset(&rp, 0, sizeof(rp));
2323 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2324 rp.addr.type = cp->addr.type;
2326 if (!bdaddr_type_is_valid(cp->addr.type))
2327 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2328 MGMT_STATUS_INVALID_PARAMS,
2333 if (!test_bit(HCI_UP, &hdev->flags)) {
2334 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2335 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2339 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2340 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2341 MGMT_STATUS_BUSY, &rp, sizeof(rp));
/* Look up the connection on the transport matching the address type */
2345 if (cp->addr.type == BDADDR_BREDR)
2346 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2349 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2351 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2352 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2353 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2357 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2363 dc.handle = cpu_to_le16(conn->handle);
2364 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2366 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2368 mgmt_pending_remove(cmd);
2371 hci_dev_unlock(hdev);
/* Maps an HCI (link_type, addr_type) pair to the mgmt BDADDR_* address
 * type used on the Management interface: LE links map to public/random
 * LE types, everything else falls back to BR/EDR.
 * NOTE(review): the case labels for link_type (e.g. LE_LINK) are
 * missing from this extract.
 */
2375 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2377 switch (link_type) {
2379 switch (addr_type) {
2380 case ADDR_LE_DEV_PUBLIC:
2381 return BDADDR_LE_PUBLIC;
2384 /* Fallback to LE Random address type */
2385 return BDADDR_LE_RANDOM;
2389 /* Fallback to BR/EDR type */
2390 return BDADDR_BREDR;
/* Handler for MGMT_OP_GET_CONNECTIONS: returns the list of mgmt-visible
 * connections. Walks conn_hash twice — once to size the reply buffer,
 * once to fill it — and filters out SCO/eSCO links on the second pass.
 * NOTE(review): extract omits lines (declarations of 'c'/'i'/'rp_len',
 * kmalloc failure handling, kfree of rp, closing braces).
 */
2394 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2397 struct mgmt_rp_get_connections *rp;
2407 if (!hdev_is_powered(hdev)) {
2408 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2409 MGMT_STATUS_NOT_POWERED);
/* First pass: count mgmt-visible connections to size the reply */
2414 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2415 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2419 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2420 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the addresses, skipping SCO/eSCO links */
2427 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2428 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2430 bacpy(&rp->addr[i].bdaddr, &c->dst);
2431 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2432 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2437 rp->conn_count = cpu_to_le16(i);
2439 /* Recalculate length in case of filtered SCO connections, etc */
2440 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2442 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2448 hci_dev_unlock(hdev);
/* Queues a pending PIN_CODE_NEG_REPLY mgmt command and sends the HCI
 * PIN Code Negative Reply for the given address. The pending entry is
 * removed again if the HCI send fails.
 */
2452 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2453 struct mgmt_cp_pin_code_neg_reply *cp)
2455 struct pending_cmd *cmd;
2458 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2463 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2464 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2466 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PIN_CODE_REPLY: forwards a user-supplied PIN to
 * the controller for the pending ACL pairing. If the link requires
 * high security but the PIN is not 16 bytes, a negative reply is sent
 * instead and the command fails with INVALID_PARAMS.
 */
2471 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2474 struct hci_conn *conn;
2475 struct mgmt_cp_pin_code_reply *cp = data;
2476 struct hci_cp_pin_code_reply reply;
2477 struct pending_cmd *cmd;
2484 if (!hdev_is_powered(hdev)) {
2485 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2486 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only applies to an existing ACL connection */
2490 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2492 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2493 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a full 16-byte PIN; reject shorter ones */
2497 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2498 struct mgmt_cp_pin_code_neg_reply ncp;
2500 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2502 BT_ERR("PIN code is not 16 bytes long");
2504 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2506 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2507 MGMT_STATUS_INVALID_PARAMS);
2512 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2518 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2519 reply.pin_len = cp->pin_len;
2520 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2522 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2524 mgmt_pending_remove(cmd);
2527 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY: stores the IO capability used
 * for subsequent pairings. Host-side state only — no HCI command is
 * sent — so the reply is an immediate empty command complete.
 */
2531 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2534 struct mgmt_cp_set_io_capability *cp = data;
2540 hdev->io_capability = cp->io_capability;
2542 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2543 hdev->io_capability);
2545 hci_dev_unlock(hdev);
2547 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Finds the pending PAIR_DEVICE command whose user_data points at the
 * given connection, or NULL if none. NOTE(review): the continue/return
 * lines of the loop are missing from this extract.
 */
2551 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2553 struct hci_dev *hdev = conn->hdev;
2554 struct pending_cmd *cmd;
2556 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2557 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2560 if (cmd->user_data != conn)
/* Finishes a PAIR_DEVICE command: sends the command-complete with the
 * peer address and status, detaches all connection callbacks so no
 * further notifications arrive, drops the connection reference taken
 * by pair_device(), and removes the pending command.
 */
2569 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2571 struct mgmt_rp_pair_device rp;
2572 struct hci_conn *conn = cmd->user_data;
2574 bacpy(&rp.addr.bdaddr, &conn->dst);
2575 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2577 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2580 /* So we don't get further callbacks for this connection */
2581 conn->connect_cfm_cb = NULL;
2582 conn->security_cfm_cb = NULL;
2583 conn->disconn_cfm_cb = NULL;
2585 hci_conn_drop(conn);
2587 mgmt_pending_remove(cmd);
/* Connection/security callback for BR/EDR pairing: resolves the
 * pending PAIR_DEVICE command for this connection and completes it
 * with the mapped HCI status. NOTE(review): the if/else around the
 * NULL-cmd debug message is missing from this extract.
 */
2590 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2592 struct pending_cmd *cmd;
2594 BT_DBG("status %u", status);
2596 cmd = find_pairing(conn);
2598 BT_DBG("Unable to find a pending command");
2600 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback used during pairing. For LE, a
 * successful connect alone does not mean pairing finished (see
 * pair_device), so a line missing from this extract presumably filters
 * the success case before the lookup — TODO confirm.
 */
2603 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2605 struct pending_cmd *cmd;
2607 BT_DBG("status %u", status);
2612 cmd = find_pairing(conn);
2614 BT_DBG("Unable to find a pending command");
2616 pairing_complete(cmd, mgmt_status(status));
/* Handler for MGMT_OP_PAIR_DEVICE: initiates dedicated bonding with the
 * given address. Creates an ACL or LE connection, attaches pairing
 * callbacks, and defers the reply via a pending command completed by
 * pairing_complete(). io_cap 0x03 (NoInputNoOutput) selects non-MITM
 * bonding. NOTE(review): extract omits lines (IS_ERR(conn) framing,
 * busy/failed 'status' declaration, goto labels, braces).
 */
2619 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2622 struct mgmt_cp_pair_device *cp = data;
2623 struct mgmt_rp_pair_device rp;
2624 struct pending_cmd *cmd;
2625 u8 sec_level, auth_type;
2626 struct hci_conn *conn;
/* Echo the target address back in the response */
2631 memset(&rp, 0, sizeof(rp));
2632 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2633 rp.addr.type = cp->addr.type;
2635 if (!bdaddr_type_is_valid(cp->addr.type))
2636 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2637 MGMT_STATUS_INVALID_PARAMS,
2642 if (!hdev_is_powered(hdev)) {
2643 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2644 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* NoInputNoOutput cannot provide MITM protection */
2648 sec_level = BT_SECURITY_MEDIUM;
2649 if (cp->io_cap == 0x03)
2650 auth_type = HCI_AT_DEDICATED_BONDING;
2652 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2654 if (cp->addr.type == BDADDR_BREDR)
2655 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2656 cp->addr.type, sec_level, auth_type);
2658 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2659 cp->addr.type, sec_level, auth_type);
/* hci_connect() returned an error pointer */
2664 if (PTR_ERR(conn) == -EBUSY)
2665 status = MGMT_STATUS_BUSY;
2667 status = MGMT_STATUS_CONNECT_FAILED;
2669 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A callback already attached means another pairing is in progress */
2675 if (conn->connect_cfm_cb) {
2676 hci_conn_drop(conn);
2677 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2678 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2682 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2685 hci_conn_drop(conn);
2689 /* For LE, just connecting isn't a proof that the pairing finished */
2690 if (cp->addr.type == BDADDR_BREDR)
2691 conn->connect_cfm_cb = pairing_complete_cb;
2693 conn->connect_cfm_cb = le_connect_complete_cb;
2695 conn->security_cfm_cb = pairing_complete_cb;
2696 conn->disconn_cfm_cb = pairing_complete_cb;
2697 conn->io_capability = cp->io_cap;
2698 cmd->user_data = conn;
/* Already connected and secure enough: complete immediately */
2700 if (conn->state == BT_CONNECTED &&
2701 hci_conn_security(conn, sec_level, auth_type))
2702 pairing_complete(cmd, 0);
2707 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: aborts the in-progress
 * PAIR_DEVICE command whose target matches the supplied address,
 * completing the pairing with MGMT_STATUS_CANCELLED and replying
 * success to the canceller. No pending pairing, or an address
 * mismatch, yields INVALID_PARAMS.
 */
2711 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2714 struct mgmt_addr_info *addr = data;
2715 struct pending_cmd *cmd;
2716 struct hci_conn *conn;
2723 if (!hdev_is_powered(hdev)) {
2724 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2725 MGMT_STATUS_NOT_POWERED);
2729 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2731 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2732 MGMT_STATUS_INVALID_PARAMS);
2736 conn = cmd->user_data;
/* The cancel must name the same device the pairing targets */
2738 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2739 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2740 MGMT_STATUS_INVALID_PARAMS);
2744 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2746 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2747 addr, sizeof(*addr));
2749 hci_dev_unlock(hdev);
/* Common backend for all user pairing responses (PIN negative reply,
 * user confirm pos/neg, passkey pos/neg). Routes LE addresses to SMP
 * via smp_user_confirm_reply(); BR/EDR responses go out as the given
 * hci_op, with the passkey included only for USER_PASSKEY_REPLY.
 * The mgmt reply is deferred via a pending command for the HCI path.
 */
2753 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2754 struct mgmt_addr_info *addr, u16 mgmt_op,
2755 u16 hci_op, __le32 passkey)
2757 struct pending_cmd *cmd;
2758 struct hci_conn *conn;
2763 if (!hdev_is_powered(hdev)) {
2764 err = cmd_complete(sk, hdev->id, mgmt_op,
2765 MGMT_STATUS_NOT_POWERED, addr,
2770 if (addr->type == BDADDR_BREDR)
2771 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2773 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2776 err = cmd_complete(sk, hdev->id, mgmt_op,
2777 MGMT_STATUS_NOT_CONNECTED, addr,
2782 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2783 /* Continue with pairing via SMP */
2784 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2787 err = cmd_complete(sk, hdev->id, mgmt_op,
2788 MGMT_STATUS_SUCCESS, addr,
2791 err = cmd_complete(sk, hdev->id, mgmt_op,
2792 MGMT_STATUS_FAILED, addr,
2798 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2804 /* Continue with pairing via HCI */
2805 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2806 struct hci_cp_user_passkey_reply cp;
2808 bacpy(&cp.bdaddr, &addr->bdaddr);
2809 cp.passkey = passkey;
2810 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* All other replies carry only the peer address */
2812 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2816 mgmt_pending_remove(cmd);
2819 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: rejects a PIN request; thin
 * wrapper over user_pairing_resp() (no passkey).
 */
2823 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2824 void *data, u16 len)
2826 struct mgmt_cp_pin_code_neg_reply *cp = data;
2830 return user_pairing_resp(sk, hdev, &cp->addr,
2831 MGMT_OP_PIN_CODE_NEG_REPLY,
2832 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_REPLY: accepts a numeric-comparison
 * confirmation; validates the fixed-size parameter block, then
 * delegates to user_pairing_resp() (no passkey).
 */
2835 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2838 struct mgmt_cp_user_confirm_reply *cp = data;
2842 if (len != sizeof(*cp))
2843 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2844 MGMT_STATUS_INVALID_PARAMS);
2846 return user_pairing_resp(sk, hdev, &cp->addr,
2847 MGMT_OP_USER_CONFIRM_REPLY,
2848 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: rejects a numeric-
 * comparison confirmation; thin wrapper over user_pairing_resp().
 */
2851 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2852 void *data, u16 len)
2854 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2858 return user_pairing_resp(sk, hdev, &cp->addr,
2859 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2860 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_PASSKEY_REPLY: supplies the user-entered
 * passkey; wrapper over user_pairing_resp() carrying cp->passkey.
 */
2863 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2866 struct mgmt_cp_user_passkey_reply *cp = data;
2870 return user_pairing_resp(sk, hdev, &cp->addr,
2871 MGMT_OP_USER_PASSKEY_REPLY,
2872 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY: rejects a passkey
 * request; thin wrapper over user_pairing_resp() (no passkey).
 */
2875 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2876 void *data, u16 len)
2878 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2882 return user_pairing_resp(sk, hdev, &cp->addr,
2883 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2884 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Appends an HCI Write Local Name command (copying hdev->dev_name)
 * to the given request.
 */
2887 static void update_name(struct hci_request *req)
2889 struct hci_dev *hdev = req->hdev;
2890 struct hci_cp_write_local_name cp;
2892 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2894 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for set_local_name(): answers the
 * pending SET_LOCAL_NAME with either an error status or a command
 * complete. NOTE(review): the assignment of 'cp' from cmd->param and
 * the response payload arguments are on lines missing from this
 * extract.
 */
2897 static void set_name_complete(struct hci_dev *hdev, u8 status)
2899 struct mgmt_cp_set_local_name *cp;
2900 struct pending_cmd *cmd;
2902 BT_DBG("status 0x%02x", status);
2906 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2913 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2914 mgmt_status(status));
2916 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2919 mgmt_pending_remove(cmd);
2922 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LOCAL_NAME: updates the device name and
 * short name. Unchanged values short-circuit with a direct reply;
 * powered off only updates host state and emits LOCAL_NAME_CHANGED;
 * powered on builds an HCI request (local name / EIR for BR/EDR, scan
 * response data for LE) completed by set_name_complete().
 */
2925 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2928 struct mgmt_cp_set_local_name *cp = data;
2929 struct pending_cmd *cmd;
2930 struct hci_request req;
2937 /* If the old values are the same as the new ones just return a
2938 * direct command complete event.
2940 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2941 !memcmp(hdev->short_name, cp->short_name,
2942 sizeof(hdev->short_name))) {
2943 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2948 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: update host state only and notify listeners */
2950 if (!hdev_is_powered(hdev)) {
2951 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2953 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2958 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2964 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2970 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2972 hci_req_init(&req, hdev);
2974 if (lmp_bredr_capable(hdev)) {
2979 /* The name is stored in the scan response data and so
2980 * no need to update the advertising data here.
2982 if (lmp_le_capable(hdev))
2983 update_scan_rsp_data(&req);
2985 err = hci_req_run(&req, set_name_complete);
2987 mgmt_pending_remove(cmd);
2990 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: asks the controller for its
 * SSP out-of-band hash/randomizer via HCI Read Local OOB Data. The
 * reply is deferred through a pending command; requires power, SSP
 * support, and no other READ_LOCAL_OOB_DATA already pending.
 */
2994 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2995 void *data, u16 data_len)
2997 struct pending_cmd *cmd;
3000 BT_DBG("%s", hdev->name);
3004 if (!hdev_is_powered(hdev)) {
3005 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3006 MGMT_STATUS_NOT_POWERED);
3010 if (!lmp_ssp_capable(hdev)) {
3011 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3012 MGMT_STATUS_NOT_SUPPORTED);
3016 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3017 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3022 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3028 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3030 mgmt_pending_remove(cmd);
3033 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA: stores a remote device's
 * OOB hash (and, per the call signature, its randomizer on the line
 * missing from this extract) and replies synchronously with the
 * store result.
 */
3037 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3038 void *data, u16 len)
3040 struct mgmt_cp_add_remote_oob_data *cp = data;
3044 BT_DBG("%s ", hdev->name);
3048 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
3051 status = MGMT_STATUS_FAILED;
3053 status = MGMT_STATUS_SUCCESS;
3055 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3056 &cp->addr, sizeof(cp->addr));
3058 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: deletes stored remote
 * OOB data for the address; a failed removal maps to INVALID_PARAMS
 * (no such entry), and the reply is sent synchronously.
 */
3062 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3063 void *data, u16 len)
3065 struct mgmt_cp_remove_remote_oob_data *cp = data;
3069 BT_DBG("%s", hdev->name);
3073 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3075 status = MGMT_STATUS_INVALID_PARAMS;
3077 status = MGMT_STATUS_SUCCESS;
3079 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3080 status, &cp->addr, sizeof(cp->addr));
3082 hci_dev_unlock(hdev);
/* Fails a pending START_DISCOVERY: resets the discovery state machine
 * to STOPPED and completes the pending command with the mapped error
 * and the attempted discovery type as payload.
 */
3086 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3088 struct pending_cmd *cmd;
3092 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3094 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3098 type = hdev->discovery.type;
3100 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3101 &type, sizeof(type));
3102 mgmt_pending_remove(cmd);
/* HCI request completion callback for start_discovery(). On error the
 * pending command is failed; on success the state machine advances to
 * FINDING and, for LE/interleaved scans, the le_scan_disable work is
 * scheduled to stop the scan after a timeout. NOTE(review): the
 * if(status)/locking structure and LE timeout constant are on lines
 * missing from this extract.
 */
3107 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3109 BT_DBG("status %d", status);
3113 mgmt_start_discovery_failed(hdev, status);
3114 hci_dev_unlock(hdev);
3119 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3120 hci_dev_unlock(hdev);
3122 switch (hdev->discovery.type) {
3123 case DISCOV_TYPE_LE:
3124 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3128 case DISCOV_TYPE_INTERLEAVED:
3129 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3130 DISCOV_INTERLEAVED_TIMEOUT);
3133 case DISCOV_TYPE_BREDR:
/* BR/EDR inquiry terminates on its own; default is a bug trap */
3137 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
/* Handle MGMT_OP_START_DISCOVERY: validate the adapter state, register a
 * pending command, and build/run the HCI request that starts BR/EDR
 * inquiry and/or LE scanning depending on the requested discovery type.
 *
 * FIX(review): line 3244 contained the mojibake "memset(¶m_cp, ...)" —
 * an "&para;" entity collapse of "&param_cp". Restored to take the
 * address of param_cp; without this the code does not compile.
 *
 * NOTE(review): this extract is missing interior lines (goto targets,
 * error-code arguments of several cmd_status() calls, closing braces).
 */
3141 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3142 void *data, u16 len)
3144 struct mgmt_cp_start_discovery *cp = data;
3145 struct pending_cmd *cmd;
3146 struct hci_cp_le_set_scan_param param_cp;
3147 struct hci_cp_le_set_scan_enable enable_cp;
3148 struct hci_cp_inquiry inq_cp;
3149 struct hci_request req;
3150 /* General inquiry access code (GIAC) */
3151 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3155 BT_DBG("%s", hdev->name);
/* Reject discovery on a powered-down controller. */
3159 if (!hdev_is_powered(hdev)) {
3160 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3161 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and mgmt discovery are mutually exclusive. */
3165 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3166 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
/* Only one discovery session at a time. */
3171 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3172 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3177 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3183 hdev->discovery.type = cp->type;
3185 hci_req_init(&req, hdev);
3187 switch (hdev->discovery.type) {
3188 case DISCOV_TYPE_BREDR:
3189 status = mgmt_bredr_support(hdev);
3191 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3193 mgmt_pending_remove(cmd);
/* Inquiry already running at the HCI level — refuse to stack another. */
3197 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3198 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3200 mgmt_pending_remove(cmd);
/* Start with a clean inquiry cache so old results don't leak through. */
3204 hci_inquiry_cache_flush(hdev);
3206 memset(&inq_cp, 0, sizeof(inq_cp));
3207 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3208 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3209 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3212 case DISCOV_TYPE_LE:
3213 case DISCOV_TYPE_INTERLEAVED:
3214 status = mgmt_le_support(hdev);
3216 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3218 mgmt_pending_remove(cmd);
/* Interleaved discovery also needs BR/EDR to be enabled. */
3222 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3223 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3224 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3225 MGMT_STATUS_NOT_SUPPORTED);
3226 mgmt_pending_remove(cmd);
/* Cannot scan while advertising is active. */
3230 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3231 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3232 MGMT_STATUS_REJECTED);
3233 mgmt_pending_remove(cmd);
/* An LE scan is already in progress. */
3237 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
3238 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3240 mgmt_pending_remove(cmd);
/* Active scan with the discovery interval/window, then enable scanning
 * with duplicate filtering. */
3244 memset(&param_cp, 0, sizeof(param_cp));
3245 param_cp.type = LE_SCAN_ACTIVE;
3246 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3247 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3248 param_cp.own_address_type = hdev->own_addr_type;
3249 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3252 memset(&enable_cp, 0, sizeof(enable_cp));
3253 enable_cp.enable = LE_SCAN_ENABLE;
3254 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3255 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* default: unknown discovery type from user space. */
3260 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3261 MGMT_STATUS_INVALID_PARAMS);
3262 mgmt_pending_remove(cmd);
3266 err = hci_req_run(&req, start_discovery_complete);
3268 mgmt_pending_remove(cmd);
3270 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3273 hci_dev_unlock(hdev);
/* Fail a pending MGMT_OP_STOP_DISCOVERY: complete the pending command with
 * the translated HCI status, echoing the current discovery type.
 */
3277 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3279 struct pending_cmd *cmd;
3282 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3286 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3287 &hdev->discovery.type, sizeof(hdev->discovery.type));
3288 mgmt_pending_remove(cmd);
/* Request-completion callback for the stop-discovery HCI request: on
 * failure notify the pending command, on success mark discovery STOPPED.
 * NOTE(review): the if/else between 3300 and 3304 is missing from this
 * extract.
 */
3293 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3295 BT_DBG("status %d", status);
3300 mgmt_stop_discovery_failed(hdev, status);
3304 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3307 hci_dev_unlock(hdev);
/* Handle MGMT_OP_STOP_DISCOVERY: validate that a discovery of the requested
 * type is actually active, then build and run the HCI request that cancels
 * it (inquiry cancel, LE scan disable, or remote-name-request cancel,
 * depending on the current discovery state).
 * NOTE(review): several branch/else lines and closing braces are missing
 * from this extract; do not edit logic from here.
 */
3310 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3313 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3314 struct pending_cmd *cmd;
3315 struct hci_cp_remote_name_req_cancel cp;
3316 struct inquiry_entry *e;
3317 struct hci_request req;
3318 struct hci_cp_le_set_scan_enable enable_cp;
3321 BT_DBG("%s", hdev->name);
/* Nothing to stop. */
3325 if (!hci_discovery_active(hdev)) {
3326 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3327 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3328 sizeof(mgmt_cp->type));
/* Requested type must match the one that was started. */
3332 if (hdev->discovery.type != mgmt_cp->type) {
3333 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3334 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3335 sizeof(mgmt_cp->type));
3339 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3345 hci_req_init(&req, hdev);
3347 switch (hdev->discovery.state) {
3348 case DISCOVERY_FINDING:
/* Cancel whichever scan is running: BR/EDR inquiry or LE scan. */
3349 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3350 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3352 cancel_delayed_work(&hdev->le_scan_disable);
3354 memset(&enable_cp, 0, sizeof(enable_cp));
3355 enable_cp.enable = LE_SCAN_DISABLE;
3356 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
3357 sizeof(enable_cp), &enable_cp);
3362 case DISCOVERY_RESOLVING:
/* Lookup arguments continue on a line not visible in this extract. */
3363 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* No name resolution in flight — complete immediately. */
3366 mgmt_pending_remove(cmd);
3367 err = cmd_complete(sk, hdev->id,
3368 MGMT_OP_STOP_DISCOVERY, 0,
3370 sizeof(mgmt_cp->type));
3371 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Otherwise cancel the outstanding remote name request. */
3375 bacpy(&cp.bdaddr, &e->data.bdaddr);
3376 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3382 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3384 mgmt_pending_remove(cmd);
3385 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3386 MGMT_STATUS_FAILED, &mgmt_cp->type,
3387 sizeof(mgmt_cp->type));
3391 err = hci_req_run(&req, stop_discovery_complete);
3393 mgmt_pending_remove(cmd);
3395 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3398 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CONFIRM_NAME: user space tells us whether the remote
 * device's name is already known; update the inquiry-cache entry so name
 * resolution is skipped or scheduled accordingly.
 */
3402 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3405 struct mgmt_cp_confirm_name *cp = data;
3406 struct inquiry_entry *e;
3409 BT_DBG("%s", hdev->name);
/* Only meaningful while a discovery session is running. */
3413 if (!hci_discovery_active(hdev)) {
3414 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3415 MGMT_STATUS_FAILED);
3419 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
/* Address not in the unknown-name cache — nothing to confirm. */
3421 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3422 MGMT_STATUS_INVALID_PARAMS);
3426 if (cp->name_known) {
3427 e->name_state = NAME_KNOWN;
/* Name still needed: move the entry into the resolve list. */
3430 e->name_state = NAME_NEEDED;
3431 hci_inquiry_cache_update_resolve(hdev, e);
3434 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3438 hci_dev_unlock(hdev);
/* Handle MGMT_OP_BLOCK_DEVICE: add the given address to the controller's
 * blacklist after validating the address type.
 */
3442 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3445 struct mgmt_cp_block_device *cp = data;
3449 BT_DBG("%s", hdev->name);
3451 if (!bdaddr_type_is_valid(cp->addr.type))
3452 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3453 MGMT_STATUS_INVALID_PARAMS,
3454 &cp->addr, sizeof(cp->addr));
3458 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
/* Presumably on hci_blacklist_add() failure — condition line missing
 * from this extract. */
3460 status = MGMT_STATUS_FAILED;
3462 status = MGMT_STATUS_SUCCESS;
3464 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3465 &cp->addr, sizeof(cp->addr));
3467 hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the given address from the
 * blacklist; a miss is reported as INVALID_PARAMS.
 */
3472 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3475 struct mgmt_cp_unblock_device *cp = data;
3479 BT_DBG("%s", hdev->name);
3481 if (!bdaddr_type_is_valid(cp->addr.type))
3482 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3483 MGMT_STATUS_INVALID_PARAMS,
3484 &cp->addr, sizeof(cp->addr));
3488 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3490 status = MGMT_STATUS_INVALID_PARAMS;
3492 status = MGMT_STATUS_SUCCESS;
3494 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3495 &cp->addr, sizeof(cp->addr));
3497 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID (source, vendor,
 * product, version) on the hdev and run an HCI request so the EIR/SDP
 * record can be refreshed.
 */
3502 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3505 struct mgmt_cp_set_device_id *cp = data;
3506 struct hci_request req;
3510 BT_DBG("%s", hdev->name);
3512 source = __le16_to_cpu(cp->source);
/* Valid DI sources are 0x0000-0x0002 (unassigned / SIG / USB-IF). */
3514 if (source > 0x0002)
3515 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3516 MGMT_STATUS_INVALID_PARAMS);
3520 hdev->devid_source = source;
3521 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3522 hdev->devid_product = __le16_to_cpu(cp->product);
3523 hdev->devid_version = __le16_to_cpu(cp->version);
3525 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
/* Request body built on lines not visible in this extract (update_eir). */
3527 hci_req_init(&req, hdev);
3529 hci_req_run(&req, NULL);
3531 hci_dev_unlock(hdev);
/* Request-completion callback for set_advertising: on error, fail all
 * pending SET_ADVERTISING commands with the mapped status; on success,
 * answer them with the new settings and broadcast New Settings.
 */
3536 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3538 struct cmd_lookup match = { NULL, hdev };
3541 u8 mgmt_err = mgmt_status(status);
3543 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3544 cmd_status_rsp, &mgmt_err);
3548 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
/* match.sk is the socket to exclude from the broadcast. */
3551 new_settings(hdev, match.sk);
/* Handle MGMT_OP_SET_ADVERTISING: toggle LE advertising. When no HCI
 * traffic is needed (powered off, no change, or LE connections active) the
 * flag is flipped directly and user space is answered immediately;
 * otherwise an HCI request enabling/disabling advertising is queued.
 */
3557 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3560 struct mgmt_mode *cp = data;
3561 struct pending_cmd *cmd;
3562 struct hci_request req;
3563 u8 val, enabled, status;
3566 BT_DBG("request for %s", hdev->name);
3568 status = mgmt_le_support(hdev);
3570 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
/* Only boolean values are accepted. */
3573 if (cp->val != 0x00 && cp->val != 0x01)
3574 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3575 MGMT_STATUS_INVALID_PARAMS);
3580 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3582 /* The following conditions are ones which mean that we should
3583 * not do any HCI communication but directly send a mgmt
3584 * response to user space (after toggling the flag if
3587 if (!hdev_is_powered(hdev) || val == enabled ||
3588 hci_conn_num(hdev, LE_LINK) > 0) {
3589 bool changed = false;
3591 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3592 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3596 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3601 err = new_settings(hdev, sk);
/* Refuse if a conflicting advertising/LE command is still pending. */
3606 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3607 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3608 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3613 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3619 hci_req_init(&req, hdev);
3622 enable_advertising(&req);
3624 disable_advertising(&req);
3626 err = hci_req_run(&req, set_advertising_complete);
3628 mgmt_pending_remove(cmd);
3631 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_STATIC_ADDRESS: store a static random LE address.
 * Only allowed while powered off; BDADDR_ANY clears it, otherwise the
 * address must be a valid static random address (two MSBs set, and not
 * BDADDR_NONE).
 */
3635 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3636 void *data, u16 len)
3638 struct mgmt_cp_set_static_address *cp = data;
3641 BT_DBG("%s", hdev->name);
3643 if (!lmp_le_capable(hdev))
3644 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3645 MGMT_STATUS_NOT_SUPPORTED);
3647 if (hdev_is_powered(hdev))
3648 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3649 MGMT_STATUS_REJECTED);
3651 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3652 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3653 return cmd_status(sk, hdev->id,
3654 MGMT_OP_SET_STATIC_ADDRESS,
3655 MGMT_STATUS_INVALID_PARAMS);
3657 /* Two most significant bits shall be set */
3658 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3659 return cmd_status(sk, hdev->id,
3660 MGMT_OP_SET_STATIC_ADDRESS,
3661 MGMT_STATUS_INVALID_PARAMS);
3666 bacpy(&hdev->static_addr, &cp->bdaddr);
3668 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3670 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SCAN_PARAMS: validate and store the LE scan interval
 * and window. Both must lie in 0x0004-0x4000 and the window must not
 * exceed the interval.
 */
3675 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3676 void *data, u16 len)
3678 struct mgmt_cp_set_scan_params *cp = data;
3679 __u16 interval, window;
3682 BT_DBG("%s", hdev->name);
3684 if (!lmp_le_capable(hdev))
3685 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3686 MGMT_STATUS_NOT_SUPPORTED);
3688 interval = __le16_to_cpu(cp->interval);
/* Core-spec range for LE scan interval. */
3690 if (interval < 0x0004 || interval > 0x4000)
3691 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3692 MGMT_STATUS_INVALID_PARAMS);
3694 window = __le16_to_cpu(cp->window);
3696 if (window < 0x0004 || window > 0x4000)
3697 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3698 MGMT_STATUS_INVALID_PARAMS);
3700 if (window > interval)
3701 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3702 MGMT_STATUS_INVALID_PARAMS);
3706 hdev->le_scan_interval = interval;
3707 hdev->le_scan_window = window;
3709 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3711 hci_dev_unlock(hdev);
/* Request-completion callback for set_fast_connectable: on error send a
 * command status; on success flip HCI_FAST_CONNECTABLE to match the
 * requested value, answer with settings and broadcast New Settings.
 */
3716 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3718 struct pending_cmd *cmd;
3720 BT_DBG("status 0x%02x", status);
3724 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3729 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3730 mgmt_status(status));
3732 struct mgmt_mode *cp = cmd->param;
3735 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3737 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3739 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3740 new_settings(hdev, cmd->sk);
3743 mgmt_pending_remove(cmd);
3746 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle fast-connectable page scan
 * parameters. Requires BR/EDR enabled, controller >= 1.2, powered on and
 * currently connectable.
 */
3749 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3750 void *data, u16 len)
3752 struct mgmt_mode *cp = data;
3753 struct pending_cmd *cmd;
3754 struct hci_request req;
3757 BT_DBG("%s", hdev->name);
/* Interlaced page scan needs at least Bluetooth 1.2. */
3759 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
3760 hdev->hci_ver < BLUETOOTH_VER_1_2)
3761 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3762 MGMT_STATUS_NOT_SUPPORTED);
3764 if (cp->val != 0x00 && cp->val != 0x01)
3765 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3766 MGMT_STATUS_INVALID_PARAMS);
3768 if (!hdev_is_powered(hdev))
3769 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3770 MGMT_STATUS_NOT_POWERED);
3772 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3773 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3774 MGMT_STATUS_REJECTED);
3778 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3779 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No change requested — answer straight away. */
3784 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3785 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3790 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3797 hci_req_init(&req, hdev);
3799 write_fast_connectable(&req, cp->val);
3801 err = hci_req_run(&req, fast_connectable_complete);
3803 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3804 MGMT_STATUS_FAILED);
3805 mgmt_pending_remove(cmd);
3809 hci_dev_unlock(hdev);
/* Build the HCI Write Scan Enable command reflecting the current
 * CONNECTABLE/DISCOVERABLE flags, first disabling fast-connectable page
 * scan parameters.
 */
3814 static void set_bredr_scan(struct hci_request *req)
3816 struct hci_dev *hdev = req->hdev;
3819 /* Ensure that fast connectable is disabled. This function will
3820 * not do anything if the page scan parameters are already what
3823 write_fast_connectable(req, false);
3825 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3827 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3828 scan |= SCAN_INQUIRY;
3831 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request-completion callback for set_bredr: on error roll the
 * HCI_BREDR_ENABLED flag back (it was flipped optimistically before the
 * request ran) and send a command status; on success answer with settings
 * and broadcast New Settings.
 */
3834 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3836 struct pending_cmd *cmd;
3838 BT_DBG("status 0x%02x", status);
3842 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3847 u8 mgmt_err = mgmt_status(status);
3849 /* We need to restore the flag if related HCI commands
3852 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3854 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
3856 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
3857 new_settings(hdev, cmd->sk);
3860 mgmt_pending_remove(cmd);
3863 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BREDR: enable/disable BR/EDR on a dual-mode
 * controller. Disabling is only possible while powered off (related flags
 * are cleared then); enabling while powered triggers an HCI request that
 * updates scan mode and advertising data.
 * NOTE(review): interior lines (gotos, some conditions, closing braces)
 * are missing from this extract.
 */
3866 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3868 struct mgmt_mode *cp = data;
3869 struct pending_cmd *cmd;
3870 struct hci_request req;
3873 BT_DBG("request for %s", hdev->name);
3875 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
3876 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3877 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR can only be toggled while LE is enabled (LE-only otherwise). */
3879 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3880 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3881 MGMT_STATUS_REJECTED);
3883 if (cp->val != 0x00 && cp->val != 0x01)
3884 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3885 MGMT_STATUS_INVALID_PARAMS);
3889 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3890 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3894 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR invalidates all BR/EDR-only settings. */
3896 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
3897 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
3898 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3899 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3900 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
3903 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3905 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3909 err = new_settings(hdev, sk);
3913 /* Reject disabling when powered on */
3915 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3916 MGMT_STATUS_REJECTED);
3920 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
3921 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3926 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
3932 /* We need to flip the bit already here so that update_adv_data
3933 * generates the correct flags.
3935 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3937 hci_req_init(&req, hdev);
3939 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3940 set_bredr_scan(&req);
3942 /* Since only the advertising data flags will change, there
3943 * is no need to update the scan response data.
3945 update_adv_data(&req);
3947 err = hci_req_run(&req, set_bredr_complete);
3949 mgmt_pending_remove(cmd);
3952 hci_dev_unlock(hdev);
/* Validate one mgmt_ltk_info entry from user space: authenticated and
 * master must be boolean and the address type must be an LE type.
 * NOTE(review): the return statements are on lines missing from this
 * extract.
 */
3956 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3958 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3960 if (key->master != 0x00 && key->master != 0x01)
3962 if (!bdaddr_type_is_le(key->addr.type))
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace the stored SMP long-term
 * keys with the list supplied by user space. The payload length must
 * exactly match key_count entries and every entry must pass ltk_is_valid()
 * before the existing keys are cleared.
 */
3967 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3968 void *cp_data, u16 len)
3970 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3971 u16 key_count, expected_len;
3974 BT_DBG("request for %s", hdev->name);
3976 if (!lmp_le_capable(hdev))
3977 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3978 MGMT_STATUS_NOT_SUPPORTED);
3980 key_count = __le16_to_cpu(cp->key_count);
/* Reject payloads whose size disagrees with the declared key count. */
3982 expected_len = sizeof(*cp) + key_count *
3983 sizeof(struct mgmt_ltk_info);
3984 if (expected_len != len) {
3985 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3987 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3988 MGMT_STATUS_INVALID_PARAMS);
3991 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before touching the existing key store. */
3993 for (i = 0; i < key_count; i++) {
3994 struct mgmt_ltk_info *key = &cp->keys[i];
3996 if (!ltk_is_valid(key))
3997 return cmd_status(sk, hdev->id,
3998 MGMT_OP_LOAD_LONG_TERM_KEYS,
3999 MGMT_STATUS_INVALID_PARAMS);
4004 hci_smp_ltks_clear(hdev);
4006 for (i = 0; i < key_count; i++) {
4007 struct mgmt_ltk_info *key = &cp->keys[i];
4010 if (key->addr.type == BDADDR_LE_PUBLIC)
4011 addr_type = ADDR_LE_DEV_PUBLIC;
4013 addr_type = ADDR_LE_DEV_RANDOM;
/* The condition selecting the key type is on a line missing from
 * this extract. */
4018 type = HCI_SMP_LTK_SLAVE;
4020 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
4021 type, 0, key->authenticated, key->val,
4022 key->enc_size, key->ediv, key->rand);
4025 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4028 hci_dev_unlock(hdev);
/* Dispatch table: indexed by mgmt opcode. Each entry gives the handler,
 * whether the payload is variable-length (true => data_len is a minimum),
 * and the expected/minimum payload size. Order must match the opcode
 * values in mgmt.h.
 */
4033 static const struct mgmt_handler {
4034 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4038 } mgmt_handlers[] = {
4039 { NULL }, /* 0x0000 (no command) */
4040 { read_version, false, MGMT_READ_VERSION_SIZE },
4041 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4042 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4043 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4044 { set_powered, false, MGMT_SETTING_SIZE },
4045 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4046 { set_connectable, false, MGMT_SETTING_SIZE },
4047 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4048 { set_pairable, false, MGMT_SETTING_SIZE },
4049 { set_link_security, false, MGMT_SETTING_SIZE },
4050 { set_ssp, false, MGMT_SETTING_SIZE },
4051 { set_hs, false, MGMT_SETTING_SIZE },
4052 { set_le, false, MGMT_SETTING_SIZE },
4053 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4054 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4055 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4056 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
/* Key-load commands carry a variable-length key array. */
4057 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4058 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4059 { disconnect, false, MGMT_DISCONNECT_SIZE },
4060 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4061 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4062 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4063 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4064 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4065 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4066 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4067 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4068 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4069 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4070 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4071 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4072 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4073 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4074 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4075 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4076 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4077 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4078 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4079 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4080 { set_advertising, false, MGMT_SETTING_SIZE },
4081 { set_bredr, false, MGMT_SETTING_SIZE },
4082 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4083 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
/* Entry point for mgmt messages from user space: copy the message, parse
 * the header (opcode/index/len), resolve the target controller, validate
 * the opcode and payload length against mgmt_handlers[], and dispatch.
 * NOTE(review): several lines (hdr assignment, error gotos, kfree/return
 * path) are missing from this extract.
 */
4087 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4091 struct mgmt_hdr *hdr;
4092 u16 opcode, index, len;
4093 struct hci_dev *hdev = NULL;
4094 const struct mgmt_handler *handler;
4097 BT_DBG("got %zu bytes", msglen);
4099 if (msglen < sizeof(*hdr))
4102 buf = kmalloc(msglen, GFP_KERNEL);
4106 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
/* Header fields are little-endian on the wire. */
4112 opcode = __le16_to_cpu(hdr->opcode);
4113 index = __le16_to_cpu(hdr->index);
4114 len = __le16_to_cpu(hdr->len);
4116 if (len != msglen - sizeof(*hdr)) {
4121 if (index != MGMT_INDEX_NONE) {
4122 hdev = hci_dev_get(index);
4124 err = cmd_status(sk, index, opcode,
4125 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup, or claimed by a user channel, are
 * not visible through mgmt. */
4129 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4130 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4131 err = cmd_status(sk, index, opcode,
4132 MGMT_STATUS_INVALID_INDEX);
4137 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4138 mgmt_handlers[opcode].func == NULL) {
4139 BT_DBG("Unknown op %u", opcode);
4140 err = cmd_status(sk, index, opcode,
4141 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below READ_INFO are global and must not carry an index;
 * the rest require one. */
4145 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4146 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4147 err = cmd_status(sk, index, opcode,
4148 MGMT_STATUS_INVALID_INDEX);
4152 handler = &mgmt_handlers[opcode];
4154 if ((handler->var_len && len < handler->data_len) ||
4155 (!handler->var_len && len != handler->data_len)) {
4156 err = cmd_status(sk, index, opcode,
4157 MGMT_STATUS_INVALID_PARAMS);
4162 mgmt_init_hdev(sk, hdev);
4164 cp = buf + sizeof(*hdr);
4166 err = handler->func(sk, hdev, cp, len);
/* Broadcast Index Added when a BR/EDR controller is registered; AMP
 * controllers are not exposed through mgmt.
 */
4180 void mgmt_index_added(struct hci_dev *hdev)
4182 if (hdev->dev_type != HCI_BREDR)
4185 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* On controller removal: fail every pending command with INVALID_INDEX,
 * then broadcast Index Removed.
 */
4188 void mgmt_index_removed(struct hci_dev *hdev)
4190 u8 status = MGMT_STATUS_INVALID_INDEX;
4192 if (hdev->dev_type != HCI_BREDR)
/* Opcode 0 means "match every pending command". */
4195 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4197 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
/* Completion callback for the power-up HCI request: answer all pending
 * SET_POWERED commands and broadcast the new settings.
 */
4200 static void powered_complete(struct hci_dev *hdev, u8 status)
4202 struct cmd_lookup match = { NULL, hdev };
4204 BT_DBG("status 0x%02x", status);
4208 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4210 new_settings(hdev, match.sk);
4212 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's HCI-level
 * state in line with the mgmt flags after power-on: SSP mode, LE host
 * support, static random address, advertising data, link security and
 * BR/EDR scan mode. Returns the hci_req_run() result.
 */
4218 static int powered_update_hci(struct hci_dev *hdev)
4220 struct hci_request req;
4223 hci_req_init(&req, hdev);
/* Enable SSP in the controller if mgmt wants it but the host feature
 * bit is not yet set. */
4225 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4226 !lmp_host_ssp_capable(hdev)) {
4229 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4232 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4233 lmp_bredr_capable(hdev)) {
4234 struct hci_cp_write_le_host_supported cp;
4237 cp.simul = lmp_le_br_capable(hdev);
4239 /* Check first if we already have the right
4240 * host state (host features set)
4242 if (cp.le != lmp_host_le_capable(hdev) ||
4243 cp.simul != lmp_host_le_br_capable(hdev))
4244 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4248 if (lmp_le_capable(hdev)) {
4249 /* Set random address to static address if configured */
4250 if (bacmp(&hdev->static_addr, BDADDR_ANY))
4251 hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
4252 &hdev->static_addr);
4254 /* Make sure the controller has a good default for
4255 * advertising data. This also applies to the case
4256 * where BR/EDR was toggled during the AUTO_OFF phase.
4258 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4259 update_adv_data(&req);
4260 update_scan_rsp_data(&req);
4263 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4264 enable_advertising(&req);
/* Sync authentication-enable with the LINK_SECURITY flag. */
4267 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4268 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4269 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4270 sizeof(link_sec), &link_sec);
4272 if (lmp_bredr_capable(hdev)) {
4273 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4274 set_bredr_scan(&req);
4280 return hci_req_run(&req, powered_complete);
/* Notify mgmt of a power state change. On power-on, run the HCI sync-up
 * request (responses are deferred to powered_complete on success). On
 * power-off, answer pending SET_POWERED commands, fail all other pending
 * commands with NOT_POWERED, report a zeroed class of device if needed,
 * and broadcast New Settings.
 * NOTE(review): branch structure between the visible lines is missing
 * from this extract.
 */
4283 int mgmt_powered(struct hci_dev *hdev, u8 powered)
4285 struct cmd_lookup match = { NULL, hdev };
4286 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4287 u8 zero_cod[] = { 0, 0, 0 };
4290 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4294 if (powered_update_hci(hdev) == 0)
4297 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4302 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4303 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4305 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4306 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4307 zero_cod, sizeof(zero_cod), NULL);
4310 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command after power-on failed, mapping
 * -ERFKILL to MGMT_STATUS_RFKILLED.
 */
4318 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4320 struct pending_cmd *cmd;
4323 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4327 if (err == -ERFKILL)
4328 status = MGMT_STATUS_RFKILLED;
4330 status = MGMT_STATUS_FAILED;
4332 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4334 mgmt_pending_remove(cmd);
/* Discoverable-timeout handler: clear limited discoverability and write a
 * page-scan-only scan mode to the controller.
 */
4337 void mgmt_discoverable_timeout(struct hci_dev *hdev)
4339 struct hci_request req;
4340 u8 scan = SCAN_PAGE;
4344 /* When discoverable timeout triggers, then just make sure
4345 * the limited discoverable flag is cleared. Even in the case
4346 * of a timeout triggered from general discoverable, it is
4347 * safe to unconditionally clear the flag.
4349 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4351 hci_req_init(&req, hdev);
4352 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
4354 hci_req_run(&req, NULL);
4356 hdev->discov_timeout = 0;
4358 hci_dev_unlock(hdev);
/* Sync the DISCOVERABLE flag with the controller-reported state and
 * broadcast New Settings if it actually changed.
 */
4361 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4365 /* Nothing needed here if there's a pending command since that
4366 * commands request completion callback takes care of everything
4369 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4373 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4375 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4378 new_settings(hdev, NULL);
/* Same as mgmt_discoverable(), but for the CONNECTABLE flag. */
4381 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4385 /* Nothing needed here if there's a pending command since that
4386 * commands request completion callback takes care of everything
4389 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4393 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4395 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4398 new_settings(hdev, NULL);
/* A Write Scan Enable command failed: fail the pending connectable and/or
 * discoverable commands matching the scan bits that were being written.
 */
4401 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4403 u8 mgmt_err = mgmt_status(status);
4405 if (scan & SCAN_PAGE)
4406 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4407 cmd_status_rsp, &mgmt_err);
4409 if (scan & SCAN_INQUIRY)
4410 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4411 cmd_status_rsp, &mgmt_err);
/* Emit a New Link Key event for a freshly created BR/EDR link key;
 * store_hint tells user space whether to persist it.
 */
4414 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4417 struct mgmt_ev_new_link_key ev;
4419 memset(&ev, 0, sizeof(ev));
4421 ev.store_hint = persistent;
4422 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4423 ev.key.addr.type = BDADDR_BREDR;
4424 ev.key.type = key->type;
4425 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4426 ev.key.pin_len = key->pin_len;
4428 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Long Term Key event for a freshly distributed SMP LTK.
 * NOTE(review): the line between the HCI_SMP_LTK check and the rand/val
 * copies (setting ev.key.master) is missing from this extract.
 */
4431 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4433 struct mgmt_ev_new_long_term_key ev;
4435 memset(&ev, 0, sizeof(ev));
4437 ev.store_hint = persistent;
4438 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4439 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
4440 ev.key.authenticated = key->authenticated;
4441 ev.key.enc_size = key->enc_size;
4442 ev.key.ediv = key->ediv;
4444 if (key->type == HCI_SMP_LTK)
4447 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4448 memcpy(ev.key.val, key->val, sizeof(key->val));
4450 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, data) to @eir at offset
 * @eir_len and return the new length. Caller guarantees the buffer fits.
 */
4453 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4456 eir[eir_len++] = sizeof(type) + data_len;
4457 eir[eir_len++] = type;
4458 memcpy(&eir[eir_len], data, data_len);
4459 eir_len += data_len;
/* Emit a Device Connected event carrying the connection flags plus the
 * remote name and class of device packed as EIR fields.
 */
4464 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4465 u8 addr_type, u32 flags, u8 *name, u8 name_len,
4469 struct mgmt_ev_device_connected *ev = (void *) buf;
4472 bacpy(&ev->addr.bdaddr, bdaddr);
4473 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4475 ev->flags = __cpu_to_le32(flags);
4478 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include the class of device when it is non-zero. */
4481 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
4482 eir_len = eir_append_data(ev->eir, eir_len,
4483 EIR_CLASS_OF_DEV, dev_class, 3);
4485 ev->eir_len = cpu_to_le16(eir_len);
4487 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
4488 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete one pending DISCONNECT
 * command with success, remembering its socket in *data so the caller can
 * skip it when broadcasting the disconnect event.
 */
4491 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4493 struct mgmt_cp_disconnect *cp = cmd->param;
4494 struct sock **sk = data;
4495 struct mgmt_rp_disconnect rp;
4497 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4498 rp.addr.type = cp->addr.type;
4500 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4506 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: complete one pending UNPAIR_DEVICE
 * command and notify user space that the device was unpaired.
 */
4509 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
4511 struct hci_dev *hdev = data;
4512 struct mgmt_cp_unpair_device *cp = cmd->param;
4513 struct mgmt_rp_unpair_device rp;
4515 memset(&rp, 0, sizeof(rp));
4516 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4517 rp.addr.type = cp->addr.type;
4519 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
4521 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
4523 mgmt_pending_remove(cmd);
4526 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
4527 u8 link_type, u8 addr_type, u8 reason)
4529 struct mgmt_ev_device_disconnected ev;
4530 struct sock *sk = NULL;
4532 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
4534 bacpy(&ev.addr.bdaddr, bdaddr);
4535 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4538 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
4543 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* An HCI disconnect attempt failed: flush pending unpair commands, then
 * if a DISCONNECT command is pending, complete it with the HCI status
 * translated to a mgmt error and remove it.
 * NOTE(review): the NULL guard that normally follows mgmt_pending_find()
 * appears to be among the dropped lines (4556 -> 4560) in this extract.
 */
4547 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
4548 u8 link_type, u8 addr_type, u8 status)
4550 struct mgmt_rp_disconnect rp;
4551 struct pending_cmd *cmd;
4553 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4556 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
4560 bacpy(&rp.addr.bdaddr, bdaddr);
4561 rp.addr.type = link_to_bdaddr(link_type, addr_type);
4563 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
4564 mgmt_status(status), &rp, sizeof(rp));
4566 mgmt_pending_remove(cmd);
/* Broadcast the Connect Failed mgmt event for a connection attempt that
 * failed with the given HCI status (translated to a mgmt error code).
 */
4569 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4570 u8 addr_type, u8 status)
4572 struct mgmt_ev_connect_failed ev;
4574 bacpy(&ev.addr.bdaddr, bdaddr);
4575 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4576 ev.status = mgmt_status(status);
4578 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Broadcast the PIN Code Request event. PIN pairing exists only on
 * BR/EDR, hence the hard-coded BDADDR_BREDR address type.
 * NOTE(review): the 'secure' parameter is unused in the visible lines —
 * presumably an ev.secure assignment was dropped (4586 -> 4589).
 */
4581 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4583 struct mgmt_ev_pin_code_request ev;
4585 bacpy(&ev.addr.bdaddr, bdaddr);
4586 ev.addr.type = BDADDR_BREDR;
4589 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* HCI completed our PIN code reply: finish the pending
 * MGMT_OP_PIN_CODE_REPLY command with the translated status.
 * NOTE(review): the NULL check after mgmt_pending_find() appears to be
 * among the dropped lines (4598 -> 4602) in this extract.
 */
4592 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4595 struct pending_cmd *cmd;
4596 struct mgmt_rp_pin_code_reply rp;
4598 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4602 bacpy(&rp.addr.bdaddr, bdaddr);
4603 rp.addr.type = BDADDR_BREDR;
4605 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
4606 mgmt_status(status), &rp, sizeof(rp));
4608 mgmt_pending_remove(cmd);
/* HCI completed our negative PIN code reply: finish the pending
 * MGMT_OP_PIN_CODE_NEG_REPLY command with the translated status.
 * Mirrors mgmt_pin_code_reply_complete() above.
 * NOTE(review): the NULL check after mgmt_pending_find() appears to be
 * among the dropped lines (4617 -> 4621) in this extract.
 */
4611 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4614 struct pending_cmd *cmd;
4615 struct mgmt_rp_pin_code_reply rp;
4617 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
4621 bacpy(&rp.addr.bdaddr, bdaddr);
4622 rp.addr.type = BDADDR_BREDR;
4624 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
4625 mgmt_status(status), &rp, sizeof(rp));
4627 mgmt_pending_remove(cmd);
/* Ask user space to confirm a pairing (numeric comparison / just-works):
 * broadcast the User Confirm Request event and return mgmt_event()'s
 * result.
 * NOTE(review): numbering jumps (4640 -> 4643) — the ev.value
 * assignment and mgmt_event()'s final argument appear to be among the
 * dropped lines in this extract.
 */
4630 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4631 u8 link_type, u8 addr_type, __le32 value,
4634 struct mgmt_ev_user_confirm_request ev;
4636 BT_DBG("%s", hdev->name);
4638 bacpy(&ev.addr.bdaddr, bdaddr);
4639 ev.addr.type = link_to_bdaddr(link_type, addr_type);
/* confirm_hint tells user space whether to show the value for comparison */
4640 ev.confirm_hint = confirm_hint;
4643 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask user space to enter a passkey for pairing: broadcast the User
 * Passkey Request event and return mgmt_event()'s result.
 * NOTE(review): mgmt_event()'s final argument falls past the last
 * visible line of this statement (4657 continues beyond the extract).
 */
4647 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4648 u8 link_type, u8 addr_type)
4650 struct mgmt_ev_user_passkey_request ev;
4652 BT_DBG("%s", hdev->name);
4654 bacpy(&ev.addr.bdaddr, bdaddr);
4655 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4657 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common helper for completing any pending user confirm/passkey
 * (negative-)reply command identified by 'opcode': fill the address
 * response, complete the command with the translated status, remove it,
 * and return cmd_complete()'s result via 'err'.
 * NOTE(review): the NULL guard after mgmt_pending_find() and the size
 * argument of cmd_complete() appear among the dropped lines.
 */
4661 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4662 u8 link_type, u8 addr_type, u8 status,
4665 struct pending_cmd *cmd;
4666 struct mgmt_rp_user_confirm_reply rp;
4669 cmd = mgmt_pending_find(opcode, hdev);
4673 bacpy(&rp.addr.bdaddr, bdaddr);
4674 rp.addr.type = link_to_bdaddr(link_type, addr_type);
4675 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
4678 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending MGMT_OP_USER_CONFIRM_REPLY. */
4683 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4684 u8 link_type, u8 addr_type, u8 status)
4686 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4687 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending MGMT_OP_USER_CONFIRM_NEG_REPLY.
 * NOTE(review): the 'status' argument line appears dropped (4693 -> 4695)
 * in this extract.
 */
4690 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4691 u8 link_type, u8 addr_type, u8 status)
4693 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4695 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending MGMT_OP_USER_PASSKEY_REPLY. */
4698 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4699 u8 link_type, u8 addr_type, u8 status)
4701 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4702 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending MGMT_OP_USER_PASSKEY_NEG_REPLY.
 * NOTE(review): the 'status' argument line appears dropped (4708 -> 4710)
 * in this extract.
 */
4705 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4706 u8 link_type, u8 addr_type, u8 status)
4708 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4710 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Broadcast the Passkey Notify event so user space can display the
 * passkey to the user; returns mgmt_event()'s result.
 * NOTE(review): 'entered' presumably counts keys typed so far on the
 * remote side — not derivable from the visible lines; confirm.
 */
4713 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4714 u8 link_type, u8 addr_type, u32 passkey,
4717 struct mgmt_ev_passkey_notify ev;
4719 BT_DBG("%s", hdev->name);
4721 bacpy(&ev.addr.bdaddr, bdaddr);
4722 ev.addr.type = link_to_bdaddr(link_type, addr_type);
/* Wire format is little-endian, host u32 must be converted */
4723 ev.passkey = __cpu_to_le32(passkey);
4724 ev.entered = entered;
4726 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Broadcast the Authentication Failed event with the HCI status
 * translated to a mgmt error code.
 */
4729 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4730 u8 addr_type, u8 status)
4732 struct mgmt_ev_auth_failed ev;
4734 bacpy(&ev.addr.bdaddr, bdaddr);
4735 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4736 ev.status = mgmt_status(status);
4738 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Controller finished an auth-enable write. On failure, fail every
 * pending SET_LINK_SECURITY command with the translated status. On
 * success, sync the HCI_LINK_SECURITY dev-flag with the controller's
 * HCI_AUTH flag, answer pending commands with the current settings and,
 * if the flag actually changed, emit New Settings.
 * NOTE(review): the if/else scaffolding (status test, flag arguments,
 * 'changed' guard) is partially dropped in this extract — numbering
 * jumps at 4743 -> 4747, 4754 -> 4757, 4760 -> 4764.
 */
4741 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4743 struct cmd_lookup match = { NULL, hdev };
4747 u8 mgmt_err = mgmt_status(status);
4748 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4749 cmd_status_rsp, &mgmt_err);
4753 if (test_bit(HCI_AUTH, &hdev->flags))
4754 changed = !test_and_set_bit(HCI_LINK_SECURITY,
4757 changed = test_and_clear_bit(HCI_LINK_SECURITY,
4760 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4764 new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command with an all-zero payload, clearing the
 * controller's extended inquiry response and the cached copy in
 * hdev->eir. No-op when the controller lacks extended-inquiry support.
 */
4770 static void clear_eir(struct hci_request *req)
4772 struct hci_dev *hdev = req->hdev;
4773 struct hci_cp_write_eir cp;
4775 if (!lmp_ext_inq_capable(hdev))
4778 memset(hdev->eir, 0, sizeof(hdev->eir));
4780 memset(&cp, 0, sizeof(cp));
4782 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Controller finished an SSP mode write. On failure: roll back the
 * HCI_SSP_ENABLED flag (and the dependent HCI_HS_ENABLED flag), tell
 * user space via New Settings, and fail pending SET_SSP commands. On
 * success: sync HCI_SSP_ENABLED with the requested 'enable' value,
 * answer pending SET_SSP commands with current settings, emit New
 * Settings if anything changed, and update/clear the EIR accordingly.
 * NOTE(review): several structural lines are dropped in this extract
 * (numbering jumps at 4800 -> 4806, 4819 -> 4824, 4826 -> 4831) —
 * among them the else-branches and the update_eir()/clear_eir() choice;
 * confirm against the full file.
 */
4785 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4787 struct cmd_lookup match = { NULL, hdev };
4788 struct hci_request req;
4789 bool changed = false;
4792 u8 mgmt_err = mgmt_status(status);
/* Failure while enabling: undo the optimistically-set flag */
4794 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4795 &hdev->dev_flags)) {
/* High Speed requires SSP, so it must be cleared along with it */
4796 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4797 new_settings(hdev, NULL);
4800 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4806 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4808 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4810 changed = test_and_clear_bit(HCI_HS_ENABLED,
4813 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4816 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4819 new_settings(hdev, match.sk);
4824 hci_req_init(&req, hdev);
4826 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4831 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup match, taking a reference the caller must
 * release.
 */
4834 static void sk_lookup(struct pending_cmd *cmd, void *data)
4836 struct cmd_lookup *match = data;
4838 if (match->sk == NULL) {
4839 match->sk = cmd->sk;
4840 sock_hold(match->sk);
/* Class of Device write completed: locate a requester socket among the
 * pending SET_DEV_CLASS / ADD_UUID / REMOVE_UUID commands and broadcast
 * the 3-byte Class Of Dev Changed event (skipping that socket).
 * NOTE(review): the success-status guard before mgmt_event() and the
 * sock_put() release appear among the dropped lines (4851 -> 4854,
 * 4854 -> 4861); confirm against the full file.
 */
4844 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4847 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4849 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4850 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4851 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4854 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Local name write completed: cache the new name in hdev->dev_name and
 * broadcast Local Name Changed (excluding the requester's socket, if a
 * SET_LOCAL_NAME command is pending). Signals are suppressed while a
 * SET_POWERED command is pending, since power-on programs the name as a
 * side effect and user space should not see a spurious change event.
 * NOTE(review): dropped lines around 4875/4880 likely include a status
 * guard and the early return; confirm against the full file.
 */
4861 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4863 struct mgmt_cp_set_local_name ev;
4864 struct pending_cmd *cmd;
4869 memset(&ev, 0, sizeof(ev));
4870 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4871 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4873 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4875 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4877 /* If this is a HCI command related to powering on the
4878 * HCI dev don't send any mgmt signals.
4880 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4884 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4885 cmd ? cmd->sk : NULL);
/* Read Local OOB Data completed: answer the pending
 * MGMT_OP_READ_LOCAL_OOB_DATA command — a bare error status on failure,
 * or a response carrying the hash/randomizer pair on success — then
 * remove the pending command.
 * NOTE(review): the NULL guard after mgmt_pending_find() and the
 * if/else around the status appear among the dropped lines
 * (4895 -> 4900, 4901 -> 4903).
 */
4888 void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4889 u8 *randomizer, u8 status)
4891 struct pending_cmd *cmd;
4893 BT_DBG("%s status %u", hdev->name, status);
4895 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4900 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4901 mgmt_status(status));
4903 struct mgmt_rp_read_local_oob_data rp;
4905 memcpy(rp.hash, hash, sizeof(rp.hash));
4906 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4908 cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4909 0, &rp, sizeof(rp));
4912 mgmt_pending_remove(cmd);
/* Inquiry/scan result arrived: build a Device Found event on the stack
 * buffer 'buf', append the raw EIR data plus (if the EIR lacks one and
 * dev_class is given) a synthesized Class of Device field, and
 * broadcast it. Results are ignored unless discovery is active, and
 * oversized EIR payloads (no room for event + EIR + 5-byte CoD field)
 * are dropped.
 * NOTE(review): the declaration of 'buf' and 'ev_size', the cfm_name /
 * ssp flag guards, and several closing lines are among the dropped
 * lines in this extract (numbering jumps at 4917 -> 4920, 4933 -> 4936,
 * 4938 -> 4941, 4944 -> 4947).
 */
4915 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4916 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4917 ssp, u8 *eir, u16 eir_len)
4920 struct mgmt_ev_device_found *ev = (void *) buf;
4923 if (!hci_discovery_active(hdev))
4926 /* Leave 5 bytes for a potential CoD field */
4927 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4930 memset(buf, 0, sizeof(buf));
4932 bacpy(&ev->addr.bdaddr, bdaddr);
4933 ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* Ask user space to confirm the remote name when HCI couldn't */
4936 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4938 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4941 memcpy(ev->eir, eir, eir_len);
4943 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4944 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4947 ev->eir_len = cpu_to_le16(eir_len);
4948 ev_size = sizeof(*ev) + eir_len;
4950 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* A remote name request resolved during discovery: emit a Device Found
 * event whose EIR contains only a Complete Local Name field built from
 * 'name'. The stack buffer is sized for the event header plus a
 * maximum-length name and the 2-byte EIR field header.
 * NOTE(review): dropped lines (4957 -> 4960, 4965 -> 4968) likely
 * include the eir_len declaration and an rssi assignment; confirm
 * against the full file.
 */
4953 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4954 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4956 struct mgmt_ev_device_found *ev;
4957 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4960 ev = (struct mgmt_ev_device_found *) buf;
4962 memset(buf, 0, sizeof(buf));
4964 bacpy(&ev->addr.bdaddr, bdaddr);
4965 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4968 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4971 ev->eir_len = cpu_to_le16(eir_len);
4973 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Discovery state changed: complete whichever of START_DISCOVERY /
 * STOP_DISCOVERY is pending (returning the discovery type as the
 * response payload), then broadcast the Discovering event with the
 * current type and new on/off state.
 * NOTE(review): the branch selecting between the two pending lookups
 * and cmd_complete()'s size argument are among the dropped lines
 * (4981 -> 4984, 4991/4993).
 */
4976 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4978 struct mgmt_ev_discovering ev;
4979 struct pending_cmd *cmd;
4981 BT_DBG("%s discovering %u", hdev->name, discovering);
4984 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4986 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4989 u8 type = hdev->discovery.type;
4991 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4993 mgmt_pending_remove(cmd);
4996 memset(&ev, 0, sizeof(ev));
4997 ev.type = hdev->discovery.type;
4998 ev.discovering = discovering;
5000 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast the Device Blocked event, excluding the socket of the
 * pending BLOCK_DEVICE command (if any) since it gets a command
 * response instead; returns mgmt_event()'s result.
 */
5003 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5005 struct pending_cmd *cmd;
5006 struct mgmt_ev_device_blocked ev;
5008 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5010 bacpy(&ev.addr.bdaddr, bdaddr);
5011 ev.addr.type = type;
5013 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5014 cmd ? cmd->sk : NULL);
/* Broadcast the Device Unblocked event, excluding the socket of the
 * pending UNBLOCK_DEVICE command (if any); mirrors
 * mgmt_device_blocked() above.
 */
5017 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5019 struct pending_cmd *cmd;
5020 struct mgmt_ev_device_unblocked ev;
5022 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5024 bacpy(&ev.addr.bdaddr, bdaddr);
5025 ev.addr.type = type;
5027 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5028 cmd ? cmd->sk : NULL);
/* Completion callback for the advertising re-enable request queued by
 * mgmt_reenable_advertising(): if it failed, drop the HCI_ADVERTISING
 * setting and emit New Settings so user space learns advertising is off.
 * NOTE(review): the status test guarding the clear/new_settings pair
 * appears among the dropped lines (5035 -> 5037).
 */
5031 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5033 BT_DBG("%s status %u", hdev->name, status);
5035 /* Clear the advertising mgmt setting if we failed to re-enable it */
5037 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5038 new_settings(hdev, NULL);
5042 void mgmt_reenable_advertising(struct hci_dev *hdev)
5044 struct hci_request req;
5046 if (hci_conn_num(hdev, LE_LINK) > 0)
5049 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5052 hci_req_init(&req, hdev);
5053 enable_advertising(&req);
5055 /* If this fails we have no option but to let user space know
5056 * that we've disabled advertising.
5058 if (hci_req_run(&req, adv_enable_complete) < 0) {
5059 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5060 new_settings(hdev, NULL);