/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 4
39 static const u16 mgmt_commands[] = {
40 MGMT_OP_READ_INDEX_LIST,
43 MGMT_OP_SET_DISCOVERABLE,
44 MGMT_OP_SET_CONNECTABLE,
45 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_LINK_SECURITY,
51 MGMT_OP_SET_DEV_CLASS,
52 MGMT_OP_SET_LOCAL_NAME,
55 MGMT_OP_LOAD_LINK_KEYS,
56 MGMT_OP_LOAD_LONG_TERM_KEYS,
58 MGMT_OP_GET_CONNECTIONS,
59 MGMT_OP_PIN_CODE_REPLY,
60 MGMT_OP_PIN_CODE_NEG_REPLY,
61 MGMT_OP_SET_IO_CAPABILITY,
63 MGMT_OP_CANCEL_PAIR_DEVICE,
64 MGMT_OP_UNPAIR_DEVICE,
65 MGMT_OP_USER_CONFIRM_REPLY,
66 MGMT_OP_USER_CONFIRM_NEG_REPLY,
67 MGMT_OP_USER_PASSKEY_REPLY,
68 MGMT_OP_USER_PASSKEY_NEG_REPLY,
69 MGMT_OP_READ_LOCAL_OOB_DATA,
70 MGMT_OP_ADD_REMOTE_OOB_DATA,
71 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
72 MGMT_OP_START_DISCOVERY,
73 MGMT_OP_STOP_DISCOVERY,
76 MGMT_OP_UNBLOCK_DEVICE,
77 MGMT_OP_SET_DEVICE_ID,
78 MGMT_OP_SET_ADVERTISING,
80 MGMT_OP_SET_STATIC_ADDRESS,
81 MGMT_OP_SET_SCAN_PARAMS,
84 static const u16 mgmt_events[] = {
85 MGMT_EV_CONTROLLER_ERROR,
87 MGMT_EV_INDEX_REMOVED,
89 MGMT_EV_CLASS_OF_DEV_CHANGED,
90 MGMT_EV_LOCAL_NAME_CHANGED,
92 MGMT_EV_NEW_LONG_TERM_KEY,
93 MGMT_EV_DEVICE_CONNECTED,
94 MGMT_EV_DEVICE_DISCONNECTED,
95 MGMT_EV_CONNECT_FAILED,
96 MGMT_EV_PIN_CODE_REQUEST,
97 MGMT_EV_USER_CONFIRM_REQUEST,
98 MGMT_EV_USER_PASSKEY_REQUEST,
100 MGMT_EV_DEVICE_FOUND,
102 MGMT_EV_DEVICE_BLOCKED,
103 MGMT_EV_DEVICE_UNBLOCKED,
104 MGMT_EV_DEVICE_UNPAIRED,
105 MGMT_EV_PASSKEY_NOTIFY,
108 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
110 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
111 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
114 struct list_head list;
122 /* HCI to MGMT error code conversion table */
123 static u8 mgmt_status_table[] = {
125 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
126 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
127 MGMT_STATUS_FAILED, /* Hardware Failure */
128 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
129 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
130 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
131 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
132 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
133 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
134 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
135 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
136 MGMT_STATUS_BUSY, /* Command Disallowed */
137 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
138 MGMT_STATUS_REJECTED, /* Rejected Security */
139 MGMT_STATUS_REJECTED, /* Rejected Personal */
140 MGMT_STATUS_TIMEOUT, /* Host Timeout */
141 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
142 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
143 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
144 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
145 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
146 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
147 MGMT_STATUS_BUSY, /* Repeated Attempts */
148 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
149 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
150 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
151 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
152 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
153 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
154 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
155 MGMT_STATUS_FAILED, /* Unspecified Error */
156 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
157 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
158 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
159 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
160 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
161 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
162 MGMT_STATUS_FAILED, /* Unit Link Key Used */
163 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
164 MGMT_STATUS_TIMEOUT, /* Instant Passed */
165 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
166 MGMT_STATUS_FAILED, /* Transaction Collision */
167 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
168 MGMT_STATUS_REJECTED, /* QoS Rejected */
169 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
170 MGMT_STATUS_REJECTED, /* Insufficient Security */
171 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
172 MGMT_STATUS_BUSY, /* Role Switch Pending */
173 MGMT_STATUS_FAILED, /* Slot Violation */
174 MGMT_STATUS_FAILED, /* Role Switch Failed */
175 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
176 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
177 MGMT_STATUS_BUSY, /* Host Busy Pairing */
178 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
179 MGMT_STATUS_BUSY, /* Controller Busy */
180 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
181 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
182 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
183 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
184 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
187 static u8 mgmt_status(u8 hci_status)
189 if (hci_status < ARRAY_SIZE(mgmt_status_table))
190 return mgmt_status_table[hci_status];
192 return MGMT_STATUS_FAILED;
195 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
198 struct mgmt_hdr *hdr;
199 struct mgmt_ev_cmd_status *ev;
202 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
204 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
208 hdr = (void *) skb_put(skb, sizeof(*hdr));
210 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
211 hdr->index = cpu_to_le16(index);
212 hdr->len = cpu_to_le16(sizeof(*ev));
214 ev = (void *) skb_put(skb, sizeof(*ev));
216 ev->opcode = cpu_to_le16(cmd);
218 err = sock_queue_rcv_skb(sk, skb);
225 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
226 void *rp, size_t rp_len)
229 struct mgmt_hdr *hdr;
230 struct mgmt_ev_cmd_complete *ev;
233 BT_DBG("sock %p", sk);
235 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
239 hdr = (void *) skb_put(skb, sizeof(*hdr));
241 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
242 hdr->index = cpu_to_le16(index);
243 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
245 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
246 ev->opcode = cpu_to_le16(cmd);
250 memcpy(ev->data, rp, rp_len);
252 err = sock_queue_rcv_skb(sk, skb);
259 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
262 struct mgmt_rp_read_version rp;
264 BT_DBG("sock %p", sk);
266 rp.version = MGMT_VERSION;
267 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
269 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
273 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
276 struct mgmt_rp_read_commands *rp;
277 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
278 const u16 num_events = ARRAY_SIZE(mgmt_events);
283 BT_DBG("sock %p", sk);
285 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
287 rp = kmalloc(rp_size, GFP_KERNEL);
291 rp->num_commands = __constant_cpu_to_le16(num_commands);
292 rp->num_events = __constant_cpu_to_le16(num_events);
294 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
295 put_unaligned_le16(mgmt_commands[i], opcode);
297 for (i = 0; i < num_events; i++, opcode++)
298 put_unaligned_le16(mgmt_events[i], opcode);
300 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
307 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
310 struct mgmt_rp_read_index_list *rp;
316 BT_DBG("sock %p", sk);
318 read_lock(&hci_dev_list_lock);
321 list_for_each_entry(d, &hci_dev_list, list) {
322 if (d->dev_type == HCI_BREDR)
326 rp_len = sizeof(*rp) + (2 * count);
327 rp = kmalloc(rp_len, GFP_ATOMIC);
329 read_unlock(&hci_dev_list_lock);
334 list_for_each_entry(d, &hci_dev_list, list) {
335 if (test_bit(HCI_SETUP, &d->dev_flags))
338 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
341 if (d->dev_type == HCI_BREDR) {
342 rp->index[count++] = cpu_to_le16(d->id);
343 BT_DBG("Added hci%u", d->id);
347 rp->num_controllers = cpu_to_le16(count);
348 rp_len = sizeof(*rp) + (2 * count);
350 read_unlock(&hci_dev_list_lock);
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
360 static u32 get_supported_settings(struct hci_dev *hdev)
364 settings |= MGMT_SETTING_POWERED;
365 settings |= MGMT_SETTING_PAIRABLE;
367 if (lmp_bredr_capable(hdev)) {
368 settings |= MGMT_SETTING_CONNECTABLE;
369 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
370 settings |= MGMT_SETTING_FAST_CONNECTABLE;
371 settings |= MGMT_SETTING_DISCOVERABLE;
372 settings |= MGMT_SETTING_BREDR;
373 settings |= MGMT_SETTING_LINK_SECURITY;
375 if (lmp_ssp_capable(hdev)) {
376 settings |= MGMT_SETTING_SSP;
377 settings |= MGMT_SETTING_HS;
381 if (lmp_le_capable(hdev)) {
382 settings |= MGMT_SETTING_LE;
383 settings |= MGMT_SETTING_ADVERTISING;
389 static u32 get_current_settings(struct hci_dev *hdev)
393 if (hdev_is_powered(hdev))
394 settings |= MGMT_SETTING_POWERED;
396 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
397 settings |= MGMT_SETTING_CONNECTABLE;
399 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
400 settings |= MGMT_SETTING_FAST_CONNECTABLE;
402 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
403 settings |= MGMT_SETTING_DISCOVERABLE;
405 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
406 settings |= MGMT_SETTING_PAIRABLE;
408 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
409 settings |= MGMT_SETTING_BREDR;
411 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
412 settings |= MGMT_SETTING_LE;
414 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
415 settings |= MGMT_SETTING_LINK_SECURITY;
417 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
418 settings |= MGMT_SETTING_SSP;
420 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
421 settings |= MGMT_SETTING_HS;
423 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
424 settings |= MGMT_SETTING_ADVERTISING;
429 #define PNP_INFO_SVCLASS_ID 0x1200
431 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
433 u8 *ptr = data, *uuids_start = NULL;
434 struct bt_uuid *uuid;
439 list_for_each_entry(uuid, &hdev->uuids, list) {
442 if (uuid->size != 16)
445 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
449 if (uuid16 == PNP_INFO_SVCLASS_ID)
455 uuids_start[1] = EIR_UUID16_ALL;
459 /* Stop if not enough space to put next UUID */
460 if ((ptr - data) + sizeof(u16) > len) {
461 uuids_start[1] = EIR_UUID16_SOME;
465 *ptr++ = (uuid16 & 0x00ff);
466 *ptr++ = (uuid16 & 0xff00) >> 8;
467 uuids_start[0] += sizeof(uuid16);
473 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
475 u8 *ptr = data, *uuids_start = NULL;
476 struct bt_uuid *uuid;
481 list_for_each_entry(uuid, &hdev->uuids, list) {
482 if (uuid->size != 32)
488 uuids_start[1] = EIR_UUID32_ALL;
492 /* Stop if not enough space to put next UUID */
493 if ((ptr - data) + sizeof(u32) > len) {
494 uuids_start[1] = EIR_UUID32_SOME;
498 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
500 uuids_start[0] += sizeof(u32);
506 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
508 u8 *ptr = data, *uuids_start = NULL;
509 struct bt_uuid *uuid;
514 list_for_each_entry(uuid, &hdev->uuids, list) {
515 if (uuid->size != 128)
521 uuids_start[1] = EIR_UUID128_ALL;
525 /* Stop if not enough space to put next UUID */
526 if ((ptr - data) + 16 > len) {
527 uuids_start[1] = EIR_UUID128_SOME;
531 memcpy(ptr, uuid->uuid, 16);
533 uuids_start[0] += 16;
539 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
541 u8 ad_len = 0, flags = 0;
544 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
545 flags |= LE_AD_GENERAL;
547 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
548 if (lmp_le_br_capable(hdev))
549 flags |= LE_AD_SIM_LE_BREDR_CTRL;
550 if (lmp_host_le_br_capable(hdev))
551 flags |= LE_AD_SIM_LE_BREDR_HOST;
553 flags |= LE_AD_NO_BREDR;
557 BT_DBG("adv flags 0x%02x", flags);
567 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
569 ptr[1] = EIR_TX_POWER;
570 ptr[2] = (u8) hdev->adv_tx_power;
576 name_len = strlen(hdev->dev_name);
578 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
580 if (name_len > max_len) {
582 ptr[1] = EIR_NAME_SHORT;
584 ptr[1] = EIR_NAME_COMPLETE;
586 ptr[0] = name_len + 1;
588 memcpy(ptr + 2, hdev->dev_name, name_len);
590 ad_len += (name_len + 2);
591 ptr += (name_len + 2);
597 static void update_ad(struct hci_request *req)
599 struct hci_dev *hdev = req->hdev;
600 struct hci_cp_le_set_adv_data cp;
603 if (!lmp_le_capable(hdev))
606 memset(&cp, 0, sizeof(cp));
608 len = create_ad(hdev, cp.data);
610 if (hdev->adv_data_len == len &&
611 memcmp(cp.data, hdev->adv_data, len) == 0)
614 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
615 hdev->adv_data_len = len;
619 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
622 static void create_eir(struct hci_dev *hdev, u8 *data)
627 name_len = strlen(hdev->dev_name);
633 ptr[1] = EIR_NAME_SHORT;
635 ptr[1] = EIR_NAME_COMPLETE;
637 /* EIR Data length */
638 ptr[0] = name_len + 1;
640 memcpy(ptr + 2, hdev->dev_name, name_len);
642 ptr += (name_len + 2);
645 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
647 ptr[1] = EIR_TX_POWER;
648 ptr[2] = (u8) hdev->inq_tx_power;
653 if (hdev->devid_source > 0) {
655 ptr[1] = EIR_DEVICE_ID;
657 put_unaligned_le16(hdev->devid_source, ptr + 2);
658 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
659 put_unaligned_le16(hdev->devid_product, ptr + 6);
660 put_unaligned_le16(hdev->devid_version, ptr + 8);
665 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
666 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
667 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
670 static void update_eir(struct hci_request *req)
672 struct hci_dev *hdev = req->hdev;
673 struct hci_cp_write_eir cp;
675 if (!hdev_is_powered(hdev))
678 if (!lmp_ext_inq_capable(hdev))
681 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
684 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
687 memset(&cp, 0, sizeof(cp));
689 create_eir(hdev, cp.data);
691 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
694 memcpy(hdev->eir, cp.data, sizeof(cp.data));
696 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
699 static u8 get_service_classes(struct hci_dev *hdev)
701 struct bt_uuid *uuid;
704 list_for_each_entry(uuid, &hdev->uuids, list)
705 val |= uuid->svc_hint;
710 static void update_class(struct hci_request *req)
712 struct hci_dev *hdev = req->hdev;
715 BT_DBG("%s", hdev->name);
717 if (!hdev_is_powered(hdev))
720 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
723 cod[0] = hdev->minor_class;
724 cod[1] = hdev->major_class;
725 cod[2] = get_service_classes(hdev);
727 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
730 if (memcmp(cod, hdev->dev_class, 3) == 0)
733 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
736 static void service_cache_off(struct work_struct *work)
738 struct hci_dev *hdev = container_of(work, struct hci_dev,
740 struct hci_request req;
742 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
745 hci_req_init(&req, hdev);
752 hci_dev_unlock(hdev);
754 hci_req_run(&req, NULL);
757 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
759 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
762 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
764 /* Non-mgmt controlled devices get this bit set
765 * implicitly so that pairing works for them, however
766 * for mgmt we require user-space to explicitly enable
769 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
772 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
773 void *data, u16 data_len)
775 struct mgmt_rp_read_info rp;
777 BT_DBG("sock %p %s", sk, hdev->name);
781 memset(&rp, 0, sizeof(rp));
783 bacpy(&rp.bdaddr, &hdev->bdaddr);
785 rp.version = hdev->hci_ver;
786 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
788 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
789 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
791 memcpy(rp.dev_class, hdev->dev_class, 3);
793 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
794 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
796 hci_dev_unlock(hdev);
798 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
802 static void mgmt_pending_free(struct pending_cmd *cmd)
809 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
810 struct hci_dev *hdev, void *data,
813 struct pending_cmd *cmd;
815 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
819 cmd->opcode = opcode;
820 cmd->index = hdev->id;
822 cmd->param = kmalloc(len, GFP_KERNEL);
829 memcpy(cmd->param, data, len);
834 list_add(&cmd->list, &hdev->mgmt_pending);
839 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
840 void (*cb)(struct pending_cmd *cmd,
844 struct pending_cmd *cmd, *tmp;
846 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
847 if (opcode > 0 && cmd->opcode != opcode)
854 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
856 struct pending_cmd *cmd;
858 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
859 if (cmd->opcode == opcode)
866 static void mgmt_pending_remove(struct pending_cmd *cmd)
868 list_del(&cmd->list);
869 mgmt_pending_free(cmd);
872 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
874 __le32 settings = cpu_to_le32(get_current_settings(hdev));
876 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
880 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
883 struct mgmt_mode *cp = data;
884 struct pending_cmd *cmd;
887 BT_DBG("request for %s", hdev->name);
889 if (cp->val != 0x00 && cp->val != 0x01)
890 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
891 MGMT_STATUS_INVALID_PARAMS);
895 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
896 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
901 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
902 cancel_delayed_work(&hdev->power_off);
905 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
907 err = mgmt_powered(hdev, 1);
912 if (!!cp->val == hdev_is_powered(hdev)) {
913 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
917 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
924 queue_work(hdev->req_workqueue, &hdev->power_on);
926 queue_work(hdev->req_workqueue, &hdev->power_off.work);
931 hci_dev_unlock(hdev);
935 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
936 struct sock *skip_sk)
939 struct mgmt_hdr *hdr;
941 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
945 hdr = (void *) skb_put(skb, sizeof(*hdr));
946 hdr->opcode = cpu_to_le16(event);
948 hdr->index = cpu_to_le16(hdev->id);
950 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
951 hdr->len = cpu_to_le16(data_len);
954 memcpy(skb_put(skb, data_len), data, data_len);
957 __net_timestamp(skb);
959 hci_send_to_control(skb, skip_sk);
965 static int new_settings(struct hci_dev *hdev, struct sock *skip)
969 ev = cpu_to_le32(get_current_settings(hdev));
971 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
976 struct hci_dev *hdev;
980 static void settings_rsp(struct pending_cmd *cmd, void *data)
982 struct cmd_lookup *match = data;
984 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
986 list_del(&cmd->list);
988 if (match->sk == NULL) {
990 sock_hold(match->sk);
993 mgmt_pending_free(cmd);
996 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1000 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1001 mgmt_pending_remove(cmd);
1004 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1006 if (!lmp_bredr_capable(hdev))
1007 return MGMT_STATUS_NOT_SUPPORTED;
1008 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1009 return MGMT_STATUS_REJECTED;
1011 return MGMT_STATUS_SUCCESS;
1014 static u8 mgmt_le_support(struct hci_dev *hdev)
1016 if (!lmp_le_capable(hdev))
1017 return MGMT_STATUS_NOT_SUPPORTED;
1018 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1019 return MGMT_STATUS_REJECTED;
1021 return MGMT_STATUS_SUCCESS;
1024 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1026 struct pending_cmd *cmd;
1027 struct mgmt_mode *cp;
1028 struct hci_request req;
1031 BT_DBG("status 0x%02x", status);
1035 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1040 u8 mgmt_err = mgmt_status(status);
1041 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1042 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1048 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1051 if (hdev->discov_timeout > 0) {
1052 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1053 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1057 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1061 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1064 new_settings(hdev, cmd->sk);
1066 /* When the discoverable mode gets changed, make sure
1067 * that class of device has the limited discoverable
1068 * bit correctly set.
1070 hci_req_init(&req, hdev);
1072 hci_req_run(&req, NULL);
1075 mgmt_pending_remove(cmd);
1078 hci_dev_unlock(hdev);
1081 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1084 struct mgmt_cp_set_discoverable *cp = data;
1085 struct pending_cmd *cmd;
1086 struct hci_request req;
1091 BT_DBG("request for %s", hdev->name);
1093 status = mgmt_bredr_support(hdev);
1095 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1098 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1099 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1100 MGMT_STATUS_INVALID_PARAMS);
1102 timeout = __le16_to_cpu(cp->timeout);
1104 /* Disabling discoverable requires that no timeout is set,
1105 * and enabling limited discoverable requires a timeout.
1107 if ((cp->val == 0x00 && timeout > 0) ||
1108 (cp->val == 0x02 && timeout == 0))
1109 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1110 MGMT_STATUS_INVALID_PARAMS);
1114 if (!hdev_is_powered(hdev) && timeout > 0) {
1115 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1116 MGMT_STATUS_NOT_POWERED);
1120 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1121 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1122 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1127 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1128 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1129 MGMT_STATUS_REJECTED);
1133 if (!hdev_is_powered(hdev)) {
1134 bool changed = false;
1136 /* Setting limited discoverable when powered off is
1137 * not a valid operation since it requires a timeout
1138 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1140 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1141 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1145 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1150 err = new_settings(hdev, sk);
1155 /* If the current mode is the same, then just update the timeout
1156 * value with the new value. And if only the timeout gets updated,
1157 * then no need for any HCI transactions.
1159 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1160 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1161 &hdev->dev_flags)) {
1162 cancel_delayed_work(&hdev->discov_off);
1163 hdev->discov_timeout = timeout;
1165 if (cp->val && hdev->discov_timeout > 0) {
1166 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1167 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1171 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1175 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1181 /* Cancel any potential discoverable timeout that might be
1182 * still active and store new timeout value. The arming of
1183 * the timeout happens in the complete handler.
1185 cancel_delayed_work(&hdev->discov_off);
1186 hdev->discov_timeout = timeout;
1188 hci_req_init(&req, hdev);
1193 struct hci_cp_write_current_iac_lap hci_cp;
1195 if (cp->val == 0x02) {
1196 /* Limited discoverable mode */
1197 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1200 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1201 hci_cp.iac_lap[1] = 0x8b;
1202 hci_cp.iac_lap[2] = 0x9e;
1203 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1204 hci_cp.iac_lap[4] = 0x8b;
1205 hci_cp.iac_lap[5] = 0x9e;
1207 /* General discoverable mode */
1208 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1211 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1212 hci_cp.iac_lap[1] = 0x8b;
1213 hci_cp.iac_lap[2] = 0x9e;
1216 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1217 (hci_cp.num_iac * 3) + 1, &hci_cp);
1219 scan |= SCAN_INQUIRY;
1221 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1224 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1226 err = hci_req_run(&req, set_discoverable_complete);
1228 mgmt_pending_remove(cmd);
1231 hci_dev_unlock(hdev);
1235 static void write_fast_connectable(struct hci_request *req, bool enable)
1237 struct hci_dev *hdev = req->hdev;
1238 struct hci_cp_write_page_scan_activity acp;
1241 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1245 type = PAGE_SCAN_TYPE_INTERLACED;
1247 /* 160 msec page scan interval */
1248 acp.interval = __constant_cpu_to_le16(0x0100);
1250 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1252 /* default 1.28 sec page scan */
1253 acp.interval = __constant_cpu_to_le16(0x0800);
1256 acp.window = __constant_cpu_to_le16(0x0012);
1258 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1259 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1260 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1263 if (hdev->page_scan_type != type)
1264 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1267 static u8 get_adv_type(struct hci_dev *hdev)
1269 struct pending_cmd *cmd;
1272 /* If there's a pending mgmt command the flag will not yet have
1273 * it's final value, so check for this first.
1275 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1277 struct mgmt_mode *cp = cmd->param;
1278 connectable = !!cp->val;
1280 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1283 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1286 static void enable_advertising(struct hci_request *req)
1288 struct hci_dev *hdev = req->hdev;
1289 struct hci_cp_le_set_adv_param cp;
1292 memset(&cp, 0, sizeof(cp));
1293 cp.min_interval = __constant_cpu_to_le16(0x0800);
1294 cp.max_interval = __constant_cpu_to_le16(0x0800);
1295 cp.type = get_adv_type(hdev);
1296 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1297 cp.own_address_type = ADDR_LE_DEV_PUBLIC;
1299 cp.own_address_type = ADDR_LE_DEV_RANDOM;
1300 cp.channel_map = 0x07;
1302 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1304 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1307 static void disable_advertising(struct hci_request *req)
1311 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1314 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1316 struct pending_cmd *cmd;
1317 struct mgmt_mode *cp;
1320 BT_DBG("status 0x%02x", status);
1324 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1329 u8 mgmt_err = mgmt_status(status);
1330 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1336 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1338 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1340 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1343 new_settings(hdev, cmd->sk);
1346 mgmt_pending_remove(cmd);
1349 hci_dev_unlock(hdev);
1352 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1355 struct mgmt_mode *cp = data;
1356 struct pending_cmd *cmd;
1357 struct hci_request req;
1361 BT_DBG("request for %s", hdev->name);
1363 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1364 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1365 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1366 MGMT_STATUS_REJECTED);
1368 if (cp->val != 0x00 && cp->val != 0x01)
1369 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1370 MGMT_STATUS_INVALID_PARAMS);
1374 if (!hdev_is_powered(hdev)) {
1375 bool changed = false;
1377 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1381 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1383 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1384 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1387 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1392 err = new_settings(hdev, sk);
1397 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1398 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1399 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1404 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1410 hci_req_init(&req, hdev);
1412 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
1413 cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1419 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1420 hdev->discov_timeout > 0)
1421 cancel_delayed_work(&hdev->discov_off);
1424 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1427 /* If we're going from non-connectable to connectable or
1428 * vice-versa when fast connectable is enabled ensure that fast
1429 * connectable gets disabled. write_fast_connectable won't do
1430 * anything if the page scan parameters are already what they
1433 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1434 write_fast_connectable(&req, false);
1436 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1437 hci_conn_num(hdev, LE_LINK) == 0) {
1438 disable_advertising(&req);
1439 enable_advertising(&req);
1442 err = hci_req_run(&req, set_connectable_complete);
1444 mgmt_pending_remove(cmd);
1445 if (err == -ENODATA)
1446 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE,
1452 hci_dev_unlock(hdev);
1456 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1459 struct mgmt_mode *cp = data;
1463 BT_DBG("request for %s", hdev->name);
1465 if (cp->val != 0x00 && cp->val != 0x01)
1466 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1467 MGMT_STATUS_INVALID_PARAMS);
1472 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1474 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1476 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1481 err = new_settings(hdev, sk);
1484 hci_dev_unlock(hdev);
/* MGMT Set Link Security: enable/disable BR/EDR authentication
 * (HCI Write Authentication Enable). When the adapter is powered off the
 * flag is just toggled in dev_flags; otherwise the HCI command is queued
 * behind a pending mgmt command.
 * NOTE(review): extraction is gappy (embedded numbers skip lines).
 */
1488 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1491 struct mgmt_mode *cp = data;
1492 struct pending_cmd *cmd;
1496 BT_DBG("request for %s", hdev->name);
/* BR/EDR must be supported/enabled for link-level security. */
1498 status = mgmt_bredr_support(hdev);
1500 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1503 if (cp->val != 0x00 && cp->val != 0x01)
1504 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1505 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: no HCI traffic possible, just flip the stored flag. */
1509 if (!hdev_is_powered(hdev)) {
1510 bool changed = false;
1512 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1513 &hdev->dev_flags)) {
1514 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1518 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1523 err = new_settings(hdev, sk);
/* Only one Set Link Security may be in flight at a time. */
1528 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1529 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: reply without HCI. */
1536 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1537 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1541 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1547 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
/* Sending failed: drop the pending entry so it cannot leak. */
1549 mgmt_pending_remove(cmd);
1554 hci_dev_unlock(hdev);
/* MGMT Set SSP: enable/disable Secure Simple Pairing via
 * HCI Write Simple Pairing Mode. Disabling SSP also clears HS (High
 * Speed), since HS depends on SSP.
 * NOTE(review): extraction is gappy (embedded numbers skip lines).
 */
1558 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1560 struct mgmt_mode *cp = data;
1561 struct pending_cmd *cmd;
1565 BT_DBG("request for %s", hdev->name);
1567 status = mgmt_bredr_support(hdev);
1569 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
/* Controller must advertise SSP support in its LMP features. */
1571 if (!lmp_ssp_capable(hdev))
1572 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1573 MGMT_STATUS_NOT_SUPPORTED);
1575 if (cp->val != 0x00 && cp->val != 0x01)
1576 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1577 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: update flags only; disabling SSP drags HS down too. */
1581 if (!hdev_is_powered(hdev)) {
1585 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1588 changed = test_and_clear_bit(HCI_SSP_ENABLED,
1591 changed = test_and_clear_bit(HCI_HS_ENABLED,
1594 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1597 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1602 err = new_settings(hdev, sk);
/* Serialize against both pending SET_SSP and SET_HS operations. */
1607 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1608 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1609 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
/* No change requested: short-circuit with a settings response. */
1614 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1615 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1619 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1625 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1627 mgmt_pending_remove(cmd);
1632 hci_dev_unlock(hdev);
/* MGMT Set High Speed: toggle HCI_HS_ENABLED. HS is a host-only setting
 * (no HCI command), but it requires SSP to be enabled first, and
 * disabling while powered is rejected here.
 * NOTE(review): extraction is gappy (embedded numbers skip lines).
 */
1636 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1638 struct mgmt_mode *cp = data;
1643 BT_DBG("request for %s", hdev->name);
1645 status = mgmt_bredr_support(hdev);
1647 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1649 if (!lmp_ssp_capable(hdev))
1650 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1651 MGMT_STATUS_NOT_SUPPORTED);
/* HS without SSP is not a valid configuration. */
1653 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1654 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1655 MGMT_STATUS_REJECTED);
1657 if (cp->val != 0x00 && cp->val != 0x01)
1658 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1659 MGMT_STATUS_INVALID_PARAMS);
1664 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
/* NOTE(review): this powered check appears on the disable path only
 * (see gap in numbering) — confirm against full source. */
1666 if (hdev_is_powered(hdev)) {
1667 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1668 MGMT_STATUS_REJECTED);
1672 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1675 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1680 err = new_settings(hdev, sk);
1683 hci_dev_unlock(hdev);
/* Completion callback for the Set LE HCI request. On failure, all pending
 * SET_LE commands get a command status with the translated HCI error; on
 * success they get settings responses and New Settings is broadcast.
 * When LE ended up enabled, refresh the advertising data defaults.
 */
1687 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1689 struct cmd_lookup match = { NULL, hdev };
1692 u8 mgmt_err = mgmt_status(status);
1694 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1699 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1701 new_settings(hdev, match.sk);
1706 /* Make sure the controller has a good default for
1707 * advertising data. Restrict the update to when LE
1708 * has actually been enabled. During power on, the
1709 * update in powered_update_hci will take care of it.
1711 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1712 struct hci_request req;
1716 hci_req_init(&req, hdev);
/* Fire-and-forget: no completion handler needed for the adv update. */
1718 hci_req_run(&req, NULL);
1720 hci_dev_unlock(hdev);
/* MGMT Set Low Energy: enable/disable LE host support via
 * HCI Write LE Host Supported. Advertising is stopped first when LE is
 * being turned off while advertising is active.
 * NOTE(review): extraction is gappy (embedded numbers skip lines).
 */
1724 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1726 struct mgmt_mode *cp = data;
1727 struct hci_cp_write_le_host_supported hci_cp;
1728 struct pending_cmd *cmd;
1729 struct hci_request req;
1733 BT_DBG("request for %s", hdev->name);
1735 if (!lmp_le_capable(hdev))
1736 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1737 MGMT_STATUS_NOT_SUPPORTED);
1739 if (cp->val != 0x00 && cp->val != 0x01)
1740 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1741 MGMT_STATUS_INVALID_PARAMS);
1743 /* LE-only devices do not allow toggling LE on/off */
1744 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1745 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1746 MGMT_STATUS_REJECTED);
/* Host LE support already matches, or device is off: flag-only path. */
1751 enabled = lmp_host_le_capable(hdev);
1753 if (!hdev_is_powered(hdev) || val == enabled) {
1754 bool changed = false;
1756 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1757 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Turning LE off implies advertising off as well. */
1761 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1762 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1766 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1771 err = new_settings(hdev, sk);
/* Serialize against pending SET_LE and SET_ADVERTISING. */
1776 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1777 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1778 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1783 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1789 hci_req_init(&req, hdev);
1791 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE+BR/EDR bit mirrors controller capability. */
1795 hci_cp.simul = lmp_le_br_capable(hdev);
1797 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1798 disable_advertising(&req);
1801 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1804 err = hci_req_run(&req, le_enable_complete);
1806 mgmt_pending_remove(cmd);
1809 hci_dev_unlock(hdev);
1813 /* This is a helper function to test for pending mgmt commands that can
1814 * cause CoD or EIR HCI commands. We can only allow one such pending
1815 * mgmt command at a time since otherwise we cannot easily track what
1816 * the current values are, will be, and based on that calculate if a new
1817 * HCI command needs to be sent and if yes with what value.
/* Returns true if any pending mgmt command could trigger a Class-of-Device
 * or EIR HCI update (see the rationale comment above this function).
 * NOTE(review): the return statements are elided in this extraction.
 */
1819 static bool pending_eir_or_class(struct hci_dev *hdev)
1821 struct pending_cmd *cmd;
1823 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1824 switch (cmd->opcode) {
1825 case MGMT_OP_ADD_UUID:
1826 case MGMT_OP_REMOVE_UUID:
1827 case MGMT_OP_SET_DEV_CLASS:
1828 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit shortened UUIDs. */
1836 static const u8 bluetooth_base_uuid[] = {
1837 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1838 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as 16/32/128-bit based on whether its low 12
 * bytes match the Bluetooth Base UUID. The returns are elided here;
 * presumably 128 on mismatch, then 16 or 32 from the extracted value. */
1841 static u8 get_uuid_size(const u8 *uuid)
1845 if (memcmp(uuid, bluetooth_base_uuid, 12))
/* Bytes 12..15 hold the shortened UUID value (little-endian). */
1848 val = get_unaligned_le32(&uuid[12]);
/* Common completion for class/EIR-affecting mgmt ops: find the pending
 * command of the given opcode and complete it with the current device
 * class (3 bytes) and the translated HCI status. */
1855 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1857 struct pending_cmd *cmd;
1861 cmd = mgmt_pending_find(mgmt_op, hdev);
1865 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1866 hdev->dev_class, 3);
1868 mgmt_pending_remove(cmd);
1871 hci_dev_unlock(hdev);
/* HCI request completion for Add UUID — delegates to the common
 * class-complete helper. */
1874 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1876 BT_DBG("status 0x%02x", status);
1878 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT Add UUID: append a service UUID to hdev->uuids and queue the HCI
 * updates (the class/EIR hci_req_add calls are elided in this
 * extraction). Replies immediately if no HCI traffic was generated.
 */
1881 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1883 struct mgmt_cp_add_uuid *cp = data;
1884 struct pending_cmd *cmd;
1885 struct hci_request req;
1886 struct bt_uuid *uuid;
1889 BT_DBG("request for %s", hdev->name);
/* Only one class/EIR-affecting command may be pending at a time. */
1893 if (pending_eir_or_class(hdev)) {
1894 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1899 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1905 memcpy(uuid->uuid, cp->uuid, 16);
1906 uuid->svc_hint = cp->svc_hint;
1907 uuid->size = get_uuid_size(cp->uuid);
1909 list_add_tail(&uuid->list, &hdev->uuids);
1911 hci_req_init(&req, hdev);
1916 err = hci_req_run(&req, add_uuid_complete);
1918 if (err != -ENODATA)
/* -ENODATA: nothing needed sending — complete synchronously. */
1921 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1922 hdev->dev_class, 3);
1926 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1935 hci_dev_unlock(hdev);
/* (Re-)arm the service cache: when powered, set HCI_SERVICE_CACHE and
 * schedule the delayed cache flush work. Return values are elided here;
 * presumably true when the cache work was (already) armed — confirm. */
1939 static bool enable_service_cache(struct hci_dev *hdev)
1941 if (!hdev_is_powered(hdev))
1944 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1945 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for Remove UUID — delegates to the common
 * class-complete helper. */
1953 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1955 BT_DBG("status 0x%02x", status);
1957 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT Remove UUID: remove one UUID from hdev->uuids, or clear them all
 * when the all-zero wildcard UUID is given, then queue class/EIR updates
 * (the hci_req_add calls are elided in this extraction).
 */
1960 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1963 struct mgmt_cp_remove_uuid *cp = data;
1964 struct pending_cmd *cmd;
1965 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as a wildcard meaning "remove everything". */
1966 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1967 struct hci_request req;
1970 BT_DBG("request for %s", hdev->name);
1974 if (pending_eir_or_class(hdev)) {
1975 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1980 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1981 err = hci_uuids_clear(hdev);
/* If the cache-flush work was armed, no immediate HCI update needed. */
1983 if (enable_service_cache(hdev)) {
1984 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1985 0, hdev->dev_class, 3);
/* Remove every list entry matching the requested UUID. */
1994 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1995 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1998 list_del(&match->list);
/* UUID not found at all → invalid parameters. */
2004 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2005 MGMT_STATUS_INVALID_PARAMS);
2010 hci_req_init(&req, hdev);
2015 err = hci_req_run(&req, remove_uuid_complete);
2017 if (err != -ENODATA)
2020 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2021 hdev->dev_class, 3);
2025 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2034 hci_dev_unlock(hdev);
/* HCI request completion for Set Device Class — delegates to the common
 * class-complete helper. */
2038 static void set_class_complete(struct hci_dev *hdev, u8 status)
2040 BT_DBG("status 0x%02x", status);
2042 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT Set Device Class: store major/minor class and, when powered,
 * push the new Class of Device to the controller (the update_class/
 * update_eir hci_req_add calls are elided in this extraction).
 */
2045 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2048 struct mgmt_cp_set_dev_class *cp = data;
2049 struct pending_cmd *cmd;
2050 struct hci_request req;
2053 BT_DBG("request for %s", hdev->name);
2055 if (!lmp_bredr_capable(hdev))
2056 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2057 MGMT_STATUS_NOT_SUPPORTED);
2061 if (pending_eir_or_class(hdev)) {
2062 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved (must be 0). */
2067 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2068 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2069 MGMT_STATUS_INVALID_PARAMS);
2073 hdev->major_class = cp->major;
2074 hdev->minor_class = cp->minor;
/* Powered off: remember the values, reply with current dev_class. */
2076 if (!hdev_is_powered(hdev)) {
2077 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2078 hdev->dev_class, 3);
2082 hci_req_init(&req, hdev);
/* Flush a pending service-cache timer; drop the lock because
 * cancel_delayed_work_sync() may sleep waiting for the work item. */
2084 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2085 hci_dev_unlock(hdev);
2086 cancel_delayed_work_sync(&hdev->service_cache);
2093 err = hci_req_run(&req, set_class_complete);
2095 if (err != -ENODATA)
2098 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2099 hdev->dev_class, 3);
2103 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2112 hci_dev_unlock(hdev);
/* MGMT Load Link Keys: replace the stored BR/EDR link keys with the
 * list supplied by userspace (typically at daemon startup). Validates
 * the variable-length payload against key_count before touching state.
 * NOTE(review): extraction is gappy (embedded numbers skip lines).
 */
2116 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2119 struct mgmt_cp_load_link_keys *cp = data;
2120 u16 key_count, expected_len;
2123 BT_DBG("request for %s", hdev->name);
2125 if (!lmp_bredr_capable(hdev))
2126 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2127 MGMT_STATUS_NOT_SUPPORTED);
2129 key_count = __le16_to_cpu(cp->key_count);
/* The packet length must exactly match header + key_count entries. */
2131 expected_len = sizeof(*cp) + key_count *
2132 sizeof(struct mgmt_link_key_info);
2133 if (expected_len != len) {
2134 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2136 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2137 MGMT_STATUS_INVALID_PARAMS);
2140 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2141 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2142 MGMT_STATUS_INVALID_PARAMS);
2144 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: validate all addresses before mutating anything. */
2147 for (i = 0; i < key_count; i++) {
2148 struct mgmt_link_key_info *key = &cp->keys[i];
2150 if (key->addr.type != BDADDR_BREDR)
2151 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2152 MGMT_STATUS_INVALID_PARAMS);
/* Old keys are dropped wholesale; this is a full replacement. */
2157 hci_link_keys_clear(hdev);
2160 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2162 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
/* Second pass: store the keys (conn=NULL, new_key=0 → no event). */
2164 for (i = 0; i < key_count; i++) {
2165 struct mgmt_link_key_info *key = &cp->keys[i];
2167 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2168 key->type, key->pin_len);
2171 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2173 hci_dev_unlock(hdev);
/* Emit the Device Unpaired mgmt event for the given address, skipping
 * the socket that initiated the unpair (it gets a command reply). */
2178 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2179 u8 addr_type, struct sock *skip_sk)
2181 struct mgmt_ev_device_unpaired ev;
2183 bacpy(&ev.addr.bdaddr, bdaddr);
2184 ev.addr.type = addr_type;
2186 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device: delete the stored link key (BR/EDR) or LTK (LE)
 * for the address and, when requested and connected, also disconnect.
 * Completes immediately unless a disconnect is issued, in which case a
 * pending command waits for the disconnect to finish.
 * NOTE(review): extraction is gappy (embedded numbers skip lines).
 */
2190 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2193 struct mgmt_cp_unpair_device *cp = data;
2194 struct mgmt_rp_unpair_device rp;
2195 struct hci_cp_disconnect dc;
2196 struct pending_cmd *cmd;
2197 struct hci_conn *conn;
/* Echo the target address back in every reply. */
2200 memset(&rp, 0, sizeof(rp));
2201 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2202 rp.addr.type = cp->addr.type;
2204 if (!bdaddr_type_is_valid(cp->addr.type))
2205 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2206 MGMT_STATUS_INVALID_PARAMS,
2209 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2210 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2211 MGMT_STATUS_INVALID_PARAMS,
2216 if (!hdev_is_powered(hdev)) {
2217 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2218 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* BR/EDR → link key store; anything else → LE long-term key store. */
2222 if (cp->addr.type == BDADDR_BREDR)
2223 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2225 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
/* Key removal failed → the device was never paired. */
2228 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2229 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2233 if (cp->disconnect) {
2234 if (cp->addr.type == BDADDR_BREDR)
2235 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2238 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No active connection (or no disconnect requested): finish now and
 * notify other sockets that the device is unpaired. */
2245 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2247 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2251 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2258 dc.handle = cpu_to_le16(conn->handle);
2259 dc.reason = 0x13; /* Remote User Terminated Connection */
2260 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2262 mgmt_pending_remove(cmd);
2265 hci_dev_unlock(hdev);
/* MGMT Disconnect: terminate the ACL (BR/EDR) or LE connection to the
 * given address. A pending command is kept until the disconnect
 * completes; only one Disconnect may be in flight at a time.
 */
2269 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2272 struct mgmt_cp_disconnect *cp = data;
2273 struct mgmt_rp_disconnect rp;
2274 struct hci_cp_disconnect dc;
2275 struct pending_cmd *cmd;
2276 struct hci_conn *conn;
2281 memset(&rp, 0, sizeof(rp));
2282 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2283 rp.addr.type = cp->addr.type;
2285 if (!bdaddr_type_is_valid(cp->addr.type))
2286 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2287 MGMT_STATUS_INVALID_PARAMS,
2292 if (!test_bit(HCI_UP, &hdev->flags)) {
2293 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2294 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2298 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2299 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2300 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2304 if (cp->addr.type == BDADDR_BREDR)
2305 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2308 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED means no usable link-layer connection exists. */
2310 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2311 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2312 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2316 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2322 dc.handle = cpu_to_le16(conn->handle);
2323 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2325 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2327 mgmt_pending_remove(cmd);
2330 hci_dev_unlock(hdev);
/* Map an HCI (link_type, addr_type) pair to the mgmt BDADDR_* address
 * type. LE public maps to BDADDR_LE_PUBLIC, other LE address types fall
 * back to LE Random, and non-LE links fall back to BR/EDR.
 * NOTE(review): case labels for LE_LINK are elided in this extraction. */
2334 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2336 switch (link_type) {
2338 switch (addr_type) {
2339 case ADDR_LE_DEV_PUBLIC:
2340 return BDADDR_LE_PUBLIC;
2343 /* Fallback to LE Random address type */
2344 return BDADDR_LE_RANDOM;
2348 /* Fallback to BR/EDR type */
2349 return BDADDR_BREDR;
/* MGMT Get Connections: return the addresses of all mgmt-visible
 * connections. SCO/eSCO links are filtered out of the reply (they share
 * an address with their ACL link).
 */
2353 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2356 struct mgmt_rp_get_connections *rp;
2366 if (!hdev_is_powered(hdev)) {
2367 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2368 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the reply buffer. */
2373 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2374 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2378 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2379 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the address list. */
2386 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2387 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2389 bacpy(&rp->addr[i].bdaddr, &c->dst);
2390 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
/* Skip SCO/eSCO entries (presumably 'continue' on the elided line). */
2391 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2396 rp->conn_count = cpu_to_le16(i);
2398 /* Recalculate length in case of filtered SCO connections, etc */
2399 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2401 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2407 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY mgmt command and send the matching
 * HCI PIN Code Negative Reply; the pending entry is dropped on send
 * failure. */
2411 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2412 struct mgmt_cp_pin_code_neg_reply *cp)
2414 struct pending_cmd *cmd;
2417 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2422 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2423 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2425 mgmt_pending_remove(cmd);
/* MGMT PIN Code Reply: forward a user-supplied PIN to the controller.
 * If the link requires a 16-digit PIN (high security) and the supplied
 * one is shorter, automatically convert to a negative reply instead.
 */
2430 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2433 struct hci_conn *conn;
2434 struct mgmt_cp_pin_code_reply *cp = data;
2435 struct hci_cp_pin_code_reply reply;
2436 struct pending_cmd *cmd;
2443 if (!hdev_is_powered(hdev)) {
2444 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2445 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only exists on BR/EDR ACL links. */
2449 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2451 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2452 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a full 16-byte PIN; reject shorter ones by
 * sending a negative reply on the user's behalf. */
2456 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2457 struct mgmt_cp_pin_code_neg_reply ncp;
2459 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2461 BT_ERR("PIN code is not 16 bytes long");
2463 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2465 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2466 MGMT_STATUS_INVALID_PARAMS);
2471 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2477 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2478 reply.pin_len = cp->pin_len;
2479 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2481 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2483 mgmt_pending_remove(cmd);
2486 hci_dev_unlock(hdev);
/* MGMT Set IO Capability: store the IO capability used for future
 * pairings. Host-side state only — completes immediately. */
2490 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2493 struct mgmt_cp_set_io_capability *cp = data;
2499 hdev->io_capability = cp->io_capability;
2501 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2502 hdev->io_capability);
2504 hci_dev_unlock(hdev);
2506 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending PAIR_DEVICE command whose user_data points at this
 * connection; the return statements are elided in this extraction
 * (presumably the matching cmd, or NULL). */
2510 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2512 struct hci_dev *hdev = conn->hdev;
2513 struct pending_cmd *cmd;
2515 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2516 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2519 if (cmd->user_data != conn)
/* Finish a Pair Device operation: send the command reply with the peer
 * address, detach all pairing callbacks from the connection, drop the
 * reference taken at connect time, and remove the pending command. */
2528 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2530 struct mgmt_rp_pair_device rp;
2531 struct hci_conn *conn = cmd->user_data;
2533 bacpy(&rp.addr.bdaddr, &conn->dst);
2534 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2536 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2539 /* So we don't get further callbacks for this connection */
2540 conn->connect_cfm_cb = NULL;
2541 conn->security_cfm_cb = NULL;
2542 conn->disconn_cfm_cb = NULL;
/* Balances the reference held while the pairing was in progress. */
2544 hci_conn_drop(conn);
2546 mgmt_pending_remove(cmd);
/* hci_conn callback (connect/security/disconnect) for BR/EDR pairing:
 * locate the pending Pair Device command and complete it with the
 * translated HCI status. */
2549 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2551 struct pending_cmd *cmd;
2553 BT_DBG("status %u", status);
2555 cmd = find_pairing(conn);
2557 BT_DBG("Unable to find a pending command");
2559 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback for pairing. For LE, a successful
 * connection alone does not mean pairing finished (see comment in
 * pair_device); the success short-circuit is elided in this extraction
 * — presumably only failures complete the pairing here. */
2562 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2564 struct pending_cmd *cmd;
2566 BT_DBG("status %u", status);
2571 cmd = find_pairing(conn);
2573 BT_DBG("Unable to find a pending command");
2575 pairing_complete(cmd, mgmt_status(status));
/* MGMT Pair Device: initiate an outgoing ACL or LE connection with
 * dedicated-bonding authentication and hook the pairing callbacks onto
 * the resulting hci_conn. Completes asynchronously via
 * pairing_complete() unless the link is already connected and secure.
 * NOTE(review): extraction is gappy (embedded numbers skip lines).
 */
2578 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2581 struct mgmt_cp_pair_device *cp = data;
2582 struct mgmt_rp_pair_device rp;
2583 struct pending_cmd *cmd;
2584 u8 sec_level, auth_type;
2585 struct hci_conn *conn;
2590 memset(&rp, 0, sizeof(rp));
2591 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2592 rp.addr.type = cp->addr.type;
2594 if (!bdaddr_type_is_valid(cp->addr.type))
2595 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2596 MGMT_STATUS_INVALID_PARAMS,
2601 if (!hdev_is_powered(hdev)) {
2602 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2603 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 = NoInputNoOutput → plain bonding, otherwise MITM. */
2607 sec_level = BT_SECURITY_MEDIUM;
2608 if (cp->io_cap == 0x03)
2609 auth_type = HCI_AT_DEDICATED_BONDING;
2611 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2613 if (cp->addr.type == BDADDR_BREDR)
2614 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2615 cp->addr.type, sec_level, auth_type);
2617 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2618 cp->addr.type, sec_level, auth_type);
/* hci_connect() returns ERR_PTR on failure. */
2623 if (PTR_ERR(conn) == -EBUSY)
2624 status = MGMT_STATUS_BUSY;
2626 status = MGMT_STATUS_CONNECT_FAILED;
2628 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect_cfm_cb already set means another pairing owns this conn. */
2634 if (conn->connect_cfm_cb) {
2635 hci_conn_drop(conn);
2636 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2637 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2641 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2644 hci_conn_drop(conn);
2648 /* For LE, just connecting isn't a proof that the pairing finished */
2649 if (cp->addr.type == BDADDR_BREDR)
2650 conn->connect_cfm_cb = pairing_complete_cb;
2652 conn->connect_cfm_cb = le_connect_complete_cb;
2654 conn->security_cfm_cb = pairing_complete_cb;
2655 conn->disconn_cfm_cb = pairing_complete_cb;
2656 conn->io_capability = cp->io_cap;
2657 cmd->user_data = conn;
/* Already connected and secure enough: finish synchronously. */
2659 if (conn->state == BT_CONNECTED &&
2660 hci_conn_security(conn, sec_level, auth_type))
2661 pairing_complete(cmd, 0);
2666 hci_dev_unlock(hdev);
/* MGMT Cancel Pair Device: abort the in-flight Pair Device command for
 * the given address, completing it with MGMT_STATUS_CANCELLED. */
2670 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2673 struct mgmt_addr_info *addr = data;
2674 struct pending_cmd *cmd;
2675 struct hci_conn *conn;
2682 if (!hdev_is_powered(hdev)) {
2683 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2684 MGMT_STATUS_NOT_POWERED);
/* No pending pairing at all → nothing to cancel. */
2688 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2690 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2691 MGMT_STATUS_INVALID_PARAMS);
2695 conn = cmd->user_data;
/* Address must match the pairing actually in progress. */
2697 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2698 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2699 MGMT_STATUS_INVALID_PARAMS);
2703 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2705 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2706 addr, sizeof(*addr));
2708 hci_dev_unlock(hdev);
/* Shared implementation for all user pairing responses (PIN neg reply,
 * user confirm pos/neg, passkey pos/neg). LE addresses are routed to SMP;
 * BR/EDR responses are forwarded as the given HCI command, with a pending
 * mgmt command tracking completion.
 */
2712 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2713 struct mgmt_addr_info *addr, u16 mgmt_op,
2714 u16 hci_op, __le32 passkey)
2716 struct pending_cmd *cmd;
2717 struct hci_conn *conn;
2722 if (!hdev_is_powered(hdev)) {
2723 err = cmd_complete(sk, hdev->id, mgmt_op,
2724 MGMT_STATUS_NOT_POWERED, addr,
2729 if (addr->type == BDADDR_BREDR)
2730 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2732 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2735 err = cmd_complete(sk, hdev->id, mgmt_op,
2736 MGMT_STATUS_NOT_CONNECTED, addr,
2741 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2742 /* Continue with pairing via SMP */
2743 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2746 err = cmd_complete(sk, hdev->id, mgmt_op,
2747 MGMT_STATUS_SUCCESS, addr,
2750 err = cmd_complete(sk, hdev->id, mgmt_op,
2751 MGMT_STATUS_FAILED, addr,
2757 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2763 /* Continue with pairing via HCI */
/* Passkey replies carry bdaddr + passkey; all others just the bdaddr. */
2764 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2765 struct hci_cp_user_passkey_reply cp;
2767 bacpy(&cp.bdaddr, &addr->bdaddr);
2768 cp.passkey = passkey;
2769 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2771 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2775 mgmt_pending_remove(cmd);
2778 hci_dev_unlock(hdev);
/* MGMT PIN Code Negative Reply — thin wrapper over user_pairing_resp
 * (no passkey for this op, hence 0). */
2782 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2783 void *data, u16 len)
2785 struct mgmt_cp_pin_code_neg_reply *cp = data;
2789 return user_pairing_resp(sk, hdev, &cp->addr,
2790 MGMT_OP_PIN_CODE_NEG_REPLY,
2791 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT User Confirmation Reply — validates the fixed-size payload, then
 * delegates to user_pairing_resp. */
2794 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2797 struct mgmt_cp_user_confirm_reply *cp = data;
2801 if (len != sizeof(*cp))
2802 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2803 MGMT_STATUS_INVALID_PARAMS);
2805 return user_pairing_resp(sk, hdev, &cp->addr,
2806 MGMT_OP_USER_CONFIRM_REPLY,
2807 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT User Confirmation Negative Reply — wrapper over
 * user_pairing_resp. */
2810 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2811 void *data, u16 len)
2813 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2817 return user_pairing_resp(sk, hdev, &cp->addr,
2818 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2819 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT User Passkey Reply — forwards the user's passkey via
 * user_pairing_resp. */
2822 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2825 struct mgmt_cp_user_passkey_reply *cp = data;
2829 return user_pairing_resp(sk, hdev, &cp->addr,
2830 MGMT_OP_USER_PASSKEY_REPLY,
2831 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT User Passkey Negative Reply — wrapper over user_pairing_resp. */
2834 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2835 void *data, u16 len)
2837 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2841 return user_pairing_resp(sk, hdev, &cp->addr,
2842 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2843 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name into the
 * given request. */
2846 static void update_name(struct hci_request *req)
2848 struct hci_dev *hdev = req->hdev;
2849 struct hci_cp_write_local_name cp;
2851 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2853 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Completion for the Set Local Name HCI request: report either a command
 * status (on HCI error) or a command complete echoing the new name. */
2856 static void set_name_complete(struct hci_dev *hdev, u8 status)
2858 struct mgmt_cp_set_local_name *cp;
2859 struct pending_cmd *cmd;
2861 BT_DBG("status 0x%02x", status);
2865 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2872 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2873 mgmt_status(status));
2875 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2878 mgmt_pending_remove(cmd);
2881 hci_dev_unlock(hdev);
/* MGMT Set Local Name: update the device name (and short name). When
 * powered, pushes the BR/EDR name and/or LE scan-response data via an
 * HCI request (the specific hci_req_add calls are elided here); when
 * off, just stores the name and emits Local Name Changed.
 */
2884 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2887 struct mgmt_cp_set_local_name *cp = data;
2888 struct pending_cmd *cmd;
2889 struct hci_request req;
2896 /* If the old values are the same as the new ones just return a
2897 * direct command complete event.
2899 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2900 !memcmp(hdev->short_name, cp->short_name,
2901 sizeof(hdev->short_name))) {
2902 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name is host-only state; store it unconditionally. */
2907 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2909 if (!hdev_is_powered(hdev)) {
2910 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2912 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2917 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2923 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2929 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2931 hci_req_init(&req, hdev);
/* BR/EDR path writes the local name; LE path updates scan response. */
2933 if (lmp_bredr_capable(hdev)) {
2938 if (lmp_le_capable(hdev))
2941 err = hci_req_run(&req, set_name_complete);
2943 mgmt_pending_remove(cmd);
2946 hci_dev_unlock(hdev);
/* MGMT Read Local OOB Data: request the controller's SSP OOB hash and
 * randomizer (HCI Read Local OOB Data). Requires power, SSP support and
 * no other Read Local OOB Data in flight. */
2950 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2951 void *data, u16 data_len)
2953 struct pending_cmd *cmd;
2956 BT_DBG("%s", hdev->name);
2960 if (!hdev_is_powered(hdev)) {
2961 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2962 MGMT_STATUS_NOT_POWERED);
2966 if (!lmp_ssp_capable(hdev)) {
2967 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2968 MGMT_STATUS_NOT_SUPPORTED);
2972 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2973 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2978 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2984 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2986 mgmt_pending_remove(cmd);
2989 hci_dev_unlock(hdev);
/* MGMT Add Remote OOB Data: store a peer's OOB hash/randomizer for use
 * during a later SSP pairing; completes synchronously. */
2993 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2994 void *data, u16 len)
2996 struct mgmt_cp_add_remote_oob_data *cp = data;
/* NOTE(review): trailing space in the debug format string below. */
3000 BT_DBG("%s ", hdev->name);
3004 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
3007 status = MGMT_STATUS_FAILED;
3009 status = MGMT_STATUS_SUCCESS;
3011 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3012 &cp->addr, sizeof(cp->addr));
3014 hci_dev_unlock(hdev);
/* MGMT Remove Remote OOB Data: delete stored OOB data for an address;
 * failure is reported as invalid parameters (no such entry). */
3018 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3019 void *data, u16 len)
3021 struct mgmt_cp_remove_remote_oob_data *cp = data;
3025 BT_DBG("%s", hdev->name);
3029 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3031 status = MGMT_STATUS_INVALID_PARAMS;
3033 status = MGMT_STATUS_SUCCESS;
3035 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3036 status, &cp->addr, sizeof(cp->addr));
3038 hci_dev_unlock(hdev);
/* Fail a pending Start Discovery: reset discovery state to STOPPED and
 * complete the pending command with the translated HCI status, echoing
 * the discovery type back. */
3042 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3044 struct pending_cmd *cmd;
3048 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3050 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3054 type = hdev->discovery.type;
3056 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3057 &type, sizeof(type));
3058 mgmt_pending_remove(cmd);
/* HCI completion for Start Discovery: on failure unwind via
 * mgmt_start_discovery_failed; on success enter DISCOVERY_FINDING and,
 * for LE/interleaved scans, arm the delayed le_scan_disable work that
 * bounds the scan duration. */
3063 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3065 BT_DBG("status %d", status);
3069 mgmt_start_discovery_failed(hdev, status);
3070 hci_dev_unlock(hdev);
3075 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3076 hci_dev_unlock(hdev);
3078 switch (hdev->discovery.type) {
3079 case DISCOV_TYPE_LE:
3080 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3084 case DISCOV_TYPE_INTERLEAVED:
3085 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3086 DISCOV_INTERLEAVED_TIMEOUT);
/* BR/EDR inquiry has its own HCI-side timeout; nothing to arm. */
3089 case DISCOV_TYPE_BREDR:
3093 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3097 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3098 void *data, u16 len)
3100 struct mgmt_cp_start_discovery *cp = data;
3101 struct pending_cmd *cmd;
3102 struct hci_cp_le_set_scan_param param_cp;
3103 struct hci_cp_le_set_scan_enable enable_cp;
3104 struct hci_cp_inquiry inq_cp;
3105 struct hci_request req;
3106 /* General inquiry access code (GIAC) */
3107 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3111 BT_DBG("%s", hdev->name);
3115 if (!hdev_is_powered(hdev)) {
3116 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3117 MGMT_STATUS_NOT_POWERED);
3121 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3122 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3127 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3128 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3133 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3139 hdev->discovery.type = cp->type;
3141 hci_req_init(&req, hdev);
3143 switch (hdev->discovery.type) {
3144 case DISCOV_TYPE_BREDR:
3145 status = mgmt_bredr_support(hdev);
3147 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3149 mgmt_pending_remove(cmd);
3153 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3154 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3156 mgmt_pending_remove(cmd);
3160 hci_inquiry_cache_flush(hdev);
3162 memset(&inq_cp, 0, sizeof(inq_cp));
3163 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3164 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3165 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3168 case DISCOV_TYPE_LE:
3169 case DISCOV_TYPE_INTERLEAVED:
3170 status = mgmt_le_support(hdev);
3172 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3174 mgmt_pending_remove(cmd);
3178 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3179 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3180 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3181 MGMT_STATUS_NOT_SUPPORTED);
3182 mgmt_pending_remove(cmd);
3186 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3187 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3188 MGMT_STATUS_REJECTED);
3189 mgmt_pending_remove(cmd);
3193 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
3194 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3196 mgmt_pending_remove(cmd);
3200 memset(¶m_cp, 0, sizeof(param_cp));
3201 param_cp.type = LE_SCAN_ACTIVE;
3202 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3203 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3204 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
3205 param_cp.own_address_type = ADDR_LE_DEV_PUBLIC;
3207 param_cp.own_address_type = ADDR_LE_DEV_RANDOM;
3208 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3211 memset(&enable_cp, 0, sizeof(enable_cp));
3212 enable_cp.enable = LE_SCAN_ENABLE;
3213 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3214 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3219 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3220 MGMT_STATUS_INVALID_PARAMS);
3221 mgmt_pending_remove(cmd);
3225 err = hci_req_run(&req, start_discovery_complete);
3227 mgmt_pending_remove(cmd);
3229 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3232 hci_dev_unlock(hdev);
3236 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3238 struct pending_cmd *cmd;
3241 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3245 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3246 &hdev->discovery.type, sizeof(hdev->discovery.type));
3247 mgmt_pending_remove(cmd);
3252 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3254 BT_DBG("status %d", status);
3259 mgmt_stop_discovery_failed(hdev, status);
3263 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3266 hci_dev_unlock(hdev);
3269 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3272 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3273 struct pending_cmd *cmd;
3274 struct hci_cp_remote_name_req_cancel cp;
3275 struct inquiry_entry *e;
3276 struct hci_request req;
3277 struct hci_cp_le_set_scan_enable enable_cp;
3280 BT_DBG("%s", hdev->name);
3284 if (!hci_discovery_active(hdev)) {
3285 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3286 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3287 sizeof(mgmt_cp->type));
3291 if (hdev->discovery.type != mgmt_cp->type) {
3292 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3293 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3294 sizeof(mgmt_cp->type));
3298 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3304 hci_req_init(&req, hdev);
3306 switch (hdev->discovery.state) {
3307 case DISCOVERY_FINDING:
3308 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3309 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3311 cancel_delayed_work(&hdev->le_scan_disable);
3313 memset(&enable_cp, 0, sizeof(enable_cp));
3314 enable_cp.enable = LE_SCAN_DISABLE;
3315 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
3316 sizeof(enable_cp), &enable_cp);
3321 case DISCOVERY_RESOLVING:
3322 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3325 mgmt_pending_remove(cmd);
3326 err = cmd_complete(sk, hdev->id,
3327 MGMT_OP_STOP_DISCOVERY, 0,
3329 sizeof(mgmt_cp->type));
3330 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3334 bacpy(&cp.bdaddr, &e->data.bdaddr);
3335 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3341 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3343 mgmt_pending_remove(cmd);
3344 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3345 MGMT_STATUS_FAILED, &mgmt_cp->type,
3346 sizeof(mgmt_cp->type));
3350 err = hci_req_run(&req, stop_discovery_complete);
3352 mgmt_pending_remove(cmd);
3354 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3357 hci_dev_unlock(hdev);
3361 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3364 struct mgmt_cp_confirm_name *cp = data;
3365 struct inquiry_entry *e;
3368 BT_DBG("%s", hdev->name);
3372 if (!hci_discovery_active(hdev)) {
3373 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3374 MGMT_STATUS_FAILED);
3378 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3380 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3381 MGMT_STATUS_INVALID_PARAMS);
3385 if (cp->name_known) {
3386 e->name_state = NAME_KNOWN;
3389 e->name_state = NAME_NEEDED;
3390 hci_inquiry_cache_update_resolve(hdev, e);
3393 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3397 hci_dev_unlock(hdev);
3401 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3404 struct mgmt_cp_block_device *cp = data;
3408 BT_DBG("%s", hdev->name);
3410 if (!bdaddr_type_is_valid(cp->addr.type))
3411 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3412 MGMT_STATUS_INVALID_PARAMS,
3413 &cp->addr, sizeof(cp->addr));
3417 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3419 status = MGMT_STATUS_FAILED;
3421 status = MGMT_STATUS_SUCCESS;
3423 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3424 &cp->addr, sizeof(cp->addr));
3426 hci_dev_unlock(hdev);
3431 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3434 struct mgmt_cp_unblock_device *cp = data;
3438 BT_DBG("%s", hdev->name);
3440 if (!bdaddr_type_is_valid(cp->addr.type))
3441 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3442 MGMT_STATUS_INVALID_PARAMS,
3443 &cp->addr, sizeof(cp->addr));
3447 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3449 status = MGMT_STATUS_INVALID_PARAMS;
3451 status = MGMT_STATUS_SUCCESS;
3453 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3454 &cp->addr, sizeof(cp->addr));
3456 hci_dev_unlock(hdev);
3461 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3464 struct mgmt_cp_set_device_id *cp = data;
3465 struct hci_request req;
3469 BT_DBG("%s", hdev->name);
3471 source = __le16_to_cpu(cp->source);
3473 if (source > 0x0002)
3474 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3475 MGMT_STATUS_INVALID_PARAMS);
3479 hdev->devid_source = source;
3480 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3481 hdev->devid_product = __le16_to_cpu(cp->product);
3482 hdev->devid_version = __le16_to_cpu(cp->version);
3484 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3486 hci_req_init(&req, hdev);
3488 hci_req_run(&req, NULL);
3490 hci_dev_unlock(hdev);
3495 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3497 struct cmd_lookup match = { NULL, hdev };
3500 u8 mgmt_err = mgmt_status(status);
3502 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3503 cmd_status_rsp, &mgmt_err);
3507 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3510 new_settings(hdev, match.sk);
3516 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3519 struct mgmt_mode *cp = data;
3520 struct pending_cmd *cmd;
3521 struct hci_request req;
3522 u8 val, enabled, status;
3525 BT_DBG("request for %s", hdev->name);
3527 status = mgmt_le_support(hdev);
3529 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3532 if (cp->val != 0x00 && cp->val != 0x01)
3533 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3534 MGMT_STATUS_INVALID_PARAMS);
3539 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3541 /* The following conditions are ones which mean that we should
3542 * not do any HCI communication but directly send a mgmt
3543 * response to user space (after toggling the flag if
3546 if (!hdev_is_powered(hdev) || val == enabled ||
3547 hci_conn_num(hdev, LE_LINK) > 0) {
3548 bool changed = false;
3550 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3551 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3555 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3560 err = new_settings(hdev, sk);
3565 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3566 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3567 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3572 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3578 hci_req_init(&req, hdev);
3581 enable_advertising(&req);
3583 disable_advertising(&req);
3585 err = hci_req_run(&req, set_advertising_complete);
3587 mgmt_pending_remove(cmd);
3590 hci_dev_unlock(hdev);
3594 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3595 void *data, u16 len)
3597 struct mgmt_cp_set_static_address *cp = data;
3600 BT_DBG("%s", hdev->name);
3602 if (!lmp_le_capable(hdev))
3603 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3604 MGMT_STATUS_NOT_SUPPORTED);
3606 if (hdev_is_powered(hdev))
3607 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3608 MGMT_STATUS_REJECTED);
3610 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3611 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3612 return cmd_status(sk, hdev->id,
3613 MGMT_OP_SET_STATIC_ADDRESS,
3614 MGMT_STATUS_INVALID_PARAMS);
3616 /* Two most significant bits shall be set */
3617 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3618 return cmd_status(sk, hdev->id,
3619 MGMT_OP_SET_STATIC_ADDRESS,
3620 MGMT_STATUS_INVALID_PARAMS);
3625 bacpy(&hdev->static_addr, &cp->bdaddr);
3627 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3629 hci_dev_unlock(hdev);
3634 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3635 void *data, u16 len)
3637 struct mgmt_cp_set_scan_params *cp = data;
3638 __u16 interval, window;
3641 BT_DBG("%s", hdev->name);
3643 if (!lmp_le_capable(hdev))
3644 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3645 MGMT_STATUS_NOT_SUPPORTED);
3647 interval = __le16_to_cpu(cp->interval);
3649 if (interval < 0x0004 || interval > 0x4000)
3650 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3651 MGMT_STATUS_INVALID_PARAMS);
3653 window = __le16_to_cpu(cp->window);
3655 if (window < 0x0004 || window > 0x4000)
3656 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3657 MGMT_STATUS_INVALID_PARAMS);
3659 if (window > interval)
3660 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3661 MGMT_STATUS_INVALID_PARAMS);
3665 hdev->le_scan_interval = interval;
3666 hdev->le_scan_window = window;
3668 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3670 hci_dev_unlock(hdev);
3675 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3677 struct pending_cmd *cmd;
3679 BT_DBG("status 0x%02x", status);
3683 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3688 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3689 mgmt_status(status));
3691 struct mgmt_mode *cp = cmd->param;
3694 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3696 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3698 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3699 new_settings(hdev, cmd->sk);
3702 mgmt_pending_remove(cmd);
3705 hci_dev_unlock(hdev);
3708 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3709 void *data, u16 len)
3711 struct mgmt_mode *cp = data;
3712 struct pending_cmd *cmd;
3713 struct hci_request req;
3716 BT_DBG("%s", hdev->name);
3718 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
3719 hdev->hci_ver < BLUETOOTH_VER_1_2)
3720 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3721 MGMT_STATUS_NOT_SUPPORTED);
3723 if (cp->val != 0x00 && cp->val != 0x01)
3724 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3725 MGMT_STATUS_INVALID_PARAMS);
3727 if (!hdev_is_powered(hdev))
3728 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3729 MGMT_STATUS_NOT_POWERED);
3731 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3732 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3733 MGMT_STATUS_REJECTED);
3737 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3738 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3743 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3744 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3749 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3756 hci_req_init(&req, hdev);
3758 write_fast_connectable(&req, cp->val);
3760 err = hci_req_run(&req, fast_connectable_complete);
3762 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3763 MGMT_STATUS_FAILED);
3764 mgmt_pending_remove(cmd);
3768 hci_dev_unlock(hdev);
3773 static void set_bredr_scan(struct hci_request *req)
3775 struct hci_dev *hdev = req->hdev;
3778 /* Ensure that fast connectable is disabled. This function will
3779 * not do anything if the page scan parameters are already what
3782 write_fast_connectable(req, false);
3784 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3786 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3787 scan |= SCAN_INQUIRY;
3790 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3793 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3795 struct pending_cmd *cmd;
3797 BT_DBG("status 0x%02x", status);
3801 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3806 u8 mgmt_err = mgmt_status(status);
3808 /* We need to restore the flag if related HCI commands
3811 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3813 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
3815 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
3816 new_settings(hdev, cmd->sk);
3819 mgmt_pending_remove(cmd);
3822 hci_dev_unlock(hdev);
3825 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3827 struct mgmt_mode *cp = data;
3828 struct pending_cmd *cmd;
3829 struct hci_request req;
3832 BT_DBG("request for %s", hdev->name);
3834 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
3835 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3836 MGMT_STATUS_NOT_SUPPORTED);
3838 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3839 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3840 MGMT_STATUS_REJECTED);
3842 if (cp->val != 0x00 && cp->val != 0x01)
3843 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3844 MGMT_STATUS_INVALID_PARAMS);
3848 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3849 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3853 if (!hdev_is_powered(hdev)) {
3855 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
3856 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
3857 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3858 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3859 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
3862 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3864 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3868 err = new_settings(hdev, sk);
3872 /* Reject disabling when powered on */
3874 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3875 MGMT_STATUS_REJECTED);
3879 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
3880 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3885 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
3891 /* We need to flip the bit already here so that update_ad
3892 * generates the correct flags.
3894 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3896 hci_req_init(&req, hdev);
3898 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3899 set_bredr_scan(&req);
3903 err = hci_req_run(&req, set_bredr_complete);
3905 mgmt_pending_remove(cmd);
3908 hci_dev_unlock(hdev);
3912 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3914 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3916 if (key->master != 0x00 && key->master != 0x01)
3918 if (!bdaddr_type_is_le(key->addr.type))
3923 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3924 void *cp_data, u16 len)
3926 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3927 u16 key_count, expected_len;
3930 BT_DBG("request for %s", hdev->name);
3932 if (!lmp_le_capable(hdev))
3933 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3934 MGMT_STATUS_NOT_SUPPORTED);
3936 key_count = __le16_to_cpu(cp->key_count);
3938 expected_len = sizeof(*cp) + key_count *
3939 sizeof(struct mgmt_ltk_info);
3940 if (expected_len != len) {
3941 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3943 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3944 MGMT_STATUS_INVALID_PARAMS);
3947 BT_DBG("%s key_count %u", hdev->name, key_count);
3949 for (i = 0; i < key_count; i++) {
3950 struct mgmt_ltk_info *key = &cp->keys[i];
3952 if (!ltk_is_valid(key))
3953 return cmd_status(sk, hdev->id,
3954 MGMT_OP_LOAD_LONG_TERM_KEYS,
3955 MGMT_STATUS_INVALID_PARAMS);
3960 hci_smp_ltks_clear(hdev);
3962 for (i = 0; i < key_count; i++) {
3963 struct mgmt_ltk_info *key = &cp->keys[i];
3966 if (key->addr.type == BDADDR_LE_PUBLIC)
3967 addr_type = ADDR_LE_DEV_PUBLIC;
3969 addr_type = ADDR_LE_DEV_RANDOM;
3974 type = HCI_SMP_LTK_SLAVE;
3976 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
3977 type, 0, key->authenticated, key->val,
3978 key->enc_size, key->ediv, key->rand);
3981 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3984 hci_dev_unlock(hdev);
3989 static const struct mgmt_handler {
3990 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3994 } mgmt_handlers[] = {
3995 { NULL }, /* 0x0000 (no command) */
3996 { read_version, false, MGMT_READ_VERSION_SIZE },
3997 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3998 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3999 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4000 { set_powered, false, MGMT_SETTING_SIZE },
4001 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4002 { set_connectable, false, MGMT_SETTING_SIZE },
4003 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4004 { set_pairable, false, MGMT_SETTING_SIZE },
4005 { set_link_security, false, MGMT_SETTING_SIZE },
4006 { set_ssp, false, MGMT_SETTING_SIZE },
4007 { set_hs, false, MGMT_SETTING_SIZE },
4008 { set_le, false, MGMT_SETTING_SIZE },
4009 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4010 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4011 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4012 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4013 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4014 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4015 { disconnect, false, MGMT_DISCONNECT_SIZE },
4016 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4017 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4018 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4019 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4020 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4021 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4022 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4023 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4024 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4025 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4026 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4027 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4028 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4029 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4030 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4031 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4032 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4033 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4034 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4035 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4036 { set_advertising, false, MGMT_SETTING_SIZE },
4037 { set_bredr, false, MGMT_SETTING_SIZE },
4038 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4039 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
4043 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4047 struct mgmt_hdr *hdr;
4048 u16 opcode, index, len;
4049 struct hci_dev *hdev = NULL;
4050 const struct mgmt_handler *handler;
4053 BT_DBG("got %zu bytes", msglen);
4055 if (msglen < sizeof(*hdr))
4058 buf = kmalloc(msglen, GFP_KERNEL);
4062 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4068 opcode = __le16_to_cpu(hdr->opcode);
4069 index = __le16_to_cpu(hdr->index);
4070 len = __le16_to_cpu(hdr->len);
4072 if (len != msglen - sizeof(*hdr)) {
4077 if (index != MGMT_INDEX_NONE) {
4078 hdev = hci_dev_get(index);
4080 err = cmd_status(sk, index, opcode,
4081 MGMT_STATUS_INVALID_INDEX);
4085 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4086 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4087 err = cmd_status(sk, index, opcode,
4088 MGMT_STATUS_INVALID_INDEX);
4093 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4094 mgmt_handlers[opcode].func == NULL) {
4095 BT_DBG("Unknown op %u", opcode);
4096 err = cmd_status(sk, index, opcode,
4097 MGMT_STATUS_UNKNOWN_COMMAND);
4101 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4102 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4103 err = cmd_status(sk, index, opcode,
4104 MGMT_STATUS_INVALID_INDEX);
4108 handler = &mgmt_handlers[opcode];
4110 if ((handler->var_len && len < handler->data_len) ||
4111 (!handler->var_len && len != handler->data_len)) {
4112 err = cmd_status(sk, index, opcode,
4113 MGMT_STATUS_INVALID_PARAMS);
4118 mgmt_init_hdev(sk, hdev);
4120 cp = buf + sizeof(*hdr);
4122 err = handler->func(sk, hdev, cp, len);
4136 void mgmt_index_added(struct hci_dev *hdev)
4138 if (hdev->dev_type != HCI_BREDR)
4141 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4144 void mgmt_index_removed(struct hci_dev *hdev)
4146 u8 status = MGMT_STATUS_INVALID_INDEX;
4148 if (hdev->dev_type != HCI_BREDR)
4151 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4153 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4156 static void powered_complete(struct hci_dev *hdev, u8 status)
4158 struct cmd_lookup match = { NULL, hdev };
4160 BT_DBG("status 0x%02x", status);
4164 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4166 new_settings(hdev, match.sk);
4168 hci_dev_unlock(hdev);
4174 static int powered_update_hci(struct hci_dev *hdev)
4176 struct hci_request req;
4179 hci_req_init(&req, hdev);
4181 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4182 !lmp_host_ssp_capable(hdev)) {
4185 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4188 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4189 lmp_bredr_capable(hdev)) {
4190 struct hci_cp_write_le_host_supported cp;
4193 cp.simul = lmp_le_br_capable(hdev);
4195 /* Check first if we already have the right
4196 * host state (host features set)
4198 if (cp.le != lmp_host_le_capable(hdev) ||
4199 cp.simul != lmp_host_le_br_capable(hdev))
4200 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4204 if (lmp_le_capable(hdev)) {
4205 /* Set random address to static address if configured */
4206 if (bacmp(&hdev->static_addr, BDADDR_ANY))
4207 hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
4208 &hdev->static_addr);
4210 /* Make sure the controller has a good default for
4211 * advertising data. This also applies to the case
4212 * where BR/EDR was toggled during the AUTO_OFF phase.
4214 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4217 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4218 enable_advertising(&req);
4221 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4222 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4223 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4224 sizeof(link_sec), &link_sec);
4226 if (lmp_bredr_capable(hdev)) {
4227 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4228 set_bredr_scan(&req);
4234 return hci_req_run(&req, powered_complete);
4237 int mgmt_powered(struct hci_dev *hdev, u8 powered)
4239 struct cmd_lookup match = { NULL, hdev };
4240 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4241 u8 zero_cod[] = { 0, 0, 0 };
4244 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4248 if (powered_update_hci(hdev) == 0)
4251 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4256 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4257 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4259 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4260 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4261 zero_cod, sizeof(zero_cod), NULL);
4264 err = new_settings(hdev, match.sk);
4272 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4274 struct pending_cmd *cmd;
4277 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4281 if (err == -ERFKILL)
4282 status = MGMT_STATUS_RFKILLED;
4284 status = MGMT_STATUS_FAILED;
4286 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4288 mgmt_pending_remove(cmd);
4291 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4295 /* Nothing needed here if there's a pending command since that
4296 * commands request completion callback takes care of everything
4299 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4303 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4305 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4308 new_settings(hdev, NULL);
4311 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4315 /* Nothing needed here if there's a pending command since that
4316 * commands request completion callback takes care of everything
4319 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4323 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4325 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4328 new_settings(hdev, NULL);
4331 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4333 u8 mgmt_err = mgmt_status(status);
4335 if (scan & SCAN_PAGE)
4336 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4337 cmd_status_rsp, &mgmt_err);
4339 if (scan & SCAN_INQUIRY)
4340 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4341 cmd_status_rsp, &mgmt_err);
4344 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4347 struct mgmt_ev_new_link_key ev;
4349 memset(&ev, 0, sizeof(ev));
4351 ev.store_hint = persistent;
4352 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4353 ev.key.addr.type = BDADDR_BREDR;
4354 ev.key.type = key->type;
4355 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4356 ev.key.pin_len = key->pin_len;
4358 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4361 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4363 struct mgmt_ev_new_long_term_key ev;
4365 memset(&ev, 0, sizeof(ev));
4367 ev.store_hint = persistent;
4368 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4369 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
4370 ev.key.authenticated = key->authenticated;
4371 ev.key.enc_size = key->enc_size;
4372 ev.key.ediv = key->ediv;
4374 if (key->type == HCI_SMP_LTK)
4377 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4378 memcpy(ev.key.val, key->val, sizeof(key->val));
4380 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
4384 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4385 u8 addr_type, u32 flags, u8 *name, u8 name_len,
4389 struct mgmt_ev_device_connected *ev = (void *) buf;
4392 bacpy(&ev->addr.bdaddr, bdaddr);
4393 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4395 ev->flags = __cpu_to_le32(flags);
4398 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
4401 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
4402 eir_len = eir_append_data(ev->eir, eir_len,
4403 EIR_CLASS_OF_DEV, dev_class, 3);
4405 ev->eir_len = cpu_to_le16(eir_len);
4407 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
4408 sizeof(*ev) + eir_len, NULL);
4411 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4413 struct mgmt_cp_disconnect *cp = cmd->param;
4414 struct sock **sk = data;
4415 struct mgmt_rp_disconnect rp;
4417 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4418 rp.addr.type = cp->addr.type;
4420 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4426 mgmt_pending_remove(cmd);
4429 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
4431 struct hci_dev *hdev = data;
4432 struct mgmt_cp_unpair_device *cp = cmd->param;
4433 struct mgmt_rp_unpair_device rp;
4435 memset(&rp, 0, sizeof(rp));
4436 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4437 rp.addr.type = cp->addr.type;
4439 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
4441 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
4443 mgmt_pending_remove(cmd);
4446 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
4447 u8 link_type, u8 addr_type, u8 reason)
4449 struct mgmt_ev_device_disconnected ev;
4450 struct sock *sk = NULL;
4452 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
4454 bacpy(&ev.addr.bdaddr, bdaddr);
4455 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4458 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
4463 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4467 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
4468 u8 link_type, u8 addr_type, u8 status)
4470 struct mgmt_rp_disconnect rp;
4471 struct pending_cmd *cmd;
4473 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4476 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
4480 bacpy(&rp.addr.bdaddr, bdaddr);
4481 rp.addr.type = link_to_bdaddr(link_type, addr_type);
4483 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
4484 mgmt_status(status), &rp, sizeof(rp));
4486 mgmt_pending_remove(cmd);
4489 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4490 u8 addr_type, u8 status)
4492 struct mgmt_ev_connect_failed ev;
4494 bacpy(&ev.addr.bdaddr, bdaddr);
4495 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4496 ev.status = mgmt_status(status);
4498 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
4501 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4503 struct mgmt_ev_pin_code_request ev;
4505 bacpy(&ev.addr.bdaddr, bdaddr);
4506 ev.addr.type = BDADDR_BREDR;
4509 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
4513 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4516 struct pending_cmd *cmd;
4517 struct mgmt_rp_pin_code_reply rp;
4520 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4524 bacpy(&rp.addr.bdaddr, bdaddr);
4525 rp.addr.type = BDADDR_BREDR;
4527 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
4528 mgmt_status(status), &rp, sizeof(rp));
4530 mgmt_pending_remove(cmd);
4535 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4538 struct pending_cmd *cmd;
4539 struct mgmt_rp_pin_code_reply rp;
4542 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
4546 bacpy(&rp.addr.bdaddr, bdaddr);
4547 rp.addr.type = BDADDR_BREDR;
4549 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
4550 mgmt_status(status), &rp, sizeof(rp));
4552 mgmt_pending_remove(cmd);
4557 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4558 u8 link_type, u8 addr_type, __le32 value,
4561 struct mgmt_ev_user_confirm_request ev;
4563 BT_DBG("%s", hdev->name);
4565 bacpy(&ev.addr.bdaddr, bdaddr);
4566 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4567 ev.confirm_hint = confirm_hint;
4570 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
4574 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4575 u8 link_type, u8 addr_type)
4577 struct mgmt_ev_user_passkey_request ev;
4579 BT_DBG("%s", hdev->name);
4581 bacpy(&ev.addr.bdaddr, bdaddr);
4582 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4584 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
4588 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4589 u8 link_type, u8 addr_type, u8 status,
4592 struct pending_cmd *cmd;
4593 struct mgmt_rp_user_confirm_reply rp;
4596 cmd = mgmt_pending_find(opcode, hdev);
4600 bacpy(&rp.addr.bdaddr, bdaddr);
4601 rp.addr.type = link_to_bdaddr(link_type, addr_type);
4602 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
4605 mgmt_pending_remove(cmd);
4610 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4611 u8 link_type, u8 addr_type, u8 status)
4613 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4614 status, MGMT_OP_USER_CONFIRM_REPLY);
4617 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4618 u8 link_type, u8 addr_type, u8 status)
4620 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4622 MGMT_OP_USER_CONFIRM_NEG_REPLY);
4625 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4626 u8 link_type, u8 addr_type, u8 status)
4628 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4629 status, MGMT_OP_USER_PASSKEY_REPLY);
4632 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4633 u8 link_type, u8 addr_type, u8 status)
4635 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4637 MGMT_OP_USER_PASSKEY_NEG_REPLY);
4640 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4641 u8 link_type, u8 addr_type, u32 passkey,
4644 struct mgmt_ev_passkey_notify ev;
4646 BT_DBG("%s", hdev->name);
4648 bacpy(&ev.addr.bdaddr, bdaddr);
4649 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4650 ev.passkey = __cpu_to_le32(passkey);
4651 ev.entered = entered;
4653 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
4656 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4657 u8 addr_type, u8 status)
4659 struct mgmt_ev_auth_failed ev;
4661 bacpy(&ev.addr.bdaddr, bdaddr);
4662 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4663 ev.status = mgmt_status(status);
4665 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
4668 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4670 struct cmd_lookup match = { NULL, hdev };
4671 bool changed = false;
4675 u8 mgmt_err = mgmt_status(status);
4676 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4677 cmd_status_rsp, &mgmt_err);
4681 if (test_bit(HCI_AUTH, &hdev->flags)) {
4682 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4685 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4689 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4693 err = new_settings(hdev, match.sk);
4701 static void clear_eir(struct hci_request *req)
4703 struct hci_dev *hdev = req->hdev;
4704 struct hci_cp_write_eir cp;
4706 if (!lmp_ext_inq_capable(hdev))
4709 memset(hdev->eir, 0, sizeof(hdev->eir));
4711 memset(&cp, 0, sizeof(cp));
4713 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
4716 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4718 struct cmd_lookup match = { NULL, hdev };
4719 struct hci_request req;
4720 bool changed = false;
4724 u8 mgmt_err = mgmt_status(status);
4726 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4727 &hdev->dev_flags)) {
4728 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4729 err = new_settings(hdev, NULL);
4732 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4739 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4741 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4743 changed = test_and_clear_bit(HCI_HS_ENABLED,
4746 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4749 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4752 err = new_settings(hdev, match.sk);
4757 hci_req_init(&req, hdev);
4759 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4764 hci_req_run(&req, NULL);
4769 static void sk_lookup(struct pending_cmd *cmd, void *data)
4771 struct cmd_lookup *match = data;
4773 if (match->sk == NULL) {
4774 match->sk = cmd->sk;
4775 sock_hold(match->sk);
4779 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4782 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4785 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4786 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4787 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4790 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
4799 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4801 struct mgmt_cp_set_local_name ev;
4802 struct pending_cmd *cmd;
4807 memset(&ev, 0, sizeof(ev));
4808 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4809 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4811 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4813 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4815 /* If this is a HCI command related to powering on the
4816 * HCI dev don't send any mgmt signals.
4818 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4822 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4823 cmd ? cmd->sk : NULL);
4826 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4827 u8 *randomizer, u8 status)
4829 struct pending_cmd *cmd;
4832 BT_DBG("%s status %u", hdev->name, status);
4834 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4839 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4840 mgmt_status(status));
4842 struct mgmt_rp_read_local_oob_data rp;
4844 memcpy(rp.hash, hash, sizeof(rp.hash));
4845 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4847 err = cmd_complete(cmd->sk, hdev->id,
4848 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
4852 mgmt_pending_remove(cmd);
4857 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4858 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4859 ssp, u8 *eir, u16 eir_len)
4862 struct mgmt_ev_device_found *ev = (void *) buf;
4865 if (!hci_discovery_active(hdev))
4868 /* Leave 5 bytes for a potential CoD field */
4869 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4872 memset(buf, 0, sizeof(buf));
4874 bacpy(&ev->addr.bdaddr, bdaddr);
4875 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4878 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4880 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4883 memcpy(ev->eir, eir, eir_len);
4885 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4886 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4889 ev->eir_len = cpu_to_le16(eir_len);
4890 ev_size = sizeof(*ev) + eir_len;
4892 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
4895 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4896 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4898 struct mgmt_ev_device_found *ev;
4899 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4902 ev = (struct mgmt_ev_device_found *) buf;
4904 memset(buf, 0, sizeof(buf));
4906 bacpy(&ev->addr.bdaddr, bdaddr);
4907 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4910 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4913 ev->eir_len = cpu_to_le16(eir_len);
4915 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
4918 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4920 struct mgmt_ev_discovering ev;
4921 struct pending_cmd *cmd;
4923 BT_DBG("%s discovering %u", hdev->name, discovering);
4926 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4928 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4931 u8 type = hdev->discovery.type;
4933 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4935 mgmt_pending_remove(cmd);
4938 memset(&ev, 0, sizeof(ev));
4939 ev.type = hdev->discovery.type;
4940 ev.discovering = discovering;
4942 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
4945 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4947 struct pending_cmd *cmd;
4948 struct mgmt_ev_device_blocked ev;
4950 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4952 bacpy(&ev.addr.bdaddr, bdaddr);
4953 ev.addr.type = type;
4955 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4956 cmd ? cmd->sk : NULL);
4959 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4961 struct pending_cmd *cmd;
4962 struct mgmt_ev_device_unblocked ev;
4964 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4966 bacpy(&ev.addr.bdaddr, bdaddr);
4967 ev.addr.type = type;
4969 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4970 cmd ? cmd->sk : NULL);
4973 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
4975 BT_DBG("%s status %u", hdev->name, status);
4977 /* Clear the advertising mgmt setting if we failed to re-enable it */
4979 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4980 new_settings(hdev, NULL);
4984 void mgmt_reenable_advertising(struct hci_dev *hdev)
4986 struct hci_request req;
4988 if (hci_conn_num(hdev, LE_LINK) > 0)
4991 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4994 hci_req_init(&req, hdev);
4995 enable_advertising(&req);
4997 /* If this fails we have no option but to let user space know
4998 * that we've disabled advertising.
5000 if (hci_req_run(&req, adv_enable_complete) < 0) {
5001 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5002 new_settings(hdev, NULL);