2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
/* Protocol version/revision of the HCI Management interface that this
 * implementation reports to user space via MGMT_OP_READ_VERSION. */
35 #define MGMT_VERSION 1
36 #define MGMT_REVISION 3
/* Opcodes of the mgmt commands supported here; the table is dumped
 * verbatim by MGMT_OP_READ_COMMANDS (see read_commands() below).
 * NOTE(review): the original line numbering is non-contiguous, so some
 * entries and the closing "};" are missing from this extract -- verify
 * against the full file before compiling.
 */
38 static const u16 mgmt_commands[] = {
39 MGMT_OP_READ_INDEX_LIST,
42 MGMT_OP_SET_DISCOVERABLE,
43 MGMT_OP_SET_CONNECTABLE,
44 MGMT_OP_SET_FAST_CONNECTABLE,
46 MGMT_OP_SET_LINK_SECURITY,
50 MGMT_OP_SET_DEV_CLASS,
51 MGMT_OP_SET_LOCAL_NAME,
54 MGMT_OP_LOAD_LINK_KEYS,
55 MGMT_OP_LOAD_LONG_TERM_KEYS,
57 MGMT_OP_GET_CONNECTIONS,
58 MGMT_OP_PIN_CODE_REPLY,
59 MGMT_OP_PIN_CODE_NEG_REPLY,
60 MGMT_OP_SET_IO_CAPABILITY,
62 MGMT_OP_CANCEL_PAIR_DEVICE,
63 MGMT_OP_UNPAIR_DEVICE,
64 MGMT_OP_USER_CONFIRM_REPLY,
65 MGMT_OP_USER_CONFIRM_NEG_REPLY,
66 MGMT_OP_USER_PASSKEY_REPLY,
67 MGMT_OP_USER_PASSKEY_NEG_REPLY,
68 MGMT_OP_READ_LOCAL_OOB_DATA,
69 MGMT_OP_ADD_REMOTE_OOB_DATA,
70 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
71 MGMT_OP_START_DISCOVERY,
72 MGMT_OP_STOP_DISCOVERY,
75 MGMT_OP_UNBLOCK_DEVICE,
76 MGMT_OP_SET_DEVICE_ID,
77 MGMT_OP_SET_ADVERTISING,
/* Opcodes of the mgmt events this implementation can emit; dumped by
 * MGMT_OP_READ_COMMANDS alongside mgmt_commands[].
 * NOTE(review): non-contiguous numbering -- some entries and the
 * closing "};" are missing from this extract.
 */
80 static const u16 mgmt_events[] = {
81 MGMT_EV_CONTROLLER_ERROR,
83 MGMT_EV_INDEX_REMOVED,
85 MGMT_EV_CLASS_OF_DEV_CHANGED,
86 MGMT_EV_LOCAL_NAME_CHANGED,
88 MGMT_EV_NEW_LONG_TERM_KEY,
89 MGMT_EV_DEVICE_CONNECTED,
90 MGMT_EV_DEVICE_DISCONNECTED,
91 MGMT_EV_CONNECT_FAILED,
92 MGMT_EV_PIN_CODE_REQUEST,
93 MGMT_EV_USER_CONFIRM_REQUEST,
94 MGMT_EV_USER_PASSKEY_REQUEST,
98 MGMT_EV_DEVICE_BLOCKED,
99 MGMT_EV_DEVICE_UNBLOCKED,
100 MGMT_EV_DEVICE_UNPAIRED,
101 MGMT_EV_PASSKEY_NOTIFY,
/* Lifetime (in jiffies) of the service cache: 2 seconds. */
104 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* A controller counts as "powered" for mgmt purposes only when it is
 * up AND not merely in the transient auto-power-on/off state. */
106 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
107 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
/* NOTE(review): this list_head is a field of a struct whose opening
 * declaration is not visible in this extract (presumably
 * struct pending_cmd, used throughout below) -- confirm against the
 * full file. */
110 struct list_head list;
118 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte; see mgmt_status() below,
 * which falls back to MGMT_STATUS_FAILED for out-of-range codes.
 * NOTE(review): the closing "};" is missing from this extract. */
119 static u8 mgmt_status_table[] = {
121 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
122 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
123 MGMT_STATUS_FAILED, /* Hardware Failure */
124 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
125 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
126 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
127 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
128 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
129 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
130 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
131 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
132 MGMT_STATUS_BUSY, /* Command Disallowed */
133 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
134 MGMT_STATUS_REJECTED, /* Rejected Security */
135 MGMT_STATUS_REJECTED, /* Rejected Personal */
136 MGMT_STATUS_TIMEOUT, /* Host Timeout */
137 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
138 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
139 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
140 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
141 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
142 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
143 MGMT_STATUS_BUSY, /* Repeated Attempts */
144 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
145 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
146 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
147 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
148 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
149 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
150 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
151 MGMT_STATUS_FAILED, /* Unspecified Error */
152 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
153 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
154 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
155 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
156 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
157 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
158 MGMT_STATUS_FAILED, /* Unit Link Key Used */
159 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
160 MGMT_STATUS_TIMEOUT, /* Instant Passed */
161 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
162 MGMT_STATUS_FAILED, /* Transaction Collision */
163 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
164 MGMT_STATUS_REJECTED, /* QoS Rejected */
165 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
166 MGMT_STATUS_REJECTED, /* Insufficient Security */
167 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
168 MGMT_STATUS_BUSY, /* Role Switch Pending */
169 MGMT_STATUS_FAILED, /* Slot Violation */
170 MGMT_STATUS_FAILED, /* Role Switch Failed */
171 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
172 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
173 MGMT_STATUS_BUSY, /* Host Busy Pairing */
174 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
175 MGMT_STATUS_BUSY, /* Controller Busy */
176 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
177 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
178 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
179 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
180 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
183 bool mgmt_valid_hdev(struct hci_dev *hdev)
185 return hdev->dev_type == HCI_BREDR;
188 static u8 mgmt_status(u8 hci_status)
190 if (hci_status < ARRAY_SIZE(mgmt_status_table))
191 return mgmt_status_table[hci_status];
193 return MGMT_STATUS_FAILED;
/* Queue an MGMT_EV_CMD_STATUS event (command @cmd, status @status,
 * controller @index) on socket @sk.
 * NOTE(review): interior lines are missing from this extract
 * (skb/err declarations, allocation-failure check, ev->status
 * assignment, error cleanup, final return) -- restore from the full
 * file before compiling.
 */
196 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
199 struct mgmt_hdr *hdr;
200 struct mgmt_ev_cmd_status *ev;
203 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
205 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
209 hdr = (void *) skb_put(skb, sizeof(*hdr));
211 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
212 hdr->index = cpu_to_le16(index);
213 hdr->len = cpu_to_le16(sizeof(*ev));
215 ev = (void *) skb_put(skb, sizeof(*ev));
217 ev->opcode = cpu_to_le16(cmd);
219 err = sock_queue_rcv_skb(sk, skb);
/* Queue an MGMT_EV_CMD_COMPLETE event carrying @rp_len bytes of
 * response payload @rp for command @cmd on socket @sk.
 * NOTE(review): interior lines missing from this extract (skb/err
 * declarations, NULL checks, ev->status assignment, cleanup/return).
 */
226 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
227 void *rp, size_t rp_len)
230 struct mgmt_hdr *hdr;
231 struct mgmt_ev_cmd_complete *ev;
234 BT_DBG("sock %p", sk);
236 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
240 hdr = (void *) skb_put(skb, sizeof(*hdr));
242 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
243 hdr->index = cpu_to_le16(index);
244 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
246 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
247 ev->opcode = cpu_to_le16(cmd);
251 memcpy(ev->data, rp, rp_len);
253 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with MGMT_VERSION/MGMT_REVISION.
 * NOTE(review): interior lines missing from this extract (parameter
 * list tail, braces, sizeof(rp) continuation of the final call).
 */
260 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
263 struct mgmt_rp_read_version rp;
265 BT_DBG("sock %p", sk);
267 rp.version = MGMT_VERSION;
268 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
270 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: reply with the supported command and
 * event opcode tables (mgmt_commands[] then mgmt_events[]) as a packed
 * little-endian u16 list.
 * NOTE(review): interior lines missing from this extract (local
 * declarations for rp_size/i/opcode/err, kmalloc NULL check, kfree,
 * final return).
 */
274 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_commands *rp;
278 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
279 const u16 num_events = ARRAY_SIZE(mgmt_events);
284 BT_DBG("sock %p", sk);
286 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
288 rp = kmalloc(rp_size, GFP_KERNEL);
292 rp->num_commands = __constant_cpu_to_le16(num_commands);
293 rp->num_events = __constant_cpu_to_le16(num_events);
295 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
296 put_unaligned_le16(mgmt_commands[i], opcode);
298 for (i = 0; i < num_events; i++, opcode++)
299 put_unaligned_le16(mgmt_events[i], opcode);
301 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: walk hci_dev_list twice under the
 * read lock -- first to count eligible controllers, then to fill the
 * index array -- skipping devices still in setup, devices bound to a
 * user channel, and non-BR/EDR devices.
 * NOTE(review): interior lines missing from this extract (count/err
 * declarations, the first-pass count increment, kmalloc NULL-check
 * path, kfree, final return).
 */
308 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
311 struct mgmt_rp_read_index_list *rp;
317 BT_DBG("sock %p", sk);
319 read_lock(&hci_dev_list_lock);
322 list_for_each_entry(d, &hci_dev_list, list) {
323 if (!mgmt_valid_hdev(d))
329 rp_len = sizeof(*rp) + (2 * count);
330 rp = kmalloc(rp_len, GFP_ATOMIC);
332 read_unlock(&hci_dev_list_lock);
337 list_for_each_entry(d, &hci_dev_list, list) {
338 if (test_bit(HCI_SETUP, &d->dev_flags))
341 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
344 if (!mgmt_valid_hdev(d))
347 rp->index[count++] = cpu_to_le16(d->id);
348 BT_DBG("Added hci%u", d->id);
351 rp->num_controllers = cpu_to_le16(count);
352 rp_len = sizeof(*rp) + (2 * count);
354 read_unlock(&hci_dev_list_lock);
356 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the MGMT_SETTING_* bitmask of settings this controller could
 * support, based on its LMP feature bits: POWERED/PAIRABLE always,
 * SSP when SSP-capable, the BR/EDR group when BR/EDR-capable (with
 * FAST_CONNECTABLE gated on HCI >= 1.2), and LE/ADVERTISING when
 * LE-capable.
 * NOTE(review): interior lines missing from this extract (the
 * "u32 settings = 0;" initializer, closing braces, final return).
 */
364 static u32 get_supported_settings(struct hci_dev *hdev)
368 settings |= MGMT_SETTING_POWERED;
369 settings |= MGMT_SETTING_PAIRABLE;
371 if (lmp_ssp_capable(hdev))
372 settings |= MGMT_SETTING_SSP;
374 if (lmp_bredr_capable(hdev)) {
375 settings |= MGMT_SETTING_CONNECTABLE;
376 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
377 settings |= MGMT_SETTING_FAST_CONNECTABLE;
378 settings |= MGMT_SETTING_DISCOVERABLE;
379 settings |= MGMT_SETTING_BREDR;
380 settings |= MGMT_SETTING_LINK_SECURITY;
381 settings |= MGMT_SETTING_HS;
384 if (lmp_le_capable(hdev)) {
385 settings |= MGMT_SETTING_LE;
386 settings |= MGMT_SETTING_ADVERTISING;
/* Build the MGMT_SETTING_* bitmask reflecting the controller's CURRENT
 * state, derived from hdev->flags / hdev->dev_flags bits (contrast
 * with get_supported_settings(), which reports capability).
 * NOTE(review): interior lines missing from this extract (the
 * "u32 settings = 0;" initializer and final return).
 */
392 static u32 get_current_settings(struct hci_dev *hdev)
396 if (hdev_is_powered(hdev))
397 settings |= MGMT_SETTING_POWERED;
399 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
400 settings |= MGMT_SETTING_CONNECTABLE;
402 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
403 settings |= MGMT_SETTING_FAST_CONNECTABLE;
405 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
406 settings |= MGMT_SETTING_DISCOVERABLE;
408 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
409 settings |= MGMT_SETTING_PAIRABLE;
411 if (lmp_bredr_capable(hdev))
412 settings |= MGMT_SETTING_BREDR;
414 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
415 settings |= MGMT_SETTING_LE;
417 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
418 settings |= MGMT_SETTING_LINK_SECURITY;
420 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
421 settings |= MGMT_SETTING_SSP;
423 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
424 settings |= MGMT_SETTING_HS;
426 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
427 settings |= MGMT_SETTING_ADVERTISING;
/* SDP service class for the Device ID (PnP Information) record; it is
 * advertised via the dedicated EIR_DEVICE_ID field instead of the
 * UUID list, so create_uuid16_list() skips it. */
432 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the controller's 16-bit service UUIDs
 * to @data, writing at most @len bytes; returns the advanced write
 * pointer. Emits EIR_UUID16_ALL, downgraded to EIR_UUID16_SOME when
 * space runs out.
 * NOTE(review): interior lines missing from this extract (uuid16
 * declaration, the uuids_start initialisation/field setup, loop
 * braces, final return ptr).
 */
434 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
436 u8 *ptr = data, *uuids_start = NULL;
437 struct bt_uuid *uuid;
442 list_for_each_entry(uuid, &hdev->uuids, list) {
445 if (uuid->size != 16)
448 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
452 if (uuid16 == PNP_INFO_SVCLASS_ID)
458 uuids_start[1] = EIR_UUID16_ALL;
462 /* Stop if not enough space to put next UUID */
463 if ((ptr - data) + sizeof(u16) > len) {
464 uuids_start[1] = EIR_UUID16_SOME;
468 *ptr++ = (uuid16 & 0x00ff);
469 *ptr++ = (uuid16 & 0xff00) >> 8;
470 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing the controller's 32-bit service UUIDs
 * (EIR_UUID32_ALL, or EIR_UUID32_SOME on overflow); companion of
 * create_uuid16_list()/create_uuid128_list().
 * NOTE(review): interior lines missing from this extract (uuids_start
 * setup, loop braces, final return ptr).
 */
476 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
478 u8 *ptr = data, *uuids_start = NULL;
479 struct bt_uuid *uuid;
484 list_for_each_entry(uuid, &hdev->uuids, list) {
485 if (uuid->size != 32)
491 uuids_start[1] = EIR_UUID32_ALL;
495 /* Stop if not enough space to put next UUID */
496 if ((ptr - data) + sizeof(u32) > len) {
497 uuids_start[1] = EIR_UUID32_SOME;
501 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
503 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing the controller's 128-bit service UUIDs
 * (EIR_UUID128_ALL, or EIR_UUID128_SOME on overflow).
 * NOTE(review): interior lines missing from this extract (uuids_start
 * setup, loop braces, final return ptr).
 */
509 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
511 u8 *ptr = data, *uuids_start = NULL;
512 struct bt_uuid *uuid;
517 list_for_each_entry(uuid, &hdev->uuids, list) {
518 if (uuid->size != 128)
524 uuids_start[1] = EIR_UUID128_ALL;
528 /* Stop if not enough space to put next UUID */
529 if ((ptr - data) + 16 > len) {
530 uuids_start[1] = EIR_UUID128_SOME;
534 memcpy(ptr, uuid->uuid, 16);
536 uuids_start[0] += 16;
/* Compose the controller's Extended Inquiry Response payload into
 * @data: local name (short/complete), inquiry TX power, Device ID
 * record, then the 16/32/128-bit UUID lists, each bounded by
 * HCI_MAX_EIR_LENGTH.
 * NOTE(review): interior lines missing from this extract (ptr/name_len
 * declarations, the name-length truncation branch, per-field length
 * bytes and ptr advances).
 */
542 static void create_eir(struct hci_dev *hdev, u8 *data)
547 name_len = strlen(hdev->dev_name);
553 ptr[1] = EIR_NAME_SHORT;
555 ptr[1] = EIR_NAME_COMPLETE;
557 /* EIR Data length */
558 ptr[0] = name_len + 1;
560 memcpy(ptr + 2, hdev->dev_name, name_len);
562 ptr += (name_len + 2);
565 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
567 ptr[1] = EIR_TX_POWER;
568 ptr[2] = (u8) hdev->inq_tx_power;
573 if (hdev->devid_source > 0) {
575 ptr[1] = EIR_DEVICE_ID;
577 put_unaligned_le16(hdev->devid_source, ptr + 2);
578 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
579 put_unaligned_le16(hdev->devid_product, ptr + 6);
580 put_unaligned_le16(hdev->devid_version, ptr + 8);
585 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
586 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
587 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command on @req when needed: skipped while
 * powered off, without extended-inquiry support, with SSP disabled,
 * or while the service cache is active; also skipped when the newly
 * built EIR equals the cached copy in hdev->eir.
 * NOTE(review): early-return statements after each guard are missing
 * from this extract.
 */
590 static void update_eir(struct hci_request *req)
592 struct hci_dev *hdev = req->hdev;
593 struct hci_cp_write_eir cp;
595 if (!hdev_is_powered(hdev))
598 if (!lmp_ext_inq_capable(hdev))
601 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
604 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
607 memset(&cp, 0, sizeof(cp));
609 create_eir(hdev, cp.data);
611 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
614 memcpy(hdev->eir, cp.data, sizeof(cp.data));
616 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
619 static u8 get_service_classes(struct hci_dev *hdev)
621 struct bt_uuid *uuid;
624 list_for_each_entry(uuid, &hdev->uuids, list)
625 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command on @req from
 * minor/major class plus get_service_classes(); skipped while powered
 * off, while the service cache is active, or when the computed value
 * already matches hdev->dev_class.
 * NOTE(review): the "u8 cod[3];" declaration and the early-return
 * statements after the guards are missing from this extract.
 */
630 static void update_class(struct hci_request *req)
632 struct hci_dev *hdev = req->hdev;
635 BT_DBG("%s", hdev->name);
637 if (!hdev_is_powered(hdev))
640 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
643 cod[0] = hdev->minor_class;
644 cod[1] = hdev->major_class;
645 cod[2] = get_service_classes(hdev);
647 if (memcmp(cod, hdev->dev_class, 3) == 0)
650 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Delayed-work handler that expires the service cache: clears
 * HCI_SERVICE_CACHE and runs an HCI request (presumably refreshing
 * class/EIR -- the hci_req_add calls between lock and unlock are not
 * visible in this extract).
 * NOTE(review): the container_of() second line, hci_dev_lock(), and
 * the queued updates are missing here.
 */
653 static void service_cache_off(struct work_struct *work)
655 struct hci_dev *hdev = container_of(work, struct hci_dev,
657 struct hci_request req;
659 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
662 hci_req_init(&req, hdev);
669 hci_dev_unlock(hdev);
671 hci_req_run(&req, NULL);
/* First-contact initialisation for a controller taken over by mgmt:
 * runs once (guarded by test_and_set of HCI_MGMT), sets up the
 * service-cache delayed work, and clears the implicit pairable bit so
 * user space must opt in explicitly.
 * NOTE(review): the early return after the guard and part of the
 * explanatory comment are missing from this extract.
 */
674 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
676 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
679 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
681 /* Non-mgmt controlled devices get this bit set
682 * implicitly so that pairing works for them, however
683 * for mgmt we require user-space to explicitly enable
686 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: fill mgmt_rp_read_info with the
 * controller's address, HCI version/manufacturer, supported and
 * current settings, class of device and names, then reply.
 * NOTE(review): hci_dev_lock() and the sizeof(rp) continuation of the
 * final cmd_complete() call are missing from this extract.
 */
689 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
690 void *data, u16 data_len)
692 struct mgmt_rp_read_info rp;
694 BT_DBG("sock %p %s", sk, hdev->name);
698 memset(&rp, 0, sizeof(rp));
700 bacpy(&rp.bdaddr, &hdev->bdaddr);
702 rp.version = hdev->hci_ver;
703 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
705 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
706 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
708 memcpy(rp.dev_class, hdev->dev_class, 3);
710 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
711 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
713 hci_dev_unlock(hdev);
715 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command -- presumably its param buffer, socket
 * reference and the struct itself (body not visible in this extract;
 * confirm against the full file).
 */
719 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate and register a pending mgmt command: copy @len bytes of
 * @data into cmd->param and link it on hdev->mgmt_pending.
 * NOTE(review): interior lines missing from this extract (the len
 * parameter declaration, kmalloc NULL checks with kfree on failure,
 * cmd->sk assignment/sock_hold, final return cmd).
 */
726 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
727 struct hci_dev *hdev, void *data,
730 struct pending_cmd *cmd;
732 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
736 cmd->opcode = opcode;
737 cmd->index = hdev->id;
739 cmd->param = kmalloc(len, GFP_KERNEL);
746 memcpy(cmd->param, data, len);
751 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command on @hdev, restricted to @opcode
 * when @opcode is non-zero; uses the _safe iterator so callbacks may
 * remove entries.
 * NOTE(review): the callback's data parameter, the cb(cmd, data) call
 * and closing braces are missing from this extract.
 */
756 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
757 void (*cb)(struct pending_cmd *cmd,
761 struct pending_cmd *cmd, *tmp;
763 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
764 if (opcode > 0 && cmd->opcode != opcode)
771 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
773 struct pending_cmd *cmd;
775 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
776 if (cmd->opcode == opcode)
783 static void mgmt_pending_remove(struct pending_cmd *cmd)
785 list_del(&cmd->list);
786 mgmt_pending_free(cmd);
789 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
791 __le32 settings = cpu_to_le32(get_current_settings(hdev));
793 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler: validate the mode byte, reject when a
 * power change is already pending, short-circuit when the state is
 * already as requested, otherwise queue power_on/power_off work.
 * Cancels the pending auto-off and synthesises a completion when the
 * device was only provisionally powered (HCI_AUTO_OFF).
 * NOTE(review): interior lines missing from this extract (len/err
 * declarations, hci_dev_lock, BUSY status, mgmt_pending_add NULL
 * check, failed label, final return err).
 */
797 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
800 struct mgmt_mode *cp = data;
801 struct pending_cmd *cmd;
804 BT_DBG("request for %s", hdev->name);
806 if (cp->val != 0x00 && cp->val != 0x01)
807 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
808 MGMT_STATUS_INVALID_PARAMS);
812 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
813 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
818 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
819 cancel_delayed_work(&hdev->power_off);
822 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
824 err = mgmt_powered(hdev, 1);
829 if (!!cp->val == hdev_is_powered(hdev)) {
830 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
834 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
841 queue_work(hdev->req_workqueue, &hdev->power_on);
843 queue_work(hdev->req_workqueue, &hdev->power_off.work);
848 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all control sockets except @skip_sk:
 * build an skb with the mgmt header (index MGMT_INDEX_NONE when no
 * hdev), append @data, timestamp it, and hand it to
 * hci_send_to_control().
 * NOTE(review): interior lines missing from this extract (skb
 * declaration, -ENOMEM return, the if/else around the index
 * assignment, kfree_skb and final return 0).
 */
852 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
853 struct sock *skip_sk)
856 struct mgmt_hdr *hdr;
858 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
862 hdr = (void *) skb_put(skb, sizeof(*hdr));
863 hdr->opcode = cpu_to_le16(event);
865 hdr->index = cpu_to_le16(hdev->id);
867 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
868 hdr->len = cpu_to_le16(data_len);
871 memcpy(skb_put(skb, data_len), data, data_len);
874 __net_timestamp(skb);
876 hci_send_to_control(skb, skip_sk);
882 static int new_settings(struct hci_dev *hdev, struct sock *skip)
886 ev = cpu_to_le32(get_current_settings(hdev));
888 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* NOTE(review): field of the lookup struct used as "struct cmd_lookup"
 * by settings_rsp()/le_enable_complete() below; its opening
 * declaration and sibling fields (sk, at least) are not visible in
 * this extract. */
893 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with
 * the current settings, remember the first responder's socket in the
 * cmd_lookup match (held so the caller can skip it when broadcasting
 * new_settings), then free the command.
 * NOTE(review): the match->sk assignment inside the if-branch and the
 * closing braces are missing from this extract.
 */
897 static void settings_rsp(struct pending_cmd *cmd, void *data)
899 struct cmd_lookup *match = data;
901 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
903 list_del(&cmd->list);
905 if (match->sk == NULL) {
907 sock_hold(match->sk);
910 mgmt_pending_free(cmd);
913 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
917 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
918 mgmt_pending_remove(cmd);
/* MGMT_OP_SET_DISCOVERABLE handler: validate mode and timeout (a
 * timeout is only allowed when enabling and when powered), reject
 * while a discoverable/connectable change is pending or while not
 * connectable, handle the powered-off case purely via dev_flags, and
 * otherwise issue HCI Write Scan Enable with inquiry scan toggled.
 * NOTE(review): interior lines missing from this extract (timeout/
 * scan/err declarations, hci_dev_lock, BUSY status, several branch
 * closers, the scan |= SCAN_PAGE base value, the pending-cmd NULL
 * check, failed label and final return err).
 */
921 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
924 struct mgmt_cp_set_discoverable *cp = data;
925 struct pending_cmd *cmd;
930 BT_DBG("request for %s", hdev->name);
932 if (!lmp_bredr_capable(hdev))
933 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
934 MGMT_STATUS_NOT_SUPPORTED);
936 if (cp->val != 0x00 && cp->val != 0x01)
937 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
938 MGMT_STATUS_INVALID_PARAMS);
940 timeout = __le16_to_cpu(cp->timeout);
941 if (!cp->val && timeout > 0)
942 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
943 MGMT_STATUS_INVALID_PARAMS);
947 if (!hdev_is_powered(hdev) && timeout > 0) {
948 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
949 MGMT_STATUS_NOT_POWERED);
953 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
954 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
955 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
960 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
961 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
962 MGMT_STATUS_REJECTED);
966 if (!hdev_is_powered(hdev)) {
967 bool changed = false;
969 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
970 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
974 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
979 err = new_settings(hdev, sk);
984 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
985 if (hdev->discov_timeout > 0) {
986 cancel_delayed_work(&hdev->discov_off);
987 hdev->discov_timeout = 0;
990 if (cp->val && timeout > 0) {
991 hdev->discov_timeout = timeout;
992 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
993 msecs_to_jiffies(hdev->discov_timeout * 1000));
996 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1000 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1009 scan |= SCAN_INQUIRY;
1011 cancel_delayed_work(&hdev->discov_off);
1013 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1015 mgmt_pending_remove(cmd);
1018 hdev->discov_timeout = timeout;
1021 hci_dev_unlock(hdev);
/* Queue page-scan parameter updates on @req: interlaced scan with a
 * 160 ms interval when @enable, otherwise the standard 1.28 s default;
 * window is 11.25 ms either way. Commands are only queued when the
 * requested values differ from the cached ones; no-op before HCI 1.2.
 * NOTE(review): the "u8 type;" declaration, the early return after the
 * version guard, the if/else around @enable, and the sizeof(acp)/&acp
 * arguments of the first hci_req_add are missing from this extract.
 */
1025 static void write_fast_connectable(struct hci_request *req, bool enable)
1027 struct hci_dev *hdev = req->hdev;
1028 struct hci_cp_write_page_scan_activity acp;
1031 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1035 type = PAGE_SCAN_TYPE_INTERLACED;
1037 /* 160 msec page scan interval */
1038 acp.interval = __constant_cpu_to_le16(0x0100);
1040 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1042 /* default 1.28 sec page scan */
1043 acp.interval = __constant_cpu_to_le16(0x0800);
1046 acp.window = __constant_cpu_to_le16(0x0012);
1048 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1049 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1050 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1053 if (hdev->page_scan_type != type)
1054 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI-request completion hook for set_connectable(): under the dev
 * lock, answer the pending SET_CONNECTABLE command with the current
 * settings and remove it.
 * NOTE(review): hci_dev_lock(), the NULL check on the found command
 * with its goto, and the status-failure branch are missing from this
 * extract.
 */
1057 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1059 struct pending_cmd *cmd;
1061 BT_DBG("status 0x%02x", status);
1065 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1069 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1071 mgmt_pending_remove(cmd);
1074 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler: validate the mode byte, handle the
 * powered-off case through dev_flags (disabling connectable also
 * clears discoverable), reject while another scan-mode change is
 * pending, and otherwise build an HCI request that writes the scan
 * enable value and, when needed, disables fast connectable.
 * NOTE(review): interior lines missing from this extract (scan/err
 * declarations, hci_dev_lock, changed = true, BUSY status, the
 * scan-value computation with the ISCAN preservation branch, the
 * pending-cmd NULL check, failed label and final return err).
 */
1077 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1080 struct mgmt_mode *cp = data;
1081 struct pending_cmd *cmd;
1082 struct hci_request req;
1086 BT_DBG("request for %s", hdev->name);
1088 if (!lmp_bredr_capable(hdev))
1089 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1090 MGMT_STATUS_NOT_SUPPORTED);
1092 if (cp->val != 0x00 && cp->val != 0x01)
1093 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1094 MGMT_STATUS_INVALID_PARAMS);
1098 if (!hdev_is_powered(hdev)) {
1099 bool changed = false;
1101 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1105 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1107 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1108 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1111 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1116 err = new_settings(hdev, sk);
1121 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1122 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1123 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1128 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1129 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1133 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1144 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1145 hdev->discov_timeout > 0)
1146 cancel_delayed_work(&hdev->discov_off);
1149 hci_req_init(&req, hdev);
1151 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1153 /* If we're going from non-connectable to connectable or
1154 * vice-versa when fast connectable is enabled ensure that fast
1155 * connectable gets disabled. write_fast_connectable won't do
1156 * anything if the page scan parameters are already what they
1159 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1160 write_fast_connectable(&req, false);
1162 err = hci_req_run(&req, set_connectable_complete);
1164 mgmt_pending_remove(cmd);
1167 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: pure dev_flags toggle of
 * HCI_PAIRABLE -- no HCI traffic needed -- followed by a settings
 * response and a new_settings broadcast.
 * NOTE(review): interior lines missing from this extract (err
 * declaration, hci_dev_lock, the if (cp->val) around the set/clear
 * pair, the error goto, unlock label and final return err).
 */
1171 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1174 struct mgmt_mode *cp = data;
1177 BT_DBG("request for %s", hdev->name);
1179 if (cp->val != 0x00 && cp->val != 0x01)
1180 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1181 MGMT_STATUS_INVALID_PARAMS);
1186 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1188 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1190 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1194 err = new_settings(hdev, sk);
1197 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: toggle link-level authentication.
 * Powered-off changes are recorded in HCI_LINK_SECURITY only; when
 * powered, no-op if HCI_AUTH already matches, otherwise send HCI
 * Write Auth Enable with a pending command tracking completion.
 * NOTE(review): interior lines missing from this extract (val/err
 * declarations, hci_dev_lock, changed = true, BUSY status, val
 * assignment from cp->val, the pending-cmd NULL check, failed label
 * and final return err).
 */
1201 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1204 struct mgmt_mode *cp = data;
1205 struct pending_cmd *cmd;
1209 BT_DBG("request for %s", hdev->name);
1211 if (!lmp_bredr_capable(hdev))
1212 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1213 MGMT_STATUS_NOT_SUPPORTED);
1215 if (cp->val != 0x00 && cp->val != 0x01)
1216 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1217 MGMT_STATUS_INVALID_PARAMS);
1221 if (!hdev_is_powered(hdev)) {
1222 bool changed = false;
1224 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1225 &hdev->dev_flags)) {
1226 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1230 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1235 err = new_settings(hdev, sk);
1240 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1241 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1248 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1249 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1253 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1259 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1261 mgmt_pending_remove(cmd);
1266 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: toggle Secure Simple Pairing. Mirrors
 * set_link_security(): flag-only while powered off, no-op when
 * HCI_SSP_ENABLED already matches, otherwise send HCI Write SSP Mode
 * with a pending command tracking completion.
 * NOTE(review): interior lines missing from this extract (val/err
 * declarations, hci_dev_lock, changed = true, BUSY status, the
 * pending-cmd NULL check, failed label and final return err).
 */
1270 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1272 struct mgmt_mode *cp = data;
1273 struct pending_cmd *cmd;
1277 BT_DBG("request for %s", hdev->name);
1279 if (!lmp_ssp_capable(hdev))
1280 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1281 MGMT_STATUS_NOT_SUPPORTED);
1283 if (cp->val != 0x00 && cp->val != 0x01)
1284 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1285 MGMT_STATUS_INVALID_PARAMS);
1291 if (!hdev_is_powered(hdev)) {
1292 bool changed = false;
1294 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1295 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1299 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1304 err = new_settings(hdev, sk);
1309 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1310 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1315 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1316 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1320 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1326 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1328 mgmt_pending_remove(cmd);
1333 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: High Speed (AMP) support is a host-side
 * flag only, so just set/clear HCI_HS_ENABLED and reply -- no HCI
 * command involved.
 * NOTE(review): the "if (cp->val)" line selecting between set_bit and
 * clear_bit is missing from this extract.
 */
1337 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1339 struct mgmt_mode *cp = data;
1341 BT_DBG("request for %s", hdev->name);
1343 if (!lmp_bredr_capable(hdev))
1344 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1345 MGMT_STATUS_NOT_SUPPORTED);
1347 if (cp->val != 0x00 && cp->val != 0x01)
1348 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1349 MGMT_STATUS_INVALID_PARAMS);
1352 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1354 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1356 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* HCI-request completion hook for set_le(): on failure, fail every
 * pending SET_LE command with the translated status; on success,
 * answer them with the current settings and broadcast new_settings,
 * skipping the first responder's socket.
 * NOTE(review): the "if (status)" branch structure, the &mgmt_err
 * continuation, early return, and the sock_put on match.sk are
 * missing from this extract.
 */
1359 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1361 struct cmd_lookup match = { NULL, hdev };
1364 u8 mgmt_err = mgmt_status(status);
1366 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1371 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1373 new_settings(hdev, match.sk);
/* MGMT_OP_SET_LE handler: toggle LE host support. Rejected on
 * LE-only controllers (no toggling allowed), flag-only while powered
 * off or when already in the requested host state; otherwise build an
 * HCI request that disables advertising first if needed and writes
 * LE Host Supported.
 * NOTE(review): interior lines missing from this extract (val/
 * enabled/err declarations, hci_dev_lock, changed = true, BUSY
 * status, the pending-cmd NULL check, hci_cp.le = val assignment,
 * val inversion for the adv-disable case, unlock label and final
 * return err).
 */
1379 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1381 struct mgmt_mode *cp = data;
1382 struct hci_cp_write_le_host_supported hci_cp;
1383 struct pending_cmd *cmd;
1384 struct hci_request req;
1388 BT_DBG("request for %s", hdev->name);
1390 if (!lmp_le_capable(hdev))
1391 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1392 MGMT_STATUS_NOT_SUPPORTED);
1394 if (cp->val != 0x00 && cp->val != 0x01)
1395 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1396 MGMT_STATUS_INVALID_PARAMS);
1398 /* LE-only devices do not allow toggling LE on/off */
1399 if (!lmp_bredr_capable(hdev))
1400 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1401 MGMT_STATUS_REJECTED);
1406 enabled = lmp_host_le_capable(hdev);
1408 if (!hdev_is_powered(hdev) || val == enabled) {
1409 bool changed = false;
1411 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1412 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1416 if (!val && test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
1417 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
1421 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1426 err = new_settings(hdev, sk);
1431 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1432 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1433 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1438 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1444 memset(&hci_cp, 0, sizeof(hci_cp));
1448 hci_cp.simul = lmp_le_br_capable(hdev);
1451 hci_req_init(&req, hdev);
1453 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags) && !val)
1454 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(val), &val);
1456 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1459 err = hci_req_run(&req, le_enable_complete);
1461 mgmt_pending_remove(cmd);
1464 hci_dev_unlock(hdev);
1468 /* This is a helper function to test for pending mgmt commands that can
1469 * cause CoD or EIR HCI commands. We can only allow one such pending
1470 * mgmt command at a time since otherwise we cannot easily track what
1471 * the current values are, will be, and based on that calculate if a new
1472 * HCI command needs to be sent and if yes with what value.
/* NOTE(review): the "return true" inside the switch cases, the
 * default/closing braces, and the final "return false" are missing
 * from this extract. */
1474 static bool pending_eir_or_class(struct hci_dev *hdev)
1476 struct pending_cmd *cmd;
1478 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1479 switch (cmd->opcode) {
1480 case MGMT_OP_ADD_UUID:
1481 case MGMT_OP_REMOVE_UUID:
1482 case MGMT_OP_SET_DEV_CLASS:
1483 case MGMT_OP_SET_POWERED:
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; UUIDs sharing this 96-bit tail can be
 * shortened to 16 or 32 bits (see get_uuid_size()).
 * NOTE(review): the closing "};" is missing from this extract. */
1491 static const u8 bluetooth_base_uuid[] = {
1492 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1493 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID by its shortest representation: 128 when it
 * does not share the Bluetooth base UUID tail, otherwise 16 or 32
 * depending on the value of its top 32 bits.
 * NOTE(review): the "return 128;" after the memcmp, the u32 val
 * declaration, and the final 16/32 selection are missing from this
 * extract.
 */
1496 static u8 get_uuid_size(const u8 *uuid)
1500 if (memcmp(uuid, bluetooth_base_uuid, 12))
1503 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for class-of-device-affecting commands
 * (add/remove UUID, set device class): under the dev lock, complete
 * the pending @mgmt_op with the translated status and the current
 * dev_class, then remove it.
 * NOTE(review): hci_dev_lock() and the NULL check/goto after
 * mgmt_pending_find() are missing from this extract.
 */
1510 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1512 struct pending_cmd *cmd;
1516 cmd = mgmt_pending_find(mgmt_op, hdev);
1520 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1521 hdev->dev_class, 3);
1523 mgmt_pending_remove(cmd);
1526 hci_dev_unlock(hdev);
1529 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1531 BT_DBG("status 0x%02x", status);
1533 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: reject while another class/EIR-affecting
 * command is pending, record the new UUID on hdev->uuids, then run an
 * HCI request updating class and EIR; -ENODATA from hci_req_run means
 * nothing needed updating, so complete immediately with the current
 * dev_class.
 * NOTE(review): interior lines missing from this extract (err
 * declaration, hci_dev_lock, BUSY status, kmalloc NULL check,
 * update_class/update_eir calls on the request, goto failed on real
 * errors, the pending-cmd NULL check, failed label and final return
 * err).
 */
1536 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1538 struct mgmt_cp_add_uuid *cp = data;
1539 struct pending_cmd *cmd;
1540 struct hci_request req;
1541 struct bt_uuid *uuid;
1544 BT_DBG("request for %s", hdev->name);
1548 if (pending_eir_or_class(hdev)) {
1549 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1554 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1560 memcpy(uuid->uuid, cp->uuid, 16);
1561 uuid->svc_hint = cp->svc_hint;
1562 uuid->size = get_uuid_size(cp->uuid);
1564 list_add_tail(&uuid->list, &hdev->uuids);
1566 hci_req_init(&req, hdev);
1571 err = hci_req_run(&req, add_uuid_complete);
1573 if (err != -ENODATA)
1576 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1577 hdev->dev_class, 3);
1581 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1590 hci_dev_unlock(hdev);
/* Re-arm the service cache: when the adapter is powered and the
 * HCI_SERVICE_CACHE flag was not already set, schedule the delayed
 * service_cache work. Return value semantics (caller uses it to decide
 * whether to reply immediately) are partly outside this excerpt.
 */
1594 static bool enable_service_cache(struct hci_dev *hdev)
1596 if (!hdev_is_powered(hdev))
1599 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1600 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for Remove UUID: forward the status to the shared
 * class-operation completion handler.
 */
1608 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1610 BT_DBG("status 0x%02x", status);
1612 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID, or all of them
 * when the all-zero wildcard UUID is given. Rejected with BUSY while a
 * class/EIR-affecting command is pending. Like add_uuid(), -ENODATA from
 * hci_req_run() means no HCI traffic was needed and the reply is
 * immediate; otherwise a pending command waits for remove_uuid_complete().
 */
1615 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1618 struct mgmt_cp_remove_uuid *cp = data;
1619 struct pending_cmd *cmd;
1620 struct bt_uuid *match, *tmp;
1621 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1622 struct hci_request req;
1625 BT_DBG("request for %s", hdev->name);
1629 if (pending_eir_or_class(hdev)) {
1630 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID acts as a wildcard: clear every stored UUID. */
1635 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1636 err = hci_uuids_clear(hdev);
1638 if (enable_service_cache(hdev)) {
1639 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1640 0, hdev->dev_class, 3);
1649 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1650 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1653 list_del(&match->list);
/* NOTE(review): INVALID_PARAMS is presumably sent when nothing matched;
 * the "found" bookkeeping lives in elided lines.
 */
1659 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1660 MGMT_STATUS_INVALID_PARAMS);
1665 hci_req_init(&req, hdev);
1670 err = hci_req_run(&req, remove_uuid_complete);
1672 if (err != -ENODATA)
1675 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1676 hdev->dev_class, 3);
1680 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1689 hci_dev_unlock(hdev);
/* HCI request callback for Set Device Class: forward the status to the
 * shared class-operation completion handler.
 */
1693 static void set_class_complete(struct hci_dev *hdev, u8 status)
1695 BT_DBG("status 0x%02x", status);
1697 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor Class of Device.
 * BR/EDR-only; rejected while a class/EIR command is pending. The low two
 * bits of minor and the top three bits of major are reserved by the
 * Assigned Numbers format and must be zero.
 */
1700 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1703 struct mgmt_cp_set_dev_class *cp = data;
1704 struct pending_cmd *cmd;
1705 struct hci_request req;
1708 BT_DBG("request for %s", hdev->name);
1710 if (!lmp_bredr_capable(hdev))
1711 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1712 MGMT_STATUS_NOT_SUPPORTED);
1716 if (pending_eir_or_class(hdev)) {
1717 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved CoD format bits must be clear. */
1722 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1723 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1724 MGMT_STATUS_INVALID_PARAMS);
1728 hdev->major_class = cp->major;
1729 hdev->minor_class = cp->minor;
/* Powered off: just store the values and reply; HCI update happens at
 * power-on.
 */
1731 if (!hdev_is_powered(hdev)) {
1732 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1733 hdev->dev_class, 3);
1737 hci_req_init(&req, hdev);
1739 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
/* Drop the dev lock before cancel_delayed_work_sync(): the
 * service_cache worker may itself take the lock, so holding it
 * here could deadlock.
 */
1740 hci_dev_unlock(hdev);
1741 cancel_delayed_work_sync(&hdev->service_cache);
1748 err = hci_req_run(&req, set_class_complete);
1750 if (err != -ENODATA)
1753 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1754 hdev->dev_class, 3);
1758 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1767 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list and set/clear the debug-keys policy flag.
 * All entries are validated (BR/EDR address type only) before the store
 * is cleared, so a bad list never partially replaces the old keys.
 */
1771 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1774 struct mgmt_cp_load_link_keys *cp = data;
1775 u16 key_count, expected_len;
1778 key_count = __le16_to_cpu(cp->key_count);
/* NOTE(review): expected_len is u16, so a huge key_count could wrap this
 * arithmetic — confirm against the transport's maximum message size.
 */
1780 expected_len = sizeof(*cp) + key_count *
1781 sizeof(struct mgmt_link_key_info);
1782 if (expected_len != len) {
1783 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1785 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1786 MGMT_STATUS_INVALID_PARAMS);
1789 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1790 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1791 MGMT_STATUS_INVALID_PARAMS);
1793 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: validate every address type before mutating state. */
1796 for (i = 0; i < key_count; i++) {
1797 struct mgmt_link_key_info *key = &cp->keys[i];
1799 if (key->addr.type != BDADDR_BREDR)
1800 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1801 MGMT_STATUS_INVALID_PARAMS);
1806 hci_link_keys_clear(hdev);
1809 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1811 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
/* Second pass: store the now-validated keys. */
1813 for (i = 0; i < key_count; i++) {
1814 struct mgmt_link_key_info *key = &cp->keys[i];
1816 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1817 key->type, key->pin_len);
1820 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1822 hci_dev_unlock(hdev);
/* Emit a Device Unpaired mgmt event for @bdaddr/@addr_type to all
 * listeners except @skip_sk (the socket that requested the unpair).
 */
1827 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1828 u8 addr_type, struct sock *skip_sk)
1830 struct mgmt_ev_device_unpaired ev;
1832 bacpy(&ev.addr.bdaddr, bdaddr);
1833 ev.addr.type = addr_type;
1835 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the link key (BR/EDR) or LTK (LE)
 * for the given address and, when requested and a connection exists,
 * disconnect it. When a disconnect is issued the reply is deferred via a
 * pending command until the disconnection completes.
 */
1839 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1842 struct mgmt_cp_unpair_device *cp = data;
1843 struct mgmt_rp_unpair_device rp;
1844 struct hci_cp_disconnect dc;
1845 struct pending_cmd *cmd;
1846 struct hci_conn *conn;
1849 memset(&rp, 0, sizeof(rp));
1850 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1851 rp.addr.type = cp->addr.type;
1853 if (!bdaddr_type_is_valid(cp->addr.type))
1854 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1855 MGMT_STATUS_INVALID_PARAMS,
1858 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1859 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1860 MGMT_STATUS_INVALID_PARAMS,
1865 if (!hdev_is_powered(hdev)) {
1866 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1867 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* BR/EDR keys and LE LTKs live in separate stores. */
1871 if (cp->addr.type == BDADDR_BREDR)
1872 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1874 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1877 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1878 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1882 if (cp->disconnect) {
1883 if (cp->addr.type == BDADDR_BREDR)
1884 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1887 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection to tear down: reply now and notify listeners. */
1894 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1896 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1900 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1907 dc.handle = cpu_to_le16(conn->handle);
1908 dc.reason = 0x13; /* Remote User Terminated Connection */
1909 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1911 mgmt_pending_remove(cmd);
1914 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: tear down an existing ACL (BR/EDR) or LE
 * connection. Only one Disconnect may be in flight per adapter (BUSY
 * otherwise); the reply is deferred via a pending command until the HCI
 * disconnect completes.
 */
1918 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1921 struct mgmt_cp_disconnect *cp = data;
1922 struct mgmt_rp_disconnect rp;
1923 struct hci_cp_disconnect dc;
1924 struct pending_cmd *cmd;
1925 struct hci_conn *conn;
1930 memset(&rp, 0, sizeof(rp));
1931 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1932 rp.addr.type = cp->addr.type;
1934 if (!bdaddr_type_is_valid(cp->addr.type))
1935 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1936 MGMT_STATUS_INVALID_PARAMS,
1941 if (!test_bit(HCI_UP, &hdev->flags)) {
1942 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1943 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1947 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1948 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1949 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1953 if (cp->addr.type == BDADDR_BREDR)
1954 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1957 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED states mean there is nothing to disconnect yet. */
1959 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1960 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1961 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1965 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1971 dc.handle = cpu_to_le16(conn->handle);
1972 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1974 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1976 mgmt_pending_remove(cmd);
1979 hci_dev_unlock(hdev);
/* Map an HCI link type + LE address type pair to the mgmt BDADDR_* address
 * type exposed to userspace. The LE_LINK case label is elided here; BR/EDR
 * is the default fallback.
 */
1983 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1985 switch (link_type) {
1987 switch (addr_type) {
1988 case ADDR_LE_DEV_PUBLIC:
1989 return BDADDR_LE_PUBLIC;
1992 /* Fallback to LE Random address type */
1993 return BDADDR_LE_RANDOM;
1997 /* Fallback to BR/EDR type */
1998 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections. The reply buffer is sized from a first pass
 * over the connection hash, then filled in a second pass that skips
 * SCO/eSCO links, so rp_len is recomputed before sending.
 */
2002 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2005 struct mgmt_rp_get_connections *rp;
2015 if (!hdev_is_powered(hdev)) {
2016 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2017 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the reply allocation. */
2022 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2023 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2027 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2028 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO audio links. */
2035 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2036 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2038 bacpy(&rp->addr[i].bdaddr, &c->dst);
2039 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2040 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2045 rp->conn_count = cpu_to_le16(i);
2047 /* Recalculate length in case of filtered SCO connections, etc */
2048 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2050 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2056 hci_dev_unlock(hdev);
/* Queue a pending PIN Code Negative Reply and send the corresponding HCI
 * command; the pending entry is dropped if the HCI send fails.
 */
2060 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2061 struct mgmt_cp_pin_code_neg_reply *cp)
2063 struct pending_cmd *cmd;
2066 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2071 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2072 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2074 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN to the
 * controller. A connection requiring high security must use a full
 * 16-byte PIN; a shorter one is auto-converted into a negative reply.
 */
2079 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2082 struct hci_conn *conn;
2083 struct mgmt_cp_pin_code_reply *cp = data;
2084 struct hci_cp_pin_code_reply reply;
2085 struct pending_cmd *cmd;
2092 if (!hdev_is_powered(hdev)) {
2093 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2094 MGMT_STATUS_NOT_POWERED);
2098 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2100 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2101 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a 16-digit PIN; reject shorter ones by sending
 * a negative reply to the controller on the user's behalf.
 */
2105 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2106 struct mgmt_cp_pin_code_neg_reply ncp;
2108 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2110 BT_ERR("PIN code is not 16 bytes long");
2112 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2114 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2115 MGMT_STATUS_INVALID_PARAMS);
2120 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2126 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2127 reply.pin_len = cp->pin_len;
2128 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2130 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2132 mgmt_pending_remove(cmd);
2135 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairing attempts. Purely local state; replies immediately.
 */
2139 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2142 struct mgmt_cp_set_io_capability *cp = data;
2148 hdev->io_capability = cp->io_capability;
2150 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2151 hdev->io_capability);
2153 hci_dev_unlock(hdev);
2155 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending Pair Device command whose user_data points at @conn,
 * or NULL-equivalent if none (return statements elided in this excerpt).
 */
2159 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2161 struct hci_dev *hdev = conn->hdev;
2162 struct pending_cmd *cmd;
2164 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2165 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2168 if (cmd->user_data != conn)
/* Finish a Pair Device operation: reply to the requester with the peer
 * address and @status, detach all pairing callbacks from the connection,
 * drop the connection reference taken at pairing start, and remove the
 * pending command.
 */
2177 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2179 struct mgmt_rp_pair_device rp;
2180 struct hci_conn *conn = cmd->user_data;
2182 bacpy(&rp.addr.bdaddr, &conn->dst);
2183 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2185 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2188 /* So we don't get further callbacks for this connection */
2189 conn->connect_cfm_cb = NULL;
2190 conn->security_cfm_cb = NULL;
2191 conn->disconn_cfm_cb = NULL;
2193 hci_conn_drop(conn);
2195 mgmt_pending_remove(cmd);
/* Connection-layer callback fired when pairing finishes: locate the
 * matching pending Pair Device command and complete it with the mapped
 * mgmt status.
 */
2198 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2200 struct pending_cmd *cmd;
2202 BT_DBG("status %u", status);
2204 cmd = find_pairing(conn);
2206 BT_DBG("Unable to find a pending command");
2208 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback for pairing. Unlike BR/EDR, an LE connect
 * success is not pairing success (see pair_device()); the elided guard
 * presumably completes only on error here.
 */
2211 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2213 struct pending_cmd *cmd;
2215 BT_DBG("status %u", status);
2220 cmd = find_pairing(conn);
2222 BT_DBG("Unable to find a pending command");
2224 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device.
 * Creates (or reuses) an ACL/LE connection, attaches pairing callbacks,
 * and defers the reply until the security procedure completes. io_cap
 * 0x03 ("no input no output") selects plain dedicated bonding; anything
 * else requests MITM protection.
 */
2227 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2230 struct mgmt_cp_pair_device *cp = data;
2231 struct mgmt_rp_pair_device rp;
2232 struct pending_cmd *cmd;
2233 u8 sec_level, auth_type;
2234 struct hci_conn *conn;
2239 memset(&rp, 0, sizeof(rp));
2240 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2241 rp.addr.type = cp->addr.type;
2243 if (!bdaddr_type_is_valid(cp->addr.type))
2244 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2245 MGMT_STATUS_INVALID_PARAMS,
2250 if (!hdev_is_powered(hdev)) {
2251 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2252 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2256 sec_level = BT_SECURITY_MEDIUM;
2257 if (cp->io_cap == 0x03)
2258 auth_type = HCI_AT_DEDICATED_BONDING;
2260 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2262 if (cp->addr.type == BDADDR_BREDR)
2263 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2264 cp->addr.type, sec_level, auth_type);
2266 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2267 cp->addr.type, sec_level, auth_type);
2272 if (PTR_ERR(conn) == -EBUSY)
2273 status = MGMT_STATUS_BUSY;
2275 status = MGMT_STATUS_CONNECT_FAILED;
2277 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A callback already installed means another pairing owns this
 * connection.
 */
2283 if (conn->connect_cfm_cb) {
2284 hci_conn_drop(conn);
2285 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2286 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2290 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2293 hci_conn_drop(conn);
2297 /* For LE, just connecting isn't a proof that the pairing finished */
2298 if (cp->addr.type == BDADDR_BREDR)
2299 conn->connect_cfm_cb = pairing_complete_cb;
2301 conn->connect_cfm_cb = le_connect_complete_cb;
2303 conn->security_cfm_cb = pairing_complete_cb;
2304 conn->disconn_cfm_cb = pairing_complete_cb;
2305 conn->io_capability = cp->io_cap;
2306 cmd->user_data = conn;
/* Already connected and secure enough: complete immediately. */
2308 if (conn->state == BT_CONNECTED &&
2309 hci_conn_security(conn, sec_level, auth_type))
2310 pairing_complete(cmd, 0);
2315 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the in-flight Pair Device
 * command for the given address. The pending pairing is completed with
 * MGMT_STATUS_CANCELLED and the cancel request itself gets a success
 * reply echoing the address.
 */
2319 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2322 struct mgmt_addr_info *addr = data;
2323 struct pending_cmd *cmd;
2324 struct hci_conn *conn;
2331 if (!hdev_is_powered(hdev)) {
2332 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2333 MGMT_STATUS_NOT_POWERED);
2337 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2339 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2340 MGMT_STATUS_INVALID_PARAMS);
2344 conn = cmd->user_data;
/* The address must match the connection being paired. */
2346 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2347 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2348 MGMT_STATUS_INVALID_PARAMS);
2352 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2354 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2355 addr, sizeof(*addr));
2357 hci_dev_unlock(hdev);
/* Shared backend for all user confirm/passkey (neg-)reply mgmt commands.
 * For LE addresses the response is routed to SMP and answered
 * immediately; for BR/EDR a pending command is queued and the matching
 * HCI reply command is sent (passkey replies carry the passkey payload,
 * the others only the peer address).
 */
2361 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2362 struct mgmt_addr_info *addr, u16 mgmt_op,
2363 u16 hci_op, __le32 passkey)
2365 struct pending_cmd *cmd;
2366 struct hci_conn *conn;
2371 if (!hdev_is_powered(hdev)) {
2372 err = cmd_complete(sk, hdev->id, mgmt_op,
2373 MGMT_STATUS_NOT_POWERED, addr,
2378 if (addr->type == BDADDR_BREDR)
2379 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2381 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2384 err = cmd_complete(sk, hdev->id, mgmt_op,
2385 MGMT_STATUS_NOT_CONNECTED, addr,
2390 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2391 /* Continue with pairing via SMP */
2392 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2395 err = cmd_complete(sk, hdev->id, mgmt_op,
2396 MGMT_STATUS_SUCCESS, addr,
2399 err = cmd_complete(sk, hdev->id, mgmt_op,
2400 MGMT_STATUS_FAILED, addr,
2406 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2412 /* Continue with pairing via HCI */
2413 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2414 struct hci_cp_user_passkey_reply cp;
2416 bacpy(&cp.bdaddr, &addr->bdaddr);
2417 cp.passkey = passkey;
2418 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2420 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2424 mgmt_pending_remove(cmd);
2427 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the PIN-code-negative HCI opcode.
 */
2431 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2432 void *data, u16 len)
2434 struct mgmt_cp_pin_code_neg_reply *cp = data;
2438 return user_pairing_resp(sk, hdev, &cp->addr,
2439 MGMT_OP_PIN_CODE_NEG_REPLY,
2440 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler. The explicit length check guards
 * against a truncated or oversized command before dereferencing the
 * parameters; then defers to user_pairing_resp().
 */
2443 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2446 struct mgmt_cp_user_confirm_reply *cp = data;
2450 if (len != sizeof(*cp))
2451 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2452 MGMT_STATUS_INVALID_PARAMS);
2454 return user_pairing_resp(sk, hdev, &cp->addr,
2455 MGMT_OP_USER_CONFIRM_REPLY,
2456 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the confirm-negative HCI opcode.
 */
2459 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2460 void *data, u16 len)
2462 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2466 return user_pairing_resp(sk, hdev, &cp->addr,
2467 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2468 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: wrapper over user_pairing_resp()
 * that forwards the user's passkey.
 */
2471 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2474 struct mgmt_cp_user_passkey_reply *cp = data;
2478 return user_pairing_resp(sk, hdev, &cp->addr,
2479 MGMT_OP_USER_PASSKEY_REPLY,
2480 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the passkey-negative HCI opcode.
 */
2483 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2484 void *data, u16 len)
2486 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2490 return user_pairing_resp(sk, hdev, &cp->addr,
2491 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2492 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (carrying hdev->dev_name) to the
 * given request; does not send anything by itself.
 */
2495 static void update_name(struct hci_request *req)
2497 struct hci_dev *hdev = req->hdev;
2498 struct hci_cp_write_local_name cp;
2500 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2502 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request callback for Set Local Name: reply to the pending command
 * with either an error status or the (elided) name payload on success,
 * then drop the pending entry.
 */
2505 static void set_name_complete(struct hci_dev *hdev, u8 status)
2507 struct mgmt_cp_set_local_name *cp;
2508 struct pending_cmd *cmd;
2510 BT_DBG("status 0x%02x", status);
2514 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2521 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2522 mgmt_status(status));
2524 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2527 mgmt_pending_remove(cmd);
2530 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the adapter's long and short
 * names. Unchanged values or a powered-off adapter short-circuit to an
 * immediate reply (plus a Local Name Changed event when off); otherwise
 * an HCI request writes the name (and refreshes LE advertising data) and
 * the reply is deferred to set_name_complete().
 */
2533 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2536 struct mgmt_cp_set_local_name *cp = data;
2537 struct pending_cmd *cmd;
2538 struct hci_request req;
2545 /* If the old values are the same as the new ones just return a
2546 * direct command complete event.
2548 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2549 !memcmp(hdev->short_name, cp->short_name,
2550 sizeof(hdev->short_name))) {
2551 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2556 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2558 if (!hdev_is_powered(hdev)) {
2559 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2561 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2566 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2572 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2578 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2580 hci_req_init(&req, hdev);
2582 if (lmp_bredr_capable(hdev)) {
/* Keep LE advertising data in sync with the new name. */
2587 if (lmp_le_capable(hdev))
2588 hci_update_ad(&req);
2590 err = hci_req_run(&req, set_name_complete);
2592 mgmt_pending_remove(cmd);
2595 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask the controller for its SSP
 * out-of-band hash/randomizer. Requires power and SSP support; only one
 * request may be pending at a time. The reply is deferred until the HCI
 * command completes.
 */
2599 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2600 void *data, u16 data_len)
2602 struct pending_cmd *cmd;
2605 BT_DBG("%s", hdev->name);
2609 if (!hdev_is_powered(hdev)) {
2610 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2611 MGMT_STATUS_NOT_POWERED);
2615 if (!lmp_ssp_capable(hdev)) {
2616 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2617 MGMT_STATUS_NOT_SUPPORTED);
2621 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2622 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2627 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2633 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2635 mgmt_pending_remove(cmd);
2638 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store the remote device's OOB
 * hash/randomizer for later SSP pairing and reply synchronously with the
 * store result.
 */
2642 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2643 void *data, u16 len)
2645 struct mgmt_cp_add_remote_oob_data *cp = data;
2649 BT_DBG("%s ", hdev->name);
2653 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2656 status = MGMT_STATUS_FAILED;
2658 status = MGMT_STATUS_SUCCESS;
2660 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2661 &cp->addr, sizeof(cp->addr));
2663 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete previously stored OOB
 * data for an address; an unknown address maps to INVALID_PARAMS.
 */
2667 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2668 void *data, u16 len)
2670 struct mgmt_cp_remove_remote_oob_data *cp = data;
2674 BT_DBG("%s", hdev->name);
2678 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2680 status = MGMT_STATUS_INVALID_PARAMS;
2682 status = MGMT_STATUS_SUCCESS;
2684 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2685 status, &cp->addr, sizeof(cp->addr));
2687 hci_dev_unlock(hdev);
/* Fail-path helper for Start Discovery: reset discovery state to STOPPED
 * and, if a Start Discovery command is pending, answer it with the mapped
 * status and the discovery type it requested.
 */
2691 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
2693 struct pending_cmd *cmd;
2697 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2699 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2703 type = hdev->discovery.type;
2705 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2706 &type, sizeof(type));
2707 mgmt_pending_remove(cmd);
/* HCI request callback for Start Discovery: on error unwind via
 * mgmt_start_discovery_failed(); on success move to FINDING and, for
 * LE/interleaved scans, schedule the delayed le_scan_disable work that
 * bounds the scan duration.
 */
2712 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
2714 BT_DBG("status %d", status);
2718 mgmt_start_discovery_failed(hdev, status);
2719 hci_dev_unlock(hdev);
2724 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2725 hci_dev_unlock(hdev);
2727 switch (hdev->discovery.type) {
2728 case DISCOV_TYPE_LE:
2729 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2733 case DISCOV_TYPE_INTERLEAVED:
2734 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2735 DISCOV_INTERLEAVED_TIMEOUT);
/* BR/EDR inquiry is bounded by the controller itself; no timer. */
2738 case DISCOV_TYPE_BREDR:
2742 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
/* MGMT_OP_START_DISCOVERY handler: kick off BR/EDR inquiry, LE scanning,
 * or interleaved discovery. Validates power, periodic-inquiry and
 * existing-discovery state, then builds the HCI request for the requested
 * type; completion is handled by start_discovery_complete().
 */
2746 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2747 void *data, u16 len)
2749 struct mgmt_cp_start_discovery *cp = data;
2750 struct pending_cmd *cmd;
2751 struct hci_cp_le_set_scan_param param_cp;
2752 struct hci_cp_le_set_scan_enable enable_cp;
2753 struct hci_cp_inquiry inq_cp;
2754 struct hci_request req;
2755 /* General inquiry access code (GIAC) */
2756 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2759 BT_DBG("%s", hdev->name);
2763 if (!hdev_is_powered(hdev)) {
2764 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2765 MGMT_STATUS_NOT_POWERED);
2769 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2770 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2775 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2776 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2781 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2787 hdev->discovery.type = cp->type;
2789 hci_req_init(&req, hdev);
2791 switch (hdev->discovery.type) {
2792 case DISCOV_TYPE_BREDR:
2793 if (!lmp_bredr_capable(hdev)) {
2794 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2795 MGMT_STATUS_NOT_SUPPORTED);
2796 mgmt_pending_remove(cmd);
2800 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2801 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2803 mgmt_pending_remove(cmd);
/* Start a fresh inquiry: flush stale cache entries first. */
2807 hci_inquiry_cache_flush(hdev);
2809 memset(&inq_cp, 0, sizeof(inq_cp));
2810 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
2811 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
2812 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
2815 case DISCOV_TYPE_LE:
2816 case DISCOV_TYPE_INTERLEAVED:
2817 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2818 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2819 MGMT_STATUS_NOT_SUPPORTED);
2820 mgmt_pending_remove(cmd);
2824 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
2825 !lmp_bredr_capable(hdev)) {
2826 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2827 MGMT_STATUS_NOT_SUPPORTED);
2828 mgmt_pending_remove(cmd);
/* An LE peripheral (advertising) cannot scan at the same time. */
2832 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
2833 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2834 MGMT_STATUS_REJECTED);
2835 mgmt_pending_remove(cmd);
2839 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
2840 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2842 mgmt_pending_remove(cmd);
/* NOTE(review): the '¶m_cp' below is a mis-encoded '&param_cp'
 * (character corruption in this copy) — restore before building.
 */
2846 memset(¶m_cp, 0, sizeof(param_cp));
2847 param_cp.type = LE_SCAN_ACTIVE;
2848 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
2849 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2850 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2853 memset(&enable_cp, 0, sizeof(enable_cp));
2854 enable_cp.enable = LE_SCAN_ENABLE;
2855 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2856 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2861 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2862 MGMT_STATUS_INVALID_PARAMS);
2863 mgmt_pending_remove(cmd);
2867 err = hci_req_run(&req, start_discovery_complete);
2869 mgmt_pending_remove(cmd);
2871 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2874 hci_dev_unlock(hdev);
/* Fail-path helper for Stop Discovery: if a Stop Discovery command is
 * pending, answer it with the mapped status and the active discovery
 * type, then drop it.
 */
2878 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
2880 struct pending_cmd *cmd;
2883 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
2887 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2888 &hdev->discovery.type, sizeof(hdev->discovery.type));
2889 mgmt_pending_remove(cmd);
/* HCI request callback for Stop Discovery: on error report through
 * mgmt_stop_discovery_failed(), otherwise mark discovery STOPPED.
 */
2894 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
2896 BT_DBG("status %d", status);
2901 mgmt_stop_discovery_failed(hdev, status);
2905 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2908 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: abort an active discovery of the given
 * type. FINDING state cancels the inquiry or disables the LE scan;
 * RESOLVING state cancels the in-flight remote name request (or replies
 * immediately if no name resolution is pending). Completion is handled
 * by stop_discovery_complete().
 */
2911 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2914 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2915 struct pending_cmd *cmd;
2916 struct hci_cp_remote_name_req_cancel cp;
2917 struct inquiry_entry *e;
2918 struct hci_request req;
2919 struct hci_cp_le_set_scan_enable enable_cp;
2922 BT_DBG("%s", hdev->name);
2926 if (!hci_discovery_active(hdev)) {
2927 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2928 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2929 sizeof(mgmt_cp->type));
/* The requested type must match the discovery actually running. */
2933 if (hdev->discovery.type != mgmt_cp->type) {
2934 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2935 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2936 sizeof(mgmt_cp->type));
2940 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2946 hci_req_init(&req, hdev);
2948 switch (hdev->discovery.state) {
2949 case DISCOVERY_FINDING:
2950 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2951 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
/* LE scan path: stop the auto-disable timer and turn the scan
 * off explicitly.
 */
2953 cancel_delayed_work(&hdev->le_scan_disable);
2955 memset(&enable_cp, 0, sizeof(enable_cp));
2956 enable_cp.enable = LE_SCAN_DISABLE;
2957 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
2958 sizeof(enable_cp), &enable_cp);
2963 case DISCOVERY_RESOLVING:
2964 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* Nothing being resolved: reply immediately, nothing to cancel. */
2967 mgmt_pending_remove(cmd);
2968 err = cmd_complete(sk, hdev->id,
2969 MGMT_OP_STOP_DISCOVERY, 0,
2971 sizeof(mgmt_cp->type));
2972 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2976 bacpy(&cp.bdaddr, &e->data.bdaddr);
2977 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2983 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2985 mgmt_pending_remove(cmd);
2986 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2987 MGMT_STATUS_FAILED, &mgmt_cp->type,
2988 sizeof(mgmt_cp->type));
2992 err = hci_req_run(&req, stop_discovery_complete);
2994 mgmt_pending_remove(cmd);
2996 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2999 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether the name of a
 * discovered device is already known. Unknown names are re-queued for
 * resolution (NAME_NEEDED); known ones are marked NAME_KNOWN so no remote
 * name request is issued.
 */
3003 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3006 struct mgmt_cp_confirm_name *cp = data;
3007 struct inquiry_entry *e;
3010 BT_DBG("%s", hdev->name);
3014 if (!hci_discovery_active(hdev)) {
3015 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3016 MGMT_STATUS_FAILED);
3020 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3022 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3023 MGMT_STATUS_INVALID_PARAMS);
3027 if (cp->name_known) {
3028 e->name_state = NAME_KNOWN;
3031 e->name_state = NAME_NEEDED;
3032 hci_inquiry_cache_update_resolve(hdev, e);
3035 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3039 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the adapter blacklist
 * and reply synchronously with the result (FAILED typically meaning it
 * was already listed — confirm against hci_blacklist_add()).
 */
3043 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3046 struct mgmt_cp_block_device *cp = data;
3050 BT_DBG("%s", hdev->name);
3052 if (!bdaddr_type_is_valid(cp->addr.type))
3053 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3054 MGMT_STATUS_INVALID_PARAMS,
3055 &cp->addr, sizeof(cp->addr));
3059 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3061 status = MGMT_STATUS_FAILED;
3063 status = MGMT_STATUS_SUCCESS;
3065 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3066 &cp->addr, sizeof(cp->addr));
3068 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the adapter
 * blacklist; a miss maps to INVALID_PARAMS.
 */
3073 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3076 struct mgmt_cp_unblock_device *cp = data;
3080 BT_DBG("%s", hdev->name);
3082 if (!bdaddr_type_is_valid(cp->addr.type))
3083 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3084 MGMT_STATUS_INVALID_PARAMS,
3085 &cp->addr, sizeof(cp->addr));
3089 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3091 status = MGMT_STATUS_INVALID_PARAMS;
3093 status = MGMT_STATUS_SUCCESS;
3095 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3096 &cp->addr, sizeof(cp->addr));
3098 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record
 * (source/vendor/product/version) and run an HCI request (body elided,
 * presumably an EIR update) with no completion callback. Source values
 * above 0x0002 are not assigned and are rejected.
 */
3103 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3106 struct mgmt_cp_set_device_id *cp = data;
3107 struct hci_request req;
3111 BT_DBG("%s", hdev->name);
3113 source = __le16_to_cpu(cp->source);
3115 if (source > 0x0002)
3116 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3117 MGMT_STATUS_INVALID_PARAMS);
3121 hdev->devid_source = source;
3122 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3123 hdev->devid_product = __le16_to_cpu(cp->product);
3124 hdev->devid_version = __le16_to_cpu(cp->version);
3126 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3128 hci_req_init(&req, hdev);
3130 hci_req_run(&req, NULL);
3132 hci_dev_unlock(hdev);
/* HCI request callback for Set Advertising: on error answer every
 * pending Set Advertising command with the mapped status; on success
 * answer them with the new settings and broadcast New Settings to other
 * listeners.
 */
3137 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3139 struct cmd_lookup match = { NULL, hdev };
3142 u8 mgmt_err = mgmt_status(status);
3144 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3145 cmd_status_rsp, &mgmt_err);
3149 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3152 new_settings(hdev, match.sk);
/* MGMT_OP_SET_ADVERTISING handler: toggle LE advertising. Requires LE
 * support and LE enabled. When the adapter is off or the value already
 * matches, only the HCI_LE_PERIPHERAL flag is flipped locally; otherwise
 * an HCI LE Set Advertise Enable command is issued and the reply is
 * deferred to set_advertising_complete().
 */
3158 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3160 struct mgmt_mode *cp = data;
3161 struct pending_cmd *cmd;
3162 struct hci_request req;
3166 BT_DBG("request for %s", hdev->name);
3168 if (!lmp_le_capable(hdev))
3169 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3170 MGMT_STATUS_NOT_SUPPORTED);
3172 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3173 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3174 MGMT_STATUS_REJECTED);
3176 if (cp->val != 0x00 && cp->val != 0x01)
3177 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3178 MGMT_STATUS_INVALID_PARAMS);
3183 enabled = test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
/* No HCI traffic needed: update the flag and reply directly. */
3185 if (!hdev_is_powered(hdev) || val == enabled) {
3186 bool changed = false;
3188 if (val != test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
3189 change_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
3193 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3198 err = new_settings(hdev, sk);
/* Serialize against concurrent advertising/LE toggles. */
3203 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3204 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3205 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3210 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3216 hci_req_init(&req, hdev);
3218 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(val), &val);
3220 err = hci_req_run(&req, set_advertising_complete);
3222 mgmt_pending_remove(cmd);
3225 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable: resolve the
 * single pending MGMT_OP_SET_FAST_CONNECTABLE command, updating the
 * HCI_FAST_CONNECTABLE flag from the stored request parameter on
 * success, or forwarding the translated error status on failure.
 */
3229 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3231 	struct pending_cmd *cmd;
3233 	BT_DBG("status 0x%02x", status);
3237 	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3242 		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3243 			   mgmt_status(status));
/* Success path: cmd->param holds the original mgmt_mode request. */
3245 		struct mgmt_mode *cp = cmd->param;
3248 			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3250 			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3252 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3253 		new_settings(hdev, cmd->sk);
3256 	mgmt_pending_remove(cmd);
3259 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: adjust page-scan parameters for
 * faster connection setup. Requires BR/EDR support on a >= 1.2
 * controller, a powered adapter and connectable mode. The actual
 * parameter change is done asynchronously via write_fast_connectable().
 */
3262 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3263 				void *data, u16 len)
3265 	struct mgmt_mode *cp = data;
3266 	struct pending_cmd *cmd;
3267 	struct hci_request req;
3270 	BT_DBG("%s", hdev->name);
/* Fast connectable relies on interlaced page scan, a BT 1.2 feature. */
3272 	if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
3273 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3274 				  MGMT_STATUS_NOT_SUPPORTED);
3276 	if (cp->val != 0x00 && cp->val != 0x01)
3277 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3278 				  MGMT_STATUS_INVALID_PARAMS);
3280 	if (!hdev_is_powered(hdev))
3281 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3282 				  MGMT_STATUS_NOT_POWERED);
3284 	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3285 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3286 				  MGMT_STATUS_REJECTED);
/* Only one such command may be in flight per controller. */
3290 	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3291 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Requested state already active: short-circuit with a settings reply. */
3296 	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3297 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3302 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3309 	hci_req_init(&req, hdev);
3311 	write_fast_connectable(&req, cp->val);
3313 	err = hci_req_run(&req, fast_connectable_complete);
3315 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3316 				 MGMT_STATUS_FAILED);
3317 		mgmt_pending_remove(cmd);
3321 	hci_dev_unlock(hdev);
/* Sanity-check one LTK entry from a Load Long Term Keys request:
 * authenticated and master must be strict booleans (0x00/0x01) and the
 * address type must be an LE type. Returns false for bad entries
 * (return statements are elided in this excerpt).
 */
3326 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3328 	if (key->authenticated != 0x00 && key->authenticated != 0x01)
3330 	if (key->master != 0x00 && key->master != 0x01)
3332 	if (!bdaddr_type_is_le(key->addr.type))
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the controller's stored
 * SMP long-term keys with the list supplied by userspace. The payload
 * length must exactly match the declared key_count; every entry is
 * validated before any existing key is discarded.
 */
3337 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3338 			       void *cp_data, u16 len)
3340 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
3341 	u16 key_count, expected_len;
3344 	key_count = __le16_to_cpu(cp->key_count);
/* Guard against a length/count mismatch before indexing cp->keys[]. */
3346 	expected_len = sizeof(*cp) + key_count *
3347 					sizeof(struct mgmt_ltk_info);
3348 	if (expected_len != len) {
3349 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
3351 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3352 				  MGMT_STATUS_INVALID_PARAMS);
3355 	BT_DBG("%s key_count %u", hdev->name, key_count);
/* First pass: validate everything so a bad entry cannot leave the key
 * store half-replaced. */
3357 	for (i = 0; i < key_count; i++) {
3358 		struct mgmt_ltk_info *key = &cp->keys[i];
3360 		if (!ltk_is_valid(key))
3361 			return cmd_status(sk, hdev->id,
3362 					  MGMT_OP_LOAD_LONG_TERM_KEYS,
3363 					  MGMT_STATUS_INVALID_PARAMS);
3368 	hci_smp_ltks_clear(hdev);
/* Second pass: install the new keys. */
3370 	for (i = 0; i < key_count; i++) {
3371 		struct mgmt_ltk_info *key = &cp->keys[i];
3377 			type = HCI_SMP_LTK_SLAVE;
3379 		hci_add_ltk(hdev, &key->addr.bdaddr,
3380 			    bdaddr_to_le(key->addr.type),
3381 			    type, 0, key->authenticated, key->val,
3382 			    key->enc_size, key->ediv, key->rand);
3385 	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3388 	hci_dev_unlock(hdev);
/* Dispatch table for management commands. Indexed directly by opcode
 * (see mgmt_control()), so entry order must match the MGMT_OP_* values;
 * slot 0 is a placeholder since opcode 0 is unused. Each entry carries
 * the handler, a var_len flag (true = the declared size is a minimum,
 * false = exact) and the expected parameter size.
 */
3393 static const struct mgmt_handler {
3394 	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3398 } mgmt_handlers[] = {
3399 	{ NULL }, /* 0x0000 (no command) */
3400 	{ read_version,           false, MGMT_READ_VERSION_SIZE },
3401 	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
3402 	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
3403 	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
3404 	{ set_powered,            false, MGMT_SETTING_SIZE },
3405 	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
3406 	{ set_connectable,        false, MGMT_SETTING_SIZE },
3407 	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
3408 	{ set_pairable,           false, MGMT_SETTING_SIZE },
3409 	{ set_link_security,      false, MGMT_SETTING_SIZE },
3410 	{ set_ssp,                false, MGMT_SETTING_SIZE },
3411 	{ set_hs,                 false, MGMT_SETTING_SIZE },
3412 	{ set_le,                 false, MGMT_SETTING_SIZE },
3413 	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
3414 	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
3415 	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
3416 	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	/* Key-load commands carry a variable-length key array. */
3417 	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
3418 	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3419 	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
3420 	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
3421 	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
3422 	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3423 	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
3424 	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
3425 	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3426 	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
3427 	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
3428 	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3429 	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
3430 	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3431 	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3432 	{ add_remote_oob_data,    false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3433 	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3434 	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
3435 	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
3436 	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
3437 	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
3438 	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
3439 	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
3440 	{ set_advertising,        false, MGMT_SETTING_SIZE },
/* Entry point for raw management messages from the HCI control socket:
 * copy the datagram, parse the mgmt_hdr (opcode, controller index,
 * payload length), validate everything and dispatch to the handler in
 * mgmt_handlers[]. Cleanup/return lines are elided in this excerpt.
 */
3444 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3448 	struct mgmt_hdr *hdr;
3449 	u16 opcode, index, len;
3450 	struct hci_dev *hdev = NULL;
3451 	const struct mgmt_handler *handler;
3454 	BT_DBG("got %zu bytes", msglen);
3456 	if (msglen < sizeof(*hdr))
3459 	buf = kmalloc(msglen, GFP_KERNEL);
3463 	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
/* Header fields arrive little-endian on the wire. */
3469 	opcode = __le16_to_cpu(hdr->opcode);
3470 	index = __le16_to_cpu(hdr->index);
3471 	len = __le16_to_cpu(hdr->len);
3473 	if (len != msglen - sizeof(*hdr)) {
/* Resolve the controller, unless the command targets no specific one. */
3478 	if (index != MGMT_INDEX_NONE) {
3479 		hdev = hci_dev_get(index);
3481 			err = cmd_status(sk, index, opcode,
3482 					 MGMT_STATUS_INVALID_INDEX);
/* Controllers claimed by a user channel are invisible to mgmt. */
3486 		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3487 			err = cmd_status(sk, index, opcode,
3488 					 MGMT_STATUS_INVALID_INDEX);
3493 	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3494 	    mgmt_handlers[opcode].func == NULL) {
3495 		BT_DBG("Unknown op %u", opcode);
3496 		err = cmd_status(sk, index, opcode,
3497 				 MGMT_STATUS_UNKNOWN_COMMAND);
/* Commands below READ_INFO are global-only; the rest need a device. */
3501 	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3502 	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3503 		err = cmd_status(sk, index, opcode,
3504 				 MGMT_STATUS_INVALID_INDEX);
3508 	handler = &mgmt_handlers[opcode];
/* var_len handlers accept anything >= data_len; others need an exact
 * match. */
3510 	if ((handler->var_len && len < handler->data_len) ||
3511 	    (!handler->var_len && len != handler->data_len)) {
3512 		err = cmd_status(sk, index, opcode,
3513 				 MGMT_STATUS_INVALID_PARAMS);
3518 		mgmt_init_hdev(sk, hdev);
3520 	cp = buf + sizeof(*hdr);
3522 	err = handler->func(sk, hdev, cp, len);
/* Broadcast an Index Added event when a mgmt-visible controller
 * appears; controllers rejected by mgmt_valid_hdev() are ignored.
 */
3536 int mgmt_index_added(struct hci_dev *hdev)
3538 	if (!mgmt_valid_hdev(hdev))
3541 	return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Broadcast an Index Removed event for a disappearing controller,
 * first failing every pending command (opcode 0 = all) with
 * INVALID_INDEX so no requester is left hanging.
 */
3544 int mgmt_index_removed(struct hci_dev *hdev)
3546 	u8 status = MGMT_STATUS_INVALID_INDEX;
3548 	if (!mgmt_valid_hdev(hdev))
3551 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3553 	return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
/* Append the HCI commands that restore normal BR/EDR scan mode to an
 * existing request: reset fast-connectable page-scan parameters, then
 * program page/inquiry scan according to the connectable and
 * discoverable flags.
 */
3556 static void set_bredr_scan(struct hci_request *req)
3558 	struct hci_dev *hdev = req->hdev;
3561 	/* Ensure that fast connectable is disabled. This function will
3562 	 * not do anything if the page scan parameters are already what
3565 	write_fast_connectable(req, false);
3567 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3569 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3570 		scan |= SCAN_INQUIRY;
3573 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Completion callback for the power-on HCI request sequence built by
 * powered_update_hci(): answer all pending Set Powered commands with
 * the current settings and broadcast New Settings.
 */
3576 static void powered_complete(struct hci_dev *hdev, u8 status)
3578 	struct cmd_lookup match = { NULL, hdev };
3580 	BT_DBG("status 0x%02x", status);
3584 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3586 	new_settings(hdev, match.sk);
3588 	hci_dev_unlock(hdev);
/* Build and run the HCI request that brings a freshly powered
 * controller in line with the mgmt-configured state: SSP mode, LE host
 * support, LE advertising, authentication and BR/EDR scan mode.
 * Returns the hci_req_run() result (0 when commands were queued).
 */
3594 static int powered_update_hci(struct hci_dev *hdev)
3596 	struct hci_request req;
3599 	hci_req_init(&req, hdev);
/* Enable SSP on the controller when requested but not yet active. */
3601 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3602 	    !lmp_host_ssp_capable(hdev)) {
3605 		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3608 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
3609 	    lmp_bredr_capable(hdev)) {
3610 		struct hci_cp_write_le_host_supported cp;
3613 		cp.simul = lmp_le_br_capable(hdev);
3615 		/* Check first if we already have the right
3616 		 * host state (host features set)
3618 		if (cp.le != lmp_host_le_capable(hdev) ||
3619 		    cp.simul != lmp_host_le_br_capable(hdev))
3620 			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Re-enable LE advertising if the peripheral flag was set while off. */
3624 	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
3627 		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(adv), &adv);
/* Sync the link-level authentication requirement with mgmt state. */
3630 	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3631 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3632 		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3633 			    sizeof(link_sec), &link_sec);
3635 	if (lmp_bredr_capable(hdev)) {
3636 		set_bredr_scan(&req);
3642 	return hci_req_run(&req, powered_complete);
/* Notify mgmt of a power state change. On power-on the controller is
 * reconfigured asynchronously via powered_update_hci(); on power-off
 * all pending commands are failed with NOT_POWERED and a zero class-
 * of-device change is announced if a class had been set.
 */
3645 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3647 	struct cmd_lookup match = { NULL, hdev };
3648 	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3649 	u8 zero_cod[] = { 0, 0, 0 };
3652 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* 0 means the update request was queued; responses come from
 * powered_complete() instead of here. */
3656 		if (powered_update_hci(hdev) == 0)
3659 		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3664 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3665 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3667 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3668 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3669 			   zero_cod, sizeof(zero_cod), NULL);
3672 	err = new_settings(hdev, match.sk);
/* Fail a pending Set Powered command after power-on failed, mapping
 * rfkill-blocked (-ERFKILL) to MGMT_STATUS_RFKILLED and everything
 * else to a generic failure.
 */
3680 int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
3682 	struct pending_cmd *cmd;
3685 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
3689 	if (err == -ERFKILL)
3690 		status = MGMT_STATUS_RFKILLED;
3692 		status = MGMT_STATUS_FAILED;
3694 	err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
3696 	mgmt_pending_remove(cmd);
/* Record a discoverable-mode change reported by the controller,
 * updating HCI_DISCOVERABLE, answering pending Set Discoverable
 * commands and (presumably when the flag actually changed — elided
 * here) broadcasting New Settings.
 */
3701 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3703 	struct cmd_lookup match = { NULL, hdev };
3704 	bool changed = false;
3708 		if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3711 		if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3715 	mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3719 		err = new_settings(hdev, match.sk);
/* Record a connectable-mode change reported by the controller,
 * updating HCI_CONNECTABLE and emitting New Settings, skipping the
 * socket of any pending Set Connectable command.
 */
3727 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3729 	struct pending_cmd *cmd;
3730 	bool changed = false;
3734 		if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3737 		if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3741 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3744 	err = new_settings(hdev, cmd ? cmd->sk : NULL);
/* A Write Scan Enable HCI command failed: fail the pending mgmt
 * commands that correspond to the scan bits that were being set —
 * SCAN_PAGE maps to Set Connectable, SCAN_INQUIRY to Set Discoverable.
 */
3749 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3751 	u8 mgmt_err = mgmt_status(status);
3753 	if (scan & SCAN_PAGE)
3754 		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3755 				     cmd_status_rsp, &mgmt_err);
3757 	if (scan & SCAN_INQUIRY)
3758 		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3759 				     cmd_status_rsp, &mgmt_err);
/* Emit a New Link Key event for a BR/EDR link key, copying the key
 * material and marking with store_hint whether userspace should
 * persist it.
 */
3764 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3767 	struct mgmt_ev_new_link_key ev;
3769 	memset(&ev, 0, sizeof(ev));
3771 	ev.store_hint = persistent;
3772 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3773 	ev.key.addr.type = BDADDR_BREDR;
3774 	ev.key.type = key->type;
3775 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3776 	ev.key.pin_len = key->pin_len;
3778 	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Long Term Key event for an SMP LTK, translating the LE
 * link address type for the wire format and copying the key material
 * (ediv/rand/val).
 */
3781 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3783 	struct mgmt_ev_new_long_term_key ev;
3785 	memset(&ev, 0, sizeof(ev));
3787 	ev.store_hint = persistent;
3788 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3789 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3790 	ev.key.authenticated = key->authenticated;
3791 	ev.key.enc_size = key->enc_size;
3792 	ev.key.ediv = key->ediv;
/* The master-role marking for HCI_SMP_LTK keys is elided here. */
3794 	if (key->type == HCI_SMP_LTK)
3797 	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3798 	memcpy(ev.key.val, key->val, sizeof(key->val));
3800 	return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/* Emit a Device Connected event. The remote name and (when non-zero)
 * class of device are packed as EIR fields after the fixed part of the
 * event, and the total EIR length is recorded in the event header.
 */
3804 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3805 			  u8 addr_type, u32 flags, u8 *name, u8 name_len,
3809 	struct mgmt_ev_device_connected *ev = (void *) buf;
3812 	bacpy(&ev->addr.bdaddr, bdaddr);
3813 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
3815 	ev->flags = __cpu_to_le32(flags);
/* Name goes first in the EIR blob when present (guard elided). */
3818 		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append CoD when it is non-zero — all-zero means "not set". */
3821 	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3822 		eir_len = eir_append_data(ev->eir, eir_len,
3823 					  EIR_CLASS_OF_DEV, dev_class, 3);
3825 	ev->eir_len = cpu_to_le16(eir_len);
3827 	return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3828 			  sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete one pending Disconnect
 * command with success, echoing the address, and remember its socket
 * through the sock** cookie (sock_hold, elided here, presumably keeps
 * it alive for the caller — confirm against the full source).
 */
3831 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3833 	struct mgmt_cp_disconnect *cp = cmd->param;
3834 	struct sock **sk = data;
3835 	struct mgmt_rp_disconnect rp;
3837 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3838 	rp.addr.type = cp->addr.type;
3840 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3846 	mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: complete one pending Unpair Device
 * command with success and emit the Device Unpaired event for it.
 */
3849 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3851 	struct hci_dev *hdev = data;
3852 	struct mgmt_cp_unpair_device *cp = cmd->param;
3853 	struct mgmt_rp_unpair_device rp;
3855 	memset(&rp, 0, sizeof(rp));
3856 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3857 	rp.addr.type = cp->addr.type;
3859 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3861 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3863 	mgmt_pending_remove(cmd);
/* Emit a Device Disconnected event. Pending Disconnect commands are
 * completed first (their requester's socket is then excluded from the
 * broadcast), and pending Unpair Device commands are flushed afterwards
 * since the disconnection finishes the unpair sequence.
 */
3866 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3867 			     u8 link_type, u8 addr_type, u8 reason)
3869 	struct mgmt_ev_device_disconnected ev;
3870 	struct sock *sk = NULL;
3873 	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3875 	bacpy(&ev.addr.bdaddr, bdaddr);
3876 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3879 	err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3885 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnection failed: flush pending Unpair Device
 * commands anyway, then complete the pending Disconnect command with
 * the translated failure status and the target address.
 */
3891 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3892 			   u8 link_type, u8 addr_type, u8 status)
3894 	struct mgmt_rp_disconnect rp;
3895 	struct pending_cmd *cmd;
3898 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3901 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3905 	bacpy(&rp.addr.bdaddr, bdaddr);
3906 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
3908 	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3909 			   mgmt_status(status), &rp, sizeof(rp));
3911 	mgmt_pending_remove(cmd);
/* Broadcast a Connect Failed event with the peer address and the
 * HCI status translated to a mgmt status code.
 */
3916 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3917 			u8 addr_type, u8 status)
3919 	struct mgmt_ev_connect_failed ev;
3921 	bacpy(&ev.addr.bdaddr, bdaddr);
3922 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3923 	ev.status = mgmt_status(status);
3925 	return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a PIN Code Request event for a BR/EDR peer (the secure-PIN
 * flag assignment is elided in this excerpt).
 */
3928 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3930 	struct mgmt_ev_pin_code_request ev;
3932 	bacpy(&ev.addr.bdaddr, bdaddr);
3933 	ev.addr.type = BDADDR_BREDR;
3936 	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/* The controller acknowledged a PIN Code Reply: complete the pending
 * mgmt command with the translated status and the peer address.
 */
3940 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3943 	struct pending_cmd *cmd;
3944 	struct mgmt_rp_pin_code_reply rp;
3947 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3951 	bacpy(&rp.addr.bdaddr, bdaddr);
3952 	rp.addr.type = BDADDR_BREDR;
3954 	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3955 			   mgmt_status(status), &rp, sizeof(rp));
3957 	mgmt_pending_remove(cmd);
/* The controller acknowledged a PIN Code Negative Reply: complete the
 * pending mgmt command with the translated status and peer address.
 */
3962 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3965 	struct pending_cmd *cmd;
3966 	struct mgmt_rp_pin_code_reply rp;
3969 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3973 	bacpy(&rp.addr.bdaddr, bdaddr);
3974 	rp.addr.type = BDADDR_BREDR;
3976 	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3977 			   mgmt_status(status), &rp, sizeof(rp));
3979 	mgmt_pending_remove(cmd);
/* Broadcast a User Confirm Request event asking userspace to confirm
 * a numeric-comparison value; confirm_hint tells it whether a simple
 * yes/no is enough (value assignment to ev is elided in this excerpt).
 */
3984 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3985 			      u8 link_type, u8 addr_type, __le32 value,
3988 	struct mgmt_ev_user_confirm_request ev;
3990 	BT_DBG("%s", hdev->name);
3992 	bacpy(&ev.addr.bdaddr, bdaddr);
3993 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3994 	ev.confirm_hint = confirm_hint;
3997 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Broadcast a User Passkey Request event asking userspace to supply a
 * passkey for the given peer.
 */
4001 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4002 			      u8 link_type, u8 addr_type)
4004 	struct mgmt_ev_user_passkey_request ev;
4006 	BT_DBG("%s", hdev->name);
4008 	bacpy(&ev.addr.bdaddr, bdaddr);
4009 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4011 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion helper for the four user confirm/passkey
 * (negative) reply opcodes: find the pending command for the given
 * opcode and complete it with the translated status and peer address.
 */
4015 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4016 				      u8 link_type, u8 addr_type, u8 status,
4019 	struct pending_cmd *cmd;
4020 	struct mgmt_rp_user_confirm_reply rp;
4023 	cmd = mgmt_pending_find(opcode, hdev);
4027 	bacpy(&rp.addr.bdaddr, bdaddr);
4028 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
4029 	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
4032 	mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending User Confirm Reply. */
4037 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4038 				     u8 link_type, u8 addr_type, u8 status)
4040 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4041 					  status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending User Confirm Negative Reply. */
4044 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4045 					 u8 link_type, u8 addr_type, u8 status)
4047 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4049 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending User Passkey Reply. */
4052 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4053 				     u8 link_type, u8 addr_type, u8 status)
4055 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4056 					  status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending User Passkey Negative Reply. */
4059 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4060 					 u8 link_type, u8 addr_type, u8 status)
4062 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4064 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Broadcast a Passkey Notify event so userspace can display the
 * passkey; `entered` reports how many digits the remote side has
 * typed so far.
 */
4067 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4068 			     u8 link_type, u8 addr_type, u32 passkey,
4071 	struct mgmt_ev_passkey_notify ev;
4073 	BT_DBG("%s", hdev->name);
4075 	bacpy(&ev.addr.bdaddr, bdaddr);
4076 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4077 	ev.passkey = __cpu_to_le32(passkey);
4078 	ev.entered = entered;
4080 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Broadcast an Authentication Failed event with the peer address and
 * the HCI status translated to a mgmt status code.
 */
4083 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4084 		     u8 addr_type, u8 status)
4086 	struct mgmt_ev_auth_failed ev;
4088 	bacpy(&ev.addr.bdaddr, bdaddr);
4089 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4090 	ev.status = mgmt_status(status);
4092 	return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* The Write Auth Enable HCI command finished: on failure, propagate the
 * error to pending Set Link Security commands; on success, sync
 * HCI_LINK_SECURITY with the controller's HCI_AUTH flag, answer the
 * pending commands and broadcast New Settings when the flag changed.
 */
4095 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4097 	struct cmd_lookup match = { NULL, hdev };
4098 	bool changed = false;
4102 		u8 mgmt_err = mgmt_status(status);
4103 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4104 				     cmd_status_rsp, &mgmt_err);
4108 	if (test_bit(HCI_AUTH, &hdev->flags)) {
4109 		if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4112 		if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4116 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4120 		err = new_settings(hdev, match.sk);
/* Append a command to an existing HCI request that wipes the Extended
 * Inquiry Response: zero the cached copy in hdev->eir and send an
 * all-zero Write EIR. No-op on controllers without EIR support.
 */
4128 static void clear_eir(struct hci_request *req)
4130 	struct hci_dev *hdev = req->hdev;
4131 	struct hci_cp_write_eir cp;
4133 	if (!lmp_ext_inq_capable(hdev))
4136 	memset(hdev->eir, 0, sizeof(hdev->eir));
4138 	memset(&cp, 0, sizeof(cp));
4140 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* The Write SSP Mode HCI command finished: on failure, roll back the
 * HCI_SSP_ENABLED flag if we had optimistically set it and fail the
 * pending Set SSP commands; on success, sync the flag, answer the
 * pending commands, broadcast New Settings if anything changed, and
 * run a follow-up request to update/clear the EIR accordingly.
 */
4143 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4145 	struct cmd_lookup match = { NULL, hdev };
4146 	struct hci_request req;
4147 	bool changed = false;
4151 		u8 mgmt_err = mgmt_status(status);
/* We turned the flag on before the command ran; undo it and tell
 * everyone the setting reverted. */
4153 		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4155 			err = new_settings(hdev, NULL);
4157 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4164 		if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4167 		if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4171 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4174 		err = new_settings(hdev, match.sk);
4179 	hci_req_init(&req, hdev);
4181 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4186 	hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup, taking a reference so it outlives the
 * pending command.
 */
4191 static void sk_lookup(struct pending_cmd *cmd, void *data)
4193 	struct cmd_lookup *match = data;
4195 	if (match->sk == NULL) {
4196 		match->sk = cmd->sk;
4197 		sock_hold(match->sk);
/* A class-of-device update finished: find the socket of whichever
 * command triggered it (Set Dev Class, Add UUID or Remove UUID) and
 * broadcast the Class Of Device Changed event, skipping that socket.
 */
4201 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4204 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4207 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4208 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4209 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4212 		err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* A local-name update finished: cache the new name on the hci_dev when
 * no mgmt command initiated it, and broadcast Local Name Changed —
 * unless the write happened as part of powering on, where no event is
 * wanted.
 */
4221 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4223 	struct mgmt_cp_set_local_name ev;
4224 	struct pending_cmd *cmd;
4229 	memset(&ev, 0, sizeof(ev));
4230 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4231 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4233 	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4235 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4237 		/* If this is a HCI command related to powering on the
4238 		 * HCI dev don't send any mgmt signals.
4240 		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4244 	return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4245 			  cmd ? cmd->sk : NULL);
/* The Read Local OOB Data HCI command finished: complete the pending
 * mgmt command either with a translated error status or with the
 * hash/randomizer pair on success.
 */
4248 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4249 					    u8 *randomizer, u8 status)
4251 	struct pending_cmd *cmd;
4254 	BT_DBG("%s status %u", hdev->name, status);
4256 	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4261 		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4262 				 mgmt_status(status));
4264 		struct mgmt_rp_read_local_oob_data rp;
4266 		memcpy(rp.hash, hash, sizeof(rp.hash));
4267 		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4269 		err = cmd_complete(cmd->sk, hdev->id,
4270 				   MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
4274 	mgmt_pending_remove(cmd);
/* Emit a Device Found event during active discovery. The remote's EIR
 * data is copied into the event, a class-of-device field is appended
 * if the EIR lacks one, and confirm-name / legacy-pairing hints are
 * encoded in the flags. Dropped silently when discovery is inactive or
 * the EIR data would not fit in the stack buffer.
 */
4279 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4280 		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4281 		      ssp, u8 *eir, u16 eir_len)
4284 	struct mgmt_ev_device_found *ev = (void *) buf;
4287 	if (!hci_discovery_active(hdev))
4290 	/* Leave 5 bytes for a potential CoD field */
4291 	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4294 	memset(buf, 0, sizeof(buf));
4296 	bacpy(&ev->addr.bdaddr, bdaddr);
4297 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* Flag conditions (cfm_name / !ssp checks) are elided in this excerpt. */
4300 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4302 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4305 		memcpy(ev->eir, eir, eir_len);
4307 	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4308 		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4311 	ev->eir_len = cpu_to_le16(eir_len);
4312 	ev_size = sizeof(*ev) + eir_len;
4314 	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a Device Found event carrying only the remote's name (as a
 * complete-name EIR field), used when a name arrives after the initial
 * discovery result.
 */
4317 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4318 		     u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4320 	struct mgmt_ev_device_found *ev;
/* +2 covers the EIR field's length and type bytes. */
4321 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4324 	ev = (struct mgmt_ev_device_found *) buf;
4326 	memset(buf, 0, sizeof(buf));
4328 	bacpy(&ev->addr.bdaddr, bdaddr);
4329 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
4332 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4335 	ev->eir_len = cpu_to_le16(eir_len);
4337 	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4338 			  sizeof(*ev) + eir_len, NULL);
/* Report a discovery state change: complete whichever pending Start or
 * Stop Discovery command matches (replying with the discovery type),
 * then broadcast the Discovering event.
 */
4341 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4343 	struct mgmt_ev_discovering ev;
4344 	struct pending_cmd *cmd;
4346 	BT_DBG("%s discovering %u", hdev->name, discovering);
/* Starting resolves a Start Discovery command, stopping a Stop one;
 * the discovering-based selection between the two is elided here. */
4349 		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4351 		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4354 		u8 type = hdev->discovery.type;
4356 		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4358 		mgmt_pending_remove(cmd);
4361 	memset(&ev, 0, sizeof(ev));
4362 	ev.type = hdev->discovery.type;
4363 	ev.discovering = discovering;
4365 	return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a Device Blocked event, skipping the socket of the Block
 * Device command that caused it (if any).
 */
4368 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4370 	struct pending_cmd *cmd;
4371 	struct mgmt_ev_device_blocked ev;
4373 	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4375 	bacpy(&ev.addr.bdaddr, bdaddr);
4376 	ev.addr.type = type;
4378 	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4379 			  cmd ? cmd->sk : NULL);
4382 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4384 struct pending_cmd *cmd;
4385 struct mgmt_ev_device_unblocked ev;
4387 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4389 bacpy(&ev.addr.bdaddr, bdaddr);
4390 ev.addr.type = type;
4392 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4393 cmd ? cmd->sk : NULL);