/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
/* Management interface version/revision reported by MGMT_OP_READ_VERSION */
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 6
/* Opcodes this implementation handles; the table is returned verbatim
 * (as little-endian u16s) by MGMT_OP_READ_COMMANDS in read_commands().
 * NOTE(review): this extract is missing lines (original numbering is
 * non-contiguous), so entries and the closing brace are not visible --
 * do not treat the list as complete.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
/* Events this implementation can emit; returned (as little-endian u16s)
 * after the command table by MGMT_OP_READ_COMMANDS in read_commands().
 * NOTE(review): entries are missing from this extract (gaps in the
 * original line numbering).
 */
90 static const u16 mgmt_events[] = {
91 MGMT_EV_CONTROLLER_ERROR,
93 MGMT_EV_INDEX_REMOVED,
95 MGMT_EV_CLASS_OF_DEV_CHANGED,
96 MGMT_EV_LOCAL_NAME_CHANGED,
98 MGMT_EV_NEW_LONG_TERM_KEY,
99 MGMT_EV_DEVICE_CONNECTED,
100 MGMT_EV_DEVICE_DISCONNECTED,
101 MGMT_EV_CONNECT_FAILED,
102 MGMT_EV_PIN_CODE_REQUEST,
103 MGMT_EV_USER_CONFIRM_REQUEST,
104 MGMT_EV_USER_PASSKEY_REQUEST,
106 MGMT_EV_DEVICE_FOUND,
108 MGMT_EV_DEVICE_BLOCKED,
109 MGMT_EV_DEVICE_UNBLOCKED,
110 MGMT_EV_DEVICE_UNPAIRED,
111 MGMT_EV_PASSKEY_NOTIFY,
/* Lifetime of the cached service/EIR data: 2 seconds, in jiffies */
116 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* A controller counts as "powered" for mgmt purposes only when it is up
 * AND not merely in the kernel's transient auto-power-on (HCI_AUTO_OFF)
 * state.
 */
118 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
119 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
/* NOTE(review): the line below is a fragment of a struct definition
 * (presumably struct pending_cmd -- its other members and the struct
 * header are missing from this extract); "list" links the command into
 * hdev->mgmt_pending.
 */
122 struct list_head list;
130 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte (see the Bluetooth Core Spec
 * error-code list); consumed by mgmt_status() below, which falls back to
 * MGMT_STATUS_FAILED for codes past the end of the table.
 * NOTE(review): the first entry (HCI success, 0x00) and the closing
 * brace are missing from this extract.
 */
131 static u8 mgmt_status_table[] = {
133 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
134 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
135 MGMT_STATUS_FAILED, /* Hardware Failure */
136 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
137 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
138 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
139 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
140 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
141 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
142 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
143 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
144 MGMT_STATUS_BUSY, /* Command Disallowed */
145 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
146 MGMT_STATUS_REJECTED, /* Rejected Security */
147 MGMT_STATUS_REJECTED, /* Rejected Personal */
148 MGMT_STATUS_TIMEOUT, /* Host Timeout */
149 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
150 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
151 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
152 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
153 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
154 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
155 MGMT_STATUS_BUSY, /* Repeated Attempts */
156 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
157 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
158 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
159 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
160 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
161 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
162 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
163 MGMT_STATUS_FAILED, /* Unspecified Error */
164 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
165 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
166 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
167 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
168 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
169 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
170 MGMT_STATUS_FAILED, /* Unit Link Key Used */
171 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
172 MGMT_STATUS_TIMEOUT, /* Instant Passed */
173 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
174 MGMT_STATUS_FAILED, /* Transaction Collision */
175 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
176 MGMT_STATUS_REJECTED, /* QoS Rejected */
177 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
178 MGMT_STATUS_REJECTED, /* Insufficient Security */
179 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
180 MGMT_STATUS_BUSY, /* Role Switch Pending */
181 MGMT_STATUS_FAILED, /* Slot Violation */
182 MGMT_STATUS_FAILED, /* Role Switch Failed */
183 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
184 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
185 MGMT_STATUS_BUSY, /* Host Busy Pairing */
186 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
187 MGMT_STATUS_BUSY, /* Controller Busy */
188 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
189 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
190 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
191 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
192 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
195 static u8 mgmt_status(u8 hci_status)
197 if (hci_status < ARRAY_SIZE(mgmt_status_table))
198 return mgmt_status_table[hci_status];
200 return MGMT_STATUS_FAILED;
/* Build an MGMT_EV_CMD_STATUS event for command @cmd on controller
 * @index with the given mgmt @status and queue it on @sk's receive
 * queue via sock_queue_rcv_skb().
 * NOTE(review): lines are missing from this extract -- at minimum the
 * skb/err declarations, the alloc_skb() failure check, the ev->status
 * assignment and the error/return path are not visible here.
 */
203 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
206 struct mgmt_hdr *hdr;
207 struct mgmt_ev_cmd_status *ev;
210 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
/* Event = mgmt header + fixed-size cmd_status payload */
212 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
216 hdr = (void *) skb_put(skb, sizeof(*hdr));
/* Wire format is little-endian throughout */
218 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
219 hdr->index = cpu_to_le16(index);
220 hdr->len = cpu_to_le16(sizeof(*ev));
222 ev = (void *) skb_put(skb, sizeof(*ev));
/* The event echoes back the opcode of the failed/pending command */
224 ev->opcode = cpu_to_le16(cmd);
226 err = sock_queue_rcv_skb(sk, skb);
/* Build an MGMT_EV_CMD_COMPLETE event for command @cmd on controller
 * @index, append the @rp_len-byte response payload @rp, and queue it on
 * @sk's receive queue.
 * NOTE(review): lines are missing from this extract -- the skb/err
 * declarations, allocation failure check, ev->status assignment and the
 * error/return path are not visible here.
 */
233 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
234 void *rp, size_t rp_len)
237 struct mgmt_hdr *hdr;
238 struct mgmt_ev_cmd_complete *ev;
241 BT_DBG("sock %p", sk);
/* Event = mgmt header + cmd_complete header + variable response data */
243 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
247 hdr = (void *) skb_put(skb, sizeof(*hdr));
249 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
250 hdr->index = cpu_to_le16(index);
251 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
253 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
/* The event echoes back the opcode of the completed command */
254 ev->opcode = cpu_to_le16(cmd);
/* Response payload follows the fixed cmd_complete header */
258 memcpy(ev->data, rp, rp_len);
260 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the compile-time interface
 * version/revision. Controller-independent, hence MGMT_INDEX_NONE.
 */
267 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
270 struct mgmt_rp_read_version rp;
272 BT_DBG("sock %p", sk);
274 rp.version = MGMT_VERSION;
275 rp.revision = cpu_to_le16(MGMT_REVISION);
/* NOTE(review): the sizeof(rp) argument line of this call is missing
 * from the extract.
 */
277 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: reply with the counts and opcode lists
 * of all supported commands (mgmt_commands[]) followed by all supported
 * events (mgmt_events[]), each encoded little-endian.
 * NOTE(review): missing from this extract -- the i/opcode/rp_size/err
 * declarations, the kmalloc() failure check and the kfree(rp)/return
 * path.
 */
281 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
284 struct mgmt_rp_read_commands *rp;
285 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
286 const u16 num_events = ARRAY_SIZE(mgmt_events);
291 BT_DBG("sock %p", sk);
/* Header plus one u16 per command and per event */
293 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
295 rp = kmalloc(rp_size, GFP_KERNEL);
299 rp->num_commands = cpu_to_le16(num_commands);
300 rp->num_events = cpu_to_le16(num_events);
/* rp->opcodes may be unaligned, hence put_unaligned_le16() */
302 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
303 put_unaligned_le16(mgmt_commands[i], opcode);
/* Events continue right after the command opcodes */
305 for (i = 0; i < num_events; i++, opcode++)
306 put_unaligned_le16(mgmt_events[i], opcode);
308 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all registered
 * BR/EDR controllers. Walks hci_dev_list twice under hci_dev_list_lock:
 * once to count, once to fill in the ids; devices still in setup or
 * bound to a user channel are skipped on the second pass.
 * NOTE(review): missing from this extract -- the rp/count/rp_len/err
 * declarations, the counting increment, the kmalloc() failure return,
 * the continue statements and the kfree(rp)/return path.
 */
315 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
318 struct mgmt_rp_read_index_list *rp;
324 BT_DBG("sock %p", sk);
326 read_lock(&hci_dev_list_lock);
/* First pass: count BR/EDR controllers to size the reply */
329 list_for_each_entry(d, &hci_dev_list, list) {
330 if (d->dev_type == HCI_BREDR)
/* Two bytes (one __le16 index) per controller */
334 rp_len = sizeof(*rp) + (2 * count);
/* GFP_ATOMIC: allocation happens with the read lock held */
335 rp = kmalloc(rp_len, GFP_ATOMIC);
337 read_unlock(&hci_dev_list_lock);
/* Second pass: record ids, skipping devices userspace must not see */
342 list_for_each_entry(d, &hci_dev_list, list) {
343 if (test_bit(HCI_SETUP, &d->dev_flags))
346 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
349 if (d->dev_type == HCI_BREDR) {
350 rp->index[count++] = cpu_to_le16(d->id);
351 BT_DBG("Added hci%u", d->id);
/* Recompute with the (possibly smaller) second-pass count */
355 rp->num_controllers = cpu_to_le16(count);
356 rp_len = sizeof(*rp) + (2 * count);
358 read_unlock(&hci_dev_list_lock);
360 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the MGMT_SETTING_* bitmask of everything this controller could
 * support, based on its LMP feature bits: the base settings always, the
 * BR/EDR group when BR/EDR-capable (fast-connectable only from BT 1.2,
 * SSP/HS only when SSP-capable), secure connections when SC-capable or
 * forced, and the LE group when LE-capable.
 * NOTE(review): the "u32 settings = 0;" declaration and the final
 * "return settings;" are missing from this extract.
 */
368 static u32 get_supported_settings(struct hci_dev *hdev)
372 settings |= MGMT_SETTING_POWERED;
373 settings |= MGMT_SETTING_PAIRABLE;
374 settings |= MGMT_SETTING_DEBUG_KEYS;
376 if (lmp_bredr_capable(hdev)) {
377 settings |= MGMT_SETTING_CONNECTABLE;
/* Page-scan interval control needs at least Bluetooth 1.2 */
378 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
379 settings |= MGMT_SETTING_FAST_CONNECTABLE;
380 settings |= MGMT_SETTING_DISCOVERABLE;
381 settings |= MGMT_SETTING_BREDR;
382 settings |= MGMT_SETTING_LINK_SECURITY;
384 if (lmp_ssp_capable(hdev)) {
385 settings |= MGMT_SETTING_SSP;
386 settings |= MGMT_SETTING_HS;
/* HCI_FORCE_SC lets userspace force-enable SC on capable hardware */
389 if (lmp_sc_capable(hdev) ||
390 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
391 settings |= MGMT_SETTING_SECURE_CONN;
394 if (lmp_le_capable(hdev)) {
395 settings |= MGMT_SETTING_LE;
396 settings |= MGMT_SETTING_ADVERTISING;
397 settings |= MGMT_SETTING_PRIVACY;
/* Build the MGMT_SETTING_* bitmask of the controller's *current* state,
 * by translating each relevant hdev flag bit one-to-one into its mgmt
 * setting. Counterpart of get_supported_settings() above.
 * NOTE(review): the "u32 settings = 0;" declaration and the final
 * "return settings;" are missing from this extract.
 */
403 static u32 get_current_settings(struct hci_dev *hdev)
/* "Powered" means HCI_UP and not in the transient auto-off state */
407 if (hdev_is_powered(hdev))
408 settings |= MGMT_SETTING_POWERED;
410 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_CONNECTABLE;
413 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_FAST_CONNECTABLE;
416 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_DISCOVERABLE;
419 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
420 settings |= MGMT_SETTING_PAIRABLE;
422 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_BREDR;
425 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LE;
428 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
429 settings |= MGMT_SETTING_LINK_SECURITY;
431 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_SSP;
434 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
435 settings |= MGMT_SETTING_HS;
437 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
438 settings |= MGMT_SETTING_ADVERTISING;
440 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
441 settings |= MGMT_SETTING_SECURE_CONN;
443 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
444 settings |= MGMT_SETTING_DEBUG_KEYS;
446 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
447 settings |= MGMT_SETTING_PRIVACY;
/* SDP service class for Device ID records; excluded from the EIR UUID
 * list because the Device ID is advertised via its own EIR field.
 */
452 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the registered 16-bit UUIDs to @data
 * (capacity @len) and return the advanced write pointer. The field tag
 * starts as EIR_UUID16_ALL and is downgraded to EIR_UUID16_SOME when
 * the buffer cannot hold every UUID.
 * NOTE(review): missing from this extract -- the uuid16 declaration,
 * the continue statements, the lazy first-time initialization of
 * uuids_start (length byte + type byte) and the final "return ptr;".
 */
454 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
456 u8 *ptr = data, *uuids_start = NULL;
457 struct bt_uuid *uuid;
462 list_for_each_entry(uuid, &hdev->uuids, list) {
/* Only 16-bit UUIDs belong in this field */
465 if (uuid->size != 16)
/* UUIDs are stored 128-bit; the 16-bit short form lives at offset 12 */
468 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
472 if (uuid16 == PNP_INFO_SVCLASS_ID)
478 uuids_start[1] = EIR_UUID16_ALL;
482 /* Stop if not enough space to put next UUID */
483 if ((ptr - data) + sizeof(u16) > len) {
484 uuids_start[1] = EIR_UUID16_SOME;
/* Emit little-endian, byte by byte */
488 *ptr++ = (uuid16 & 0x00ff);
489 *ptr++ = (uuid16 & 0xff00) >> 8;
/* uuids_start[0] is the EIR field length byte */
490 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing the registered 32-bit UUIDs to @data
 * (capacity @len) and return the advanced write pointer; 32-bit sibling
 * of create_uuid16_list() with the same ALL -> SOME downgrade on
 * overflow.
 * NOTE(review): missing from this extract -- the continue statement,
 * the lazy initialization of uuids_start, the ptr advance after the
 * memcpy and the final "return ptr;".
 */
496 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
498 u8 *ptr = data, *uuids_start = NULL;
499 struct bt_uuid *uuid;
504 list_for_each_entry(uuid, &hdev->uuids, list) {
505 if (uuid->size != 32)
511 uuids_start[1] = EIR_UUID32_ALL;
515 /* Stop if not enough space to put next UUID */
516 if ((ptr - data) + sizeof(u32) > len) {
517 uuids_start[1] = EIR_UUID32_SOME;
/* The 32-bit short form lives at offset 12 of the stored 128-bit UUID */
521 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
523 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing the registered 128-bit UUIDs to @data
 * (capacity @len) and return the advanced write pointer; 128-bit
 * sibling of create_uuid16_list() with the same ALL -> SOME downgrade
 * on overflow.
 * NOTE(review): missing from this extract -- the continue statement,
 * the lazy initialization of uuids_start, the ptr advance after the
 * memcpy and the final "return ptr;".
 */
529 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
531 u8 *ptr = data, *uuids_start = NULL;
532 struct bt_uuid *uuid;
537 list_for_each_entry(uuid, &hdev->uuids, list) {
538 if (uuid->size != 128)
544 uuids_start[1] = EIR_UUID128_ALL;
548 /* Stop if not enough space to put next UUID */
549 if ((ptr - data) + 16 > len) {
550 uuids_start[1] = EIR_UUID128_SOME;
/* Full 16-byte UUID, copied as stored */
554 memcpy(ptr, uuid->uuid, 16);
556 uuids_start[0] += 16;
/* Look up a pending mgmt command with the given opcode on @hdev's
 * mgmt_pending list.
 * NOTE(review): the "return cmd;" / "return NULL;" lines and braces are
 * missing from this extract; the visible loop matches on cmd->opcode.
 */
562 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
564 struct pending_cmd *cmd;
566 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
567 if (cmd->opcode == opcode)
/* Fill @ptr with LE scan response data containing the local device name
 * (truncated to EIR_NAME_SHORT when it does not fit) and return the
 * number of bytes written.
 * NOTE(review): missing from this extract -- the ad_len/name_len
 * declarations, the enclosing "if (name_len > 0)" style guard implied
 * by the max_len scoping, the truncation assignment in the short-name
 * branch and the final "return ad_len;".
 */
574 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
579 name_len = strlen(hdev->dev_name);
/* Room left after the 2-byte EIR field header */
581 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
583 if (name_len > max_len) {
585 ptr[1] = EIR_NAME_SHORT;
587 ptr[1] = EIR_NAME_COMPLETE;
/* EIR length byte counts the type byte plus the name bytes */
589 ptr[0] = name_len + 1;
591 memcpy(ptr + 2, hdev->dev_name, name_len);
593 ad_len += (name_len + 2);
594 ptr += (name_len + 2);
/* Queue an HCI_OP_LE_SET_SCAN_RSP_DATA command on @req when the freshly
 * generated scan response data differs from what the controller already
 * has. No-op when LE is disabled or the data is unchanged.
 * NOTE(review): the u8 len declaration, the early "return" statements
 * and the cp.length assignment are missing from this extract.
 */
600 static void update_scan_rsp_data(struct hci_request *req)
602 struct hci_dev *hdev = req->hdev;
603 struct hci_cp_le_set_scan_rsp_data cp;
/* Scan response data is only meaningful with LE enabled */
606 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
609 memset(&cp, 0, sizeof(cp));
611 len = create_scan_rsp_data(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed */
613 if (hdev->scan_rsp_data_len == len &&
614 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
/* Cache what we are about to program into the controller */
617 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
618 hdev->scan_rsp_data_len = len;
622 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Return the LE advertising flags (LE_AD_GENERAL / LE_AD_LIMITED / 0)
 * matching the controller's discoverable state. A pending
 * MGMT_OP_SET_DISCOVERABLE command takes precedence over the current
 * flag bits, since those have not been updated yet.
 * NOTE(review): the "if (cmd)" guard around the pending-command branch,
 * the cp->val == 0x01 test before returning LE_AD_GENERAL, and the
 * final "return 0;" are missing from this extract.
 */
625 static u8 get_adv_discov_flags(struct hci_dev *hdev)
627 struct pending_cmd *cmd;
629 /* If there's a pending mgmt command the flags will not yet have
630 * their final values, so check for this first.
632 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
634 struct mgmt_mode *cp = cmd->param;
636 return LE_AD_GENERAL;
/* 0x02 requests limited discoverable mode */
637 else if (cp->val == 0x02)
638 return LE_AD_LIMITED;
640 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
641 return LE_AD_LIMITED;
642 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
643 return LE_AD_GENERAL;
/* Fill @ptr with LE advertising data (flags field, plus a TX power
 * field when the controller reports a valid advertising TX power) and
 * return the number of bytes written.
 * NOTE(review): missing from this extract -- the flags-field emission
 * into ptr, the field length/ptr advances, and the final
 * "return ad_len;".
 */
649 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
651 u8 ad_len = 0, flags = 0;
/* General/limited discoverable bits, tracking pending mgmt commands */
653 flags |= get_adv_discov_flags(hdev);
/* Advertise "LE only" when BR/EDR is switched off */
655 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
656 flags |= LE_AD_NO_BREDR;
659 BT_DBG("adv flags 0x%02x", flags);
/* HCI_TX_POWER_INVALID means the controller never reported a value */
669 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
671 ptr[1] = EIR_TX_POWER;
672 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI_OP_LE_SET_ADV_DATA command on @req when the freshly
 * generated advertising data differs from what the controller already
 * has. Structure mirrors update_scan_rsp_data() above.
 * NOTE(review): the u8 len declaration, the early "return" statements
 * and the cp.length assignment are missing from this extract.
 */
681 static void update_adv_data(struct hci_request *req)
683 struct hci_dev *hdev = req->hdev;
684 struct hci_cp_le_set_adv_data cp;
687 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
690 memset(&cp, 0, sizeof(cp));
692 len = create_adv_data(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed */
694 if (hdev->adv_data_len == len &&
695 memcmp(cp.data, hdev->adv_data, len) == 0)
/* Cache what we are about to program into the controller */
698 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
699 hdev->adv_data_len = len;
703 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Fill @data with the BR/EDR extended inquiry response: local name
 * (shortened when too long), inquiry TX power, Device ID record, and
 * the 16/32/128-bit service UUID lists.
 * NOTE(review): missing from this extract -- the ptr/name_len
 * declarations, the name-length truncation logic, the EIR length/ptr
 * advances for the TX power and Device ID fields, and the Device ID
 * field header bytes.
 */
706 static void create_eir(struct hci_dev *hdev, u8 *data)
711 name_len = strlen(hdev->dev_name);
/* Shortened vs complete local name tag */
717 ptr[1] = EIR_NAME_SHORT;
719 ptr[1] = EIR_NAME_COMPLETE;
721 /* EIR Data length */
722 ptr[0] = name_len + 1;
724 memcpy(ptr + 2, hdev->dev_name, name_len);
726 ptr += (name_len + 2);
/* HCI_TX_POWER_INVALID means the controller never reported a value */
729 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
731 ptr[1] = EIR_TX_POWER;
732 ptr[2] = (u8) hdev->inq_tx_power;
/* devid_source > 0 means userspace set a Device ID via mgmt */
737 if (hdev->devid_source > 0) {
739 ptr[1] = EIR_DEVICE_ID;
741 put_unaligned_le16(hdev->devid_source, ptr + 2);
742 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
743 put_unaligned_le16(hdev->devid_product, ptr + 6);
744 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* Each helper consumes remaining space and returns the advanced ptr */
749 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
750 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
751 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI_OP_WRITE_EIR command on @req when the regenerated EIR
 * differs from the cached copy. Skipped when the controller is not
 * powered, lacks extended inquiry support, has SSP disabled (EIR
 * requires SSP) or while the service cache is active.
 * NOTE(review): the early "return" statements of each guard are missing
 * from this extract.
 */
754 static void update_eir(struct hci_request *req)
756 struct hci_dev *hdev = req->hdev;
757 struct hci_cp_write_eir cp;
759 if (!hdev_is_powered(hdev))
762 if (!lmp_ext_inq_capable(hdev))
765 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
/* While the service cache is on, UUID updates are deferred */
768 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
771 memset(&cp, 0, sizeof(cp));
773 create_eir(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed */
775 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
778 memcpy(hdev->eir, cp.data, sizeof(cp.data));
780 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hints of all registered UUIDs; the
 * result becomes the service-class byte of the Class of Device.
 * NOTE(review): the "u8 val = 0;" declaration and "return val;" are
 * missing from this extract.
 */
783 static u8 get_service_classes(struct hci_dev *hdev)
785 struct bt_uuid *uuid;
788 list_for_each_entry(uuid, &hdev->uuids, list)
789 val |= uuid->svc_hint;
/* Queue an HCI_OP_WRITE_CLASS_OF_DEV command on @req when the computed
 * Class of Device (minor, major, service classes, plus the limited-
 * discoverable bit) differs from the current one. Skipped when not
 * powered, BR/EDR disabled, or the service cache is active.
 * NOTE(review): the "u8 cod[3];" declaration, the early "return"
 * statements and the limited-discoverable bit assignment (cod[1] |=
 * 0x20) are missing from this extract.
 */
794 static void update_class(struct hci_request *req)
796 struct hci_dev *hdev = req->hdev;
799 BT_DBG("%s", hdev->name);
801 if (!hdev_is_powered(hdev))
804 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
807 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
810 cod[0] = hdev->minor_class;
811 cod[1] = hdev->major_class;
812 cod[2] = get_service_classes(hdev);
/* Limited discoverable is also signalled in the Class of Device */
814 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
/* Skip the HCI round-trip when nothing changed */
817 if (memcmp(cod, hdev->dev_class, 3) == 0)
820 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Return the effective connectable state: a pending
 * MGMT_OP_SET_CONNECTABLE command's requested value wins over the
 * current HCI_CONNECTABLE flag, which has not been updated yet.
 * NOTE(review): the "if (cmd)" guard and the "return cp->val;" of the
 * pending-command branch are missing from this extract.
 */
823 static bool get_connectable(struct hci_dev *hdev)
825 struct pending_cmd *cmd;
827 /* If there's a pending mgmt command the flag will not yet have
828 * it's final value, so check for this first.
830 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
832 struct mgmt_mode *cp = cmd->param;
836 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Queue the HCI commands that (re)start LE advertising on @req: pick an
 * own-address type (generating a new random address when needed), set
 * the advertising parameters, then enable advertising. Advertising is
 * connectable (ADV_IND) when the device is connectable, otherwise
 * non-connectable (ADV_NONCONN_IND).
 * NOTE(review): the "bool connectable;" declaration and the "return"
 * on hci_update_random_address() failure are missing from this extract.
 */
839 static void enable_advertising(struct hci_request *req)
841 struct hci_dev *hdev = req->hdev;
842 struct hci_cp_le_set_adv_param cp;
843 u8 own_addr_type, enable = 0x01;
846 /* Clear the HCI_ADVERTISING bit temporarily so that the
847 * hci_update_random_address knows that it's safe to go ahead
848 * and write a new random address. The flag will be set back on
849 * as soon as the SET_ADV_ENABLE HCI command completes.
851 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
853 connectable = get_connectable(hdev);
855 /* Set require_privacy to true only when non-connectable
856 * advertising is used. In that case it is fine to use a
857 * non-resolvable private address.
859 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
862 memset(&cp, 0, sizeof(cp));
/* 0x0800 * 0.625 ms = 1.28 s advertising interval */
863 cp.min_interval = cpu_to_le16(0x0800);
864 cp.max_interval = cpu_to_le16(0x0800);
865 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
866 cp.own_address_type = own_addr_type;
867 cp.channel_map = hdev->le_adv_channel_map;
869 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
871 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue an HCI command on @req that disables LE advertising.
 * NOTE(review): the "u8 enable = 0x00;" declaration is missing from
 * this extract -- presumably `enable` is 0x00 here (the disable value);
 * confirm against the full source.
 */
874 static void disable_advertising(struct hci_request *req)
878 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed work handler: when the service cache expires, clear
 * HCI_SERVICE_CACHE and run an HCI request (built between the lock/
 * unlock, in lines missing from this extract -- presumably update_eir()
 * and update_class(), which are deferred while the cache bit is set).
 * NOTE(review): the container_of() member argument, the "return", the
 * hci_dev_lock() call and the request-building calls are not visible
 * here.
 */
881 static void service_cache_off(struct work_struct *work)
883 struct hci_dev *hdev = container_of(work, struct hci_dev,
885 struct hci_request req;
/* Nothing to do if the cache bit was already clear */
887 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
890 hci_req_init(&req, hdev);
897 hci_dev_unlock(hdev);
899 hci_req_run(&req, NULL);
/* Delayed work handler for resolvable-private-address rotation: mark
 * the RPA expired and, when advertising with no active LE connections,
 * restart advertising so a fresh RPA gets programmed.
 * NOTE(review): the container_of() member argument, the BT_DBG, and the
 * "return" of the early-out branch are missing from this extract.
 */
902 static void rpa_expired(struct work_struct *work)
904 struct hci_dev *hdev = container_of(work, struct hci_dev,
906 struct hci_request req;
910 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
/* Only force a rotation while advertising and fully disconnected */
912 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
913 hci_conn_num(hdev, LE_LINK) > 0)
916 /* The generation of a new RPA and programming it into the
917 * controller happens in the enable_advertising() function.
920 hci_req_init(&req, hdev);
922 disable_advertising(&req);
923 enable_advertising(&req);
925 hci_req_run(&req, NULL);
/* One-time per-controller mgmt initialization, performed the first time
 * a mgmt socket touches the device: set HCI_MGMT (bailing out if it was
 * already set), wire up the delayed-work handlers and clear the
 * implicit pairable bit.
 * NOTE(review): the "return" after the test_and_set_bit guard is
 * missing from this extract.
 */
928 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
930 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
933 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
934 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
936 /* Non-mgmt controlled devices get this bit set
937 * implicitly so that pairing works for them, however
938 * for mgmt we require user-space to explicitly enable
941 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: reply with the controller's address, HCI
 * version/manufacturer, supported and current settings, class of device
 * and (short) name.
 * NOTE(review): the hci_dev_lock() call and the sizeof(rp) argument of
 * the final cmd_complete() call are missing from this extract (an
 * unpaired hci_dev_unlock() is visible).
 */
944 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
945 void *data, u16 data_len)
947 struct mgmt_rp_read_info rp;
949 BT_DBG("sock %p %s", sk, hdev->name);
953 memset(&rp, 0, sizeof(rp));
955 bacpy(&rp.bdaddr, &hdev->bdaddr);
957 rp.version = hdev->hci_ver;
958 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
960 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
961 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
963 memcpy(rp.dev_class, hdev->dev_class, 3);
965 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
966 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
968 hci_dev_unlock(hdev);
970 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command's memory. The body is missing from this
 * extract -- presumably it drops the socket reference and frees
 * cmd->param and cmd (see mgmt_pending_add() below, which allocates
 * them); confirm against the full source.
 */
974 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate and enqueue a pending mgmt command on hdev->mgmt_pending,
 * keeping a private copy of the @len-byte request payload so it can be
 * replayed/answered when the HCI request completes.
 * NOTE(review): missing from this extract -- the NULL returns for the
 * two failed allocations (including freeing cmd when the param
 * allocation fails), cmd->param_len/sk assignments, the sock_hold() and
 * the final "return cmd;".
 */
981 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
982 struct hci_dev *hdev, void *data,
985 struct pending_cmd *cmd;
987 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
991 cmd->opcode = opcode;
992 cmd->index = hdev->id;
/* Private copy: the caller's buffer is not guaranteed to outlive us */
994 cmd->param = kmalloc(len, GFP_KERNEL);
1001 memcpy(cmd->param, data, len);
1006 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command on hdev->mgmt_pending matching
 * @opcode (opcode 0 matches all). Uses the _safe iterator so callbacks
 * may remove/free the command they are given.
 * NOTE(review): the callback's data argument in the signature, the
 * "continue" of the opcode filter and the cb(cmd, data) invocation are
 * missing from this extract.
 */
1011 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1012 void (*cb)(struct pending_cmd *cmd,
1016 struct pending_cmd *cmd, *tmp;
1018 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1019 if (opcode > 0 && cmd->opcode != opcode)
1026 static void mgmt_pending_remove(struct pending_cmd *cmd)
1028 list_del(&cmd->list);
1029 mgmt_pending_free(cmd);
/* Reply to @sk's command @opcode with a cmd_complete carrying the
 * controller's current settings bitmask (little-endian).
 * NOTE(review): the sizeof(settings) argument line of the cmd_complete
 * call is missing from this extract.
 */
1032 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1034 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1036 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, run the power-off work immediately instead of waiting for the
 * delayed timer.
 */
1040 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1042 BT_DBG("%s status 0x%02x", hdev->name, status);
1044 if (hci_conn_count(hdev) == 0) {
1045 cancel_delayed_work(&hdev->power_off);
1046 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Build and run an HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, stop advertising, stop any LE
 * scan, then tear down every connection according to its state
 * (disconnect established links, cancel outgoing connection attempts,
 * reject incoming ones). Returns hci_req_run()'s result; -ENODATA means
 * nothing needed to be queued.
 * NOTE(review): missing from this extract -- the "u8 scan = 0x00;"
 * style declaration used by WRITE_SCAN_ENABLE, the switch case labels
 * (presumably BT_CONNECTED/BT_CONNECT/BT_CONNECT2 -- confirm against
 * the full source), the second argument lines of several hci_req_add()
 * calls and the closing braces.
 */
1050 static int clean_up_hci_state(struct hci_dev *hdev)
1052 struct hci_request req;
1053 struct hci_conn *conn;
1055 hci_req_init(&req, hdev);
/* Turn off page and/or inquiry scan if either is active */
1057 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1058 test_bit(HCI_PSCAN, &hdev->flags)) {
1060 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1063 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1064 disable_advertising(&req);
1066 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1067 hci_req_add_le_scan_disable(&req);
/* Tear down every connection, by state */
1070 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1071 struct hci_cp_disconnect dc;
1072 struct hci_cp_reject_conn_req rej;
1074 switch (conn->state) {
/* Established link: plain disconnect */
1077 dc.handle = cpu_to_le16(conn->handle);
1078 dc.reason = 0x15; /* Terminated due to Power Off */
1079 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Outgoing attempt in progress: cancel it */
1082 if (conn->type == LE_LINK)
1083 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1085 else if (conn->type == ACL_LINK)
1086 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming request pending: reject it */
1090 bacpy(&rej.bdaddr, &conn->dst);
1091 rej.reason = 0x15; /* Terminated due to Power Off */
1092 if (conn->type == ACL_LINK)
1093 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1095 else if (conn->type == SCO_LINK)
1096 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1102 return hci_req_run(&req, clean_up_hci_complete);
/* MGMT_OP_SET_POWERED handler: power the controller on or off.
 * Validates val is 0/1, rejects a second concurrent request, treats a
 * power-on during the HCI_AUTO_OFF grace period as a flag flip (the
 * device is already up), short-circuits no-op requests, and otherwise
 * registers a pending command and kicks the power_on work or the
 * clean-up + delayed power_off sequence.
 * NOTE(review): missing from this extract -- the len parameter line,
 * err declaration, hci_dev_lock(), the BUSY status argument, several
 * closing braces, the failed/err labels and the final "return err;".
 */
1105 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1108 struct mgmt_mode *cp = data;
1109 struct pending_cmd *cmd;
1112 BT_DBG("request for %s", hdev->name);
/* Only plain on/off are valid */
1114 if (cp->val != 0x00 && cp->val != 0x01)
1115 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1116 MGMT_STATUS_INVALID_PARAMS);
/* Only one power transition may be in flight at a time */
1120 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1121 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off grace period: device is already up, just cancel the
 * pending auto power-down and report powered.
 */
1126 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1127 cancel_delayed_work(&hdev->power_off);
1130 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1132 err = mgmt_powered(hdev, 1);
/* Requested state already matches: answer immediately */
1137 if (!!cp->val == hdev_is_powered(hdev)) {
1138 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1142 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
/* Power on is handled asynchronously by the power_on work */
1149 queue_work(hdev->req_workqueue, &hdev->power_on);
1152 /* Disconnect connections, stop scans, etc */
1153 err = clean_up_hci_state(hdev);
/* Give the clean-up commands time to complete before forcing off */
1155 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1156 HCI_POWER_OFF_TIMEOUT);
1158 /* ENODATA means there were no HCI commands queued */
1159 if (err == -ENODATA) {
1160 cancel_delayed_work(&hdev->power_off);
1161 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1167 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all mgmt (HCI control) sockets except
 * @skip_sk: build the header (index from @hdev, or MGMT_INDEX_NONE when
 * hdev is NULL -- the guard line is missing from this extract), append
 * the payload, timestamp and fan out via hci_send_to_control().
 * NOTE(review): also missing -- the -ENOMEM return after alloc_skb(),
 * the if/else around the two index assignments, the kfree_skb() and the
 * "return 0;".
 */
1171 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1172 struct sock *skip_sk)
1174 struct sk_buff *skb;
1175 struct mgmt_hdr *hdr;
1177 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1181 hdr = (void *) skb_put(skb, sizeof(*hdr));
1182 hdr->opcode = cpu_to_le16(event);
/* Per-controller events carry the hdev id, global ones INDEX_NONE */
1184 hdr->index = cpu_to_le16(hdev->id);
1186 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1187 hdr->len = cpu_to_le16(data_len);
1190 memcpy(skb_put(skb, data_len), data, data_len);
1193 __net_timestamp(skb);
/* skip_sk is typically the socket that triggered the change */
1195 hci_send_to_control(skb, skip_sk);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all mgmt sockets except @skip.
 * NOTE(review): the "__le32 ev;" style declaration is missing from this
 * extract (the type is implied by the cpu_to_le32 assignment).
 */
1201 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1205 ev = cpu_to_le32(get_current_settings(hdev));
1207 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* NOTE(review): the line below is a fragment of a struct definition
 * (presumably struct cmd_lookup, used as settings_rsp()'s data
 * argument -- its other members are missing from this extract).
 */
1212 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer the pending command with the
 * current settings, unlink it, remember the first socket seen in the
 * cmd_lookup match (taking a reference so the caller can notify it
 * later) and free the command.
 * NOTE(review): closing braces are missing from this extract.
 */
1216 static void settings_rsp(struct pending_cmd *cmd, void *data)
1218 struct cmd_lookup *match = data;
1220 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
/* Unlinked here; freed below (not via mgmt_pending_remove) */
1222 list_del(&cmd->list);
1224 if (match->sk == NULL) {
1225 match->sk = cmd->sk;
/* Reference for the caller; released after it sends its event */
1226 sock_hold(match->sk);
1229 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status passed via @data and remove it.
 * NOTE(review): the "u8 *status = data;" style declaration is missing
 * from this extract (implied by the *status dereference).
 */
1232 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1236 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1237 mgmt_pending_remove(cmd);
1240 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1242 if (!lmp_bredr_capable(hdev))
1243 return MGMT_STATUS_NOT_SUPPORTED;
1244 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1245 return MGMT_STATUS_REJECTED;
1247 return MGMT_STATUS_SUCCESS;
1250 static u8 mgmt_le_support(struct hci_dev *hdev)
1252 if (!lmp_le_capable(hdev))
1253 return MGMT_STATUS_NOT_SUPPORTED;
1254 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1255 return MGMT_STATUS_REJECTED;
1257 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for set_discoverable(): on failure,
 * report the mapped error and clear HCI_LIMITED_DISCOVERABLE; on
 * success, set/clear HCI_DISCOVERABLE per the request, arm the
 * discoverable timeout when one was stored, answer the pending command
 * and, if the flag actually changed, broadcast new settings and refresh
 * the class of device (limited-discoverable bit).
 * NOTE(review): missing from this extract -- the bool changed
 * declaration, hci_dev_lock(), the "if (!cmd)" guard, the "if (status)"
 * branch structure with its goto remove/unlock labels, the cp->val
 * test selecting the set/clear branch, the "if (changed)" guard and the
 * update_class(&req) call between hci_req_init and hci_req_run.
 */
1260 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1262 struct pending_cmd *cmd;
1263 struct mgmt_mode *cp;
1264 struct hci_request req;
1267 BT_DBG("status 0x%02x", status);
1271 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* Failure path: report the HCI error, drop the limited bit */
1276 u8 mgmt_err = mgmt_status(status);
1277 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1278 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1284 changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* Arm the timeout stored by set_discoverable() */
1287 if (hdev->discov_timeout > 0) {
1288 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1289 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1293 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1297 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1300 new_settings(hdev, cmd->sk);
1302 /* When the discoverable mode gets changed, make sure
1303 * that class of device has the limited discoverable
1304 * bit correctly set.
1306 hci_req_init(&req, hdev);
1308 hci_req_run(&req, NULL);
1311 mgmt_pending_remove(cmd);
1314 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable; @timeout (seconds) is forbidden when
 * disabling and mandatory for limited mode. After validation it either
 * answers immediately (powered off, or no mode change -- only the
 * timeout is updated) or registers a pending command, stores the new
 * timeout, and builds an HCI request: just update_adv_data() for
 * LE-only controllers, otherwise program the IAC list and write the
 * scan enable, completing in set_discoverable_complete().
 * NOTE(review): missing from this extract -- the len parameter line,
 * timeout/scan/err declarations, hci_dev_lock(), the BUSY status
 * argument, several goto failed/remove labels, the "if (changed)"
 * guard around new_settings(), the goto update_ad/failed jumps, the
 * scan |= SCAN_PAGE / else branches of the BR/EDR path, the update_ad
 * label and the final "return err;".
 */
1317 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1320 struct mgmt_cp_set_discoverable *cp = data;
1321 struct pending_cmd *cmd;
1322 struct hci_request req;
1327 BT_DBG("request for %s", hdev->name);
/* Needs at least one of BR/EDR or LE enabled */
1329 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1330 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1331 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1332 MGMT_STATUS_REJECTED);
1334 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1335 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1336 MGMT_STATUS_INVALID_PARAMS);
1338 timeout = __le16_to_cpu(cp->timeout);
1340 /* Disabling discoverable requires that no timeout is set,
1341 * and enabling limited discoverable requires a timeout.
1343 if ((cp->val == 0x00 && timeout > 0) ||
1344 (cp->val == 0x02 && timeout == 0))
1345 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1346 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running timer, hence a powered device */
1350 if (!hdev_is_powered(hdev) && timeout > 0) {
1351 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1352 MGMT_STATUS_NOT_POWERED);
/* Discoverable and connectable changes must not race each other */
1356 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1357 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1358 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable */
1363 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1364 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1365 MGMT_STATUS_REJECTED);
/* Powered off: just flip the flag and answer, no HCI traffic */
1369 if (!hdev_is_powered(hdev)) {
1370 bool changed = false;
1372 /* Setting limited discoverable when powered off is
1373 * not a valid operation since it requires a timeout
1374 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1376 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1377 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1381 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1386 err = new_settings(hdev, sk);
1391 /* If the current mode is the same, then just update the timeout
1392 * value with the new value. And if only the timeout gets updated,
1393 * then no need for any HCI transactions.
1395 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1396 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1397 &hdev->dev_flags)) {
1398 cancel_delayed_work(&hdev->discov_off);
1399 hdev->discov_timeout = timeout;
/* Re-arm the timer with the new timeout */
1401 if (cp->val && hdev->discov_timeout > 0) {
1402 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1403 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1407 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1411 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1417 /* Cancel any potential discoverable timeout that might be
1418 * still active and store new timeout value. The arming of
1419 * the timeout happens in the complete handler.
1421 cancel_delayed_work(&hdev->discov_off);
1422 hdev->discov_timeout = timeout;
1424 /* Limited discoverable mode */
1425 if (cp->val == 0x02)
1426 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1428 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1430 hci_req_init(&req, hdev);
1432 /* The procedure for LE-only controllers is much simpler - just
1433 * update the advertising data.
1435 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1441 struct hci_cp_write_current_iac_lap hci_cp;
1443 if (cp->val == 0x02) {
1444 /* Limited discoverable mode */
1445 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC 0x9e8b00 and GIAC 0x9e8b33, little-endian */
1446 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1447 hci_cp.iac_lap[1] = 0x8b;
1448 hci_cp.iac_lap[2] = 0x9e;
1449 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1450 hci_cp.iac_lap[4] = 0x8b;
1451 hci_cp.iac_lap[5] = 0x9e;
1453 /* General discoverable mode */
1455 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1456 hci_cp.iac_lap[1] = 0x8b;
1457 hci_cp.iac_lap[2] = 0x9e;
/* Payload: num_iac byte plus 3 bytes per IAC */
1460 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1461 (hci_cp.num_iac * 3) + 1, &hci_cp);
1463 scan |= SCAN_INQUIRY;
1465 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1468 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
/* Keep the advertising flags in sync with the new mode */
1471 update_adv_data(&req);
1473 err = hci_req_run(&req, set_discoverable_complete);
1475 mgmt_pending_remove(cmd);
1478 hci_dev_unlock(hdev);
/* Queue HCI page-scan parameter updates onto @req so that a remote
 * device can connect to us faster (shorter page scan interval) when
 * @enable is true, or revert to the spec default parameters otherwise.
 * No-op on LE-only controllers and on controllers older than BT 1.2,
 * which lack these HCI commands.
 * NOTE(review): chunk is missing interior lines (braces/returns) —
 * compare against upstream mgmt.c before relying on exact flow.
 */
1482 static void write_fast_connectable(struct hci_request *req, bool enable)
1484 struct hci_dev *hdev = req->hdev;
1485 struct hci_cp_write_page_scan_activity acp;
1488 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1491 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1495 type = PAGE_SCAN_TYPE_INTERLACED;
1497 /* 160 msec page scan interval */
1498 acp.interval = cpu_to_le16(0x0100);
1500 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1502 /* default 1.28 sec page scan */
1503 acp.interval = cpu_to_le16(0x0800);
1506 acp.window = cpu_to_le16(0x0012);
/* Only emit the HCI commands when the controller state would actually
 * change, to avoid redundant traffic.
 */
1508 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1509 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1510 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1513 if (hdev->page_scan_type != type)
1514 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for MGMT_OP_SET_CONNECTABLE: on HCI
 * error report the mapped mgmt status to the requester; on success
 * update HCI_CONNECTABLE, answer the pending command and, if the
 * setting actually changed, broadcast New Settings to other sockets.
 */
1517 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1519 struct pending_cmd *cmd;
1520 struct mgmt_mode *cp;
1523 BT_DBG("status 0x%02x", status);
1527 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1532 u8 mgmt_err = mgmt_status(status);
1533 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
/* test_and_*_bit tells us whether the flag really changed, so the
 * New Settings event is only sent when something is new.
 */
1539 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1541 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1543 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1546 new_settings(hdev, cmd->sk);
1549 mgmt_pending_remove(cmd);
1552 hci_dev_unlock(hdev);
/* Update the stored connectable setting without touching the
 * controller (used when the adapter is powered off or no HCI work is
 * needed). Disabling connectable also clears HCI_DISCOVERABLE, since
 * discoverable implies connectable. Returns the result of answering
 * the requester, emitting New Settings when the flag changed.
 */
1555 static int set_connectable_update_settings(struct hci_dev *hdev,
1556 struct sock *sk, u8 val)
1558 bool changed = false;
/* !! normalizes val so a 0/1 comparison with test_bit works */
1561 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1565 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1567 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1568 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1571 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1576 return new_settings(hdev, sk);
/* Handler for the Set Connectable management command. Validates the
 * request (at least one of BR/EDR or LE must be enabled; val is
 * strictly 0x00/0x01), short-circuits when powered off, rejects if a
 * conflicting command is already pending, and otherwise builds an HCI
 * request (scan enable and/or advertising updates) completed by
 * set_connectable_complete(). -ENODATA from hci_req_run() means no
 * HCI command was queued, so settings are updated directly.
 */
1581 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1584 struct mgmt_mode *cp = data;
1585 struct pending_cmd *cmd;
1586 struct hci_request req;
1590 BT_DBG("request for %s", hdev->name);
1592 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1593 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1594 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1595 MGMT_STATUS_REJECTED);
1597 if (cp->val != 0x00 && cp->val != 0x01)
1598 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1599 MGMT_STATUS_INVALID_PARAMS);
1603 if (!hdev_is_powered(hdev)) {
1604 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Only one connectable/discoverable transition may be in flight */
1608 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1609 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1610 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1615 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1621 hci_req_init(&req, hdev);
1623 /* If BR/EDR is not enabled and we disable advertising as a
1624 * by-product of disabling connectable, we need to update the
1625 * advertising flags.
1627 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1629 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1630 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1632 update_adv_data(&req);
1633 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
/* Going non-connectable cancels any running discoverable timeout */
1639 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1640 hdev->discov_timeout > 0)
1641 cancel_delayed_work(&hdev->discov_off);
1644 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1647 /* If we're going from non-connectable to connectable or
1648 * vice-versa when fast connectable is enabled ensure that fast
1649 * connectable gets disabled. write_fast_connectable won't do
1650 * anything if the page scan parameters are already what they
1653 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1654 write_fast_connectable(&req, false);
/* Restart advertising so it picks up the new connectable state;
 * only safe while no LE connections exist.
 */
1656 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1657 hci_conn_num(hdev, LE_LINK) == 0) {
1658 disable_advertising(&req);
1659 enable_advertising(&req);
1662 err = hci_req_run(&req, set_connectable_complete);
1664 mgmt_pending_remove(cmd);
1665 if (err == -ENODATA)
1666 err = set_connectable_update_settings(hdev, sk,
1672 hci_dev_unlock(hdev);
/* Handler for the Set Pairable management command. Pure host-side
 * flag: toggles HCI_PAIRABLE in dev_flags (no HCI traffic needed),
 * replies with current settings, and broadcasts New Settings when the
 * flag actually changed.
 */
1676 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1679 struct mgmt_mode *cp = data;
1683 BT_DBG("request for %s", hdev->name);
1685 if (cp->val != 0x00 && cp->val != 0x01)
1686 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1687 MGMT_STATUS_INVALID_PARAMS);
1692 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1694 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1696 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1701 err = new_settings(hdev, sk);
1704 hci_dev_unlock(hdev);
/* Handler for the Set Link Security management command (legacy
 * authentication). Requires BR/EDR support. When powered off only the
 * HCI_LINK_SECURITY dev_flag is toggled; when powered on and the
 * controller's HCI_AUTH state differs from the requested value, a
 * Write Authentication Enable HCI command is sent with a pending mgmt
 * command tracking completion.
 */
1708 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1711 struct mgmt_mode *cp = data;
1712 struct pending_cmd *cmd;
1716 BT_DBG("request for %s", hdev->name);
1718 status = mgmt_bredr_support(hdev);
1720 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1723 if (cp->val != 0x00 && cp->val != 0x01)
1724 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1725 MGMT_STATUS_INVALID_PARAMS);
1729 if (!hdev_is_powered(hdev)) {
1730 bool changed = false;
1732 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1733 &hdev->dev_flags)) {
1734 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1738 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1743 err = new_settings(hdev, sk);
1748 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1749 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: just reply */
1756 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1757 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1761 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1767 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1769 mgmt_pending_remove(cmd);
1774 hci_dev_unlock(hdev);
/* Handler for the Set Secure Simple Pairing management command.
 * Requires BR/EDR and an SSP-capable controller. When powered off
 * only host flags are updated (disabling SSP also clears
 * HCI_HS_ENABLED, since High Speed depends on SSP); when powered on
 * a Write Simple Pairing Mode HCI command is sent unless the setting
 * is already in the requested state or a conflicting SSP/HS command
 * is pending.
 */
1778 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1780 struct mgmt_mode *cp = data;
1781 struct pending_cmd *cmd;
1785 BT_DBG("request for %s", hdev->name);
1787 status = mgmt_bredr_support(hdev);
1789 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1791 if (!lmp_ssp_capable(hdev))
1792 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1793 MGMT_STATUS_NOT_SUPPORTED);
1795 if (cp->val != 0x00 && cp->val != 0x01)
1796 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1797 MGMT_STATUS_INVALID_PARAMS);
1801 if (!hdev_is_powered(hdev)) {
1805 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1808 changed = test_and_clear_bit(HCI_SSP_ENABLED,
1811 changed = test_and_clear_bit(HCI_HS_ENABLED,
1814 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1817 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1822 err = new_settings(hdev, sk);
1827 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1828 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1829 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1834 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1835 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1839 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1845 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1847 mgmt_pending_remove(cmd);
1852 hci_dev_unlock(hdev);
/* Handler for the Set High Speed management command. Host-side flag
 * only (no HCI traffic). Requires BR/EDR, an SSP-capable controller
 * and SSP enabled. Disabling HS while powered on is rejected here —
 * see the powered check in the disable path below.
 */
1856 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1858 struct mgmt_mode *cp = data;
1863 BT_DBG("request for %s", hdev->name);
1865 status = mgmt_bredr_support(hdev);
1867 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1869 if (!lmp_ssp_capable(hdev))
1870 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1871 MGMT_STATUS_NOT_SUPPORTED);
1873 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1874 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1875 MGMT_STATUS_REJECTED);
1877 if (cp->val != 0x00 && cp->val != 0x01)
1878 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1879 MGMT_STATUS_INVALID_PARAMS);
1884 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1886 if (hdev_is_powered(hdev)) {
1887 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1888 MGMT_STATUS_REJECTED);
1892 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1895 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1900 err = new_settings(hdev, sk);
1903 hci_dev_unlock(hdev);
/* HCI request completion handler for MGMT_OP_SET_LE: on error send a
 * command status to every pending SET_LE requester; on success answer
 * them with the new settings, broadcast New Settings, and — if LE is
 * now enabled — refresh advertising and scan response data so the
 * controller has sane defaults.
 */
1907 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1909 struct cmd_lookup match = { NULL, hdev };
1912 u8 mgmt_err = mgmt_status(status);
1914 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1919 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1921 new_settings(hdev, match.sk);
1926 /* Make sure the controller has a good default for
1927 * advertising data. Restrict the update to when LE
1928 * has actually been enabled. During power on, the
1929 * update in powered_update_hci will take care of it.
1931 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1932 struct hci_request req;
1936 hci_req_init(&req, hdev);
1937 update_adv_data(&req);
1938 update_scan_rsp_data(&req);
1939 hci_req_run(&req, NULL);
1941 hci_dev_unlock(hdev);
/* Handler for the Set Low Energy management command. LE-only devices
 * may not toggle LE off (BR/EDR must be enabled for that to make
 * sense). When powered off or when the controller's LE host support
 * already matches, only dev_flags are updated — disabling LE also
 * clears HCI_ADVERTISING. Otherwise a Write LE Host Supported HCI
 * request is built (turning advertising off first when disabling LE)
 * and completed by le_enable_complete().
 */
1945 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1947 struct mgmt_mode *cp = data;
1948 struct hci_cp_write_le_host_supported hci_cp;
1949 struct pending_cmd *cmd;
1950 struct hci_request req;
1954 BT_DBG("request for %s", hdev->name);
1956 if (!lmp_le_capable(hdev))
1957 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1958 MGMT_STATUS_NOT_SUPPORTED);
1960 if (cp->val != 0x00 && cp->val != 0x01)
1961 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1962 MGMT_STATUS_INVALID_PARAMS);
1964 /* LE-only devices do not allow toggling LE on/off */
1965 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1966 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1967 MGMT_STATUS_REJECTED);
1972 enabled = lmp_host_le_capable(hdev);
1974 if (!hdev_is_powered(hdev) || val == enabled) {
1975 bool changed = false;
1977 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1978 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1982 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1983 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1987 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1992 err = new_settings(hdev, sk);
/* Serialize against other LE/advertising transitions */
1997 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1998 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1999 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2004 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2010 hci_req_init(&req, hdev);
2012 memset(&hci_cp, 0, sizeof(hci_cp));
2016 hci_cp.simul = lmp_le_br_capable(hdev);
2018 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
2019 disable_advertising(&req);
2022 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2025 err = hci_req_run(&req, le_enable_complete);
2027 mgmt_pending_remove(cmd);
2030 hci_dev_unlock(hdev);
2034 /* This is a helper function to test for pending mgmt commands that can
2035 * cause CoD or EIR HCI commands. We can only allow one such pending
2036 * mgmt command at a time since otherwise we cannot easily track what
2037 * the current values are, will be, and based on that calculate if a new
2038 * HCI command needs to be sent and if yes with what value.
/* Returns true when any UUID/class/powered command is still pending
 * on hdev->mgmt_pending; callers use it to reject new EIR/CoD work.
 */
2040 static bool pending_eir_or_class(struct hci_dev *hdev)
2042 struct pending_cmd *cmd;
2044 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2045 switch (cmd->opcode) {
2046 case MGMT_OP_ADD_UUID:
2047 case MGMT_OP_REMOVE_UUID:
2048 case MGMT_OP_SET_DEV_CLASS:
2049 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are shortened forms
 * of UUIDs sharing these low 12 bytes (see get_uuid_size()).
 */
2057 static const u8 bluetooth_base_uuid[] = {
2058 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2059 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID: if its low 12 bytes differ
 * from the Bluetooth Base UUID it is a true 128-bit UUID; otherwise
 * the top 32 bits decide whether it shortens to 16 or 32 bits.
 * NOTE(review): the 128/32/16 return statements were dropped from this
 * chunk — verify against upstream before editing.
 */
2062 static u8 get_uuid_size(const u8 *uuid)
2066 if (memcmp(uuid, bluetooth_base_uuid, 12))
2069 val = get_unaligned_le32(&uuid[12]);
/* Common completion for UUID/class commands: find the pending command
 * for @mgmt_op and answer it with the mapped status plus the current
 * 3-byte class of device.
 */
2076 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2078 struct pending_cmd *cmd;
2082 cmd = mgmt_pending_find(mgmt_op, hdev);
2086 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2087 hdev->dev_class, 3);
2089 mgmt_pending_remove(cmd);
2092 hci_dev_unlock(hdev);
/* HCI request callback for Add UUID — delegate to the common
 * class-command completion handler.
 */
2095 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2097 BT_DBG("status 0x%02x", status);
2099 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for the Add UUID management command: append the UUID to
 * hdev->uuids and run an HCI request to refresh the derived class of
 * device / EIR data. Rejected with BUSY while another EIR/CoD-
 * affecting command is pending. -ENODATA from hci_req_run() means
 * nothing needed updating, so the command completes immediately.
 */
2102 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2104 struct mgmt_cp_add_uuid *cp = data;
2105 struct pending_cmd *cmd;
2106 struct hci_request req;
2107 struct bt_uuid *uuid;
2110 BT_DBG("request for %s", hdev->name);
2114 if (pending_eir_or_class(hdev)) {
2115 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2120 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2126 memcpy(uuid->uuid, cp->uuid, 16);
2127 uuid->svc_hint = cp->svc_hint;
2128 uuid->size = get_uuid_size(cp->uuid);
2130 list_add_tail(&uuid->list, &hdev->uuids);
2132 hci_req_init(&req, hdev);
2137 err = hci_req_run(&req, add_uuid_complete);
2139 if (err != -ENODATA)
2142 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2143 hdev->dev_class, 3);
2147 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2156 hci_dev_unlock(hdev);
/* Arm the service cache: when the adapter is powered and the cache
 * flag was not already set, schedule the delayed service_cache work.
 * Return value tells the caller whether caching is now deferring the
 * EIR/class update (exact returns dropped from this chunk — verify
 * against upstream).
 */
2160 static bool enable_service_cache(struct hci_dev *hdev)
2162 if (!hdev_is_powered(hdev))
2165 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2166 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for Remove UUID — delegate to the common
 * class-command completion handler.
 */
2174 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2176 BT_DBG("status 0x%02x", status);
2178 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for the Remove UUID management command. An all-zero UUID
 * clears the entire list (optionally just arming the service cache
 * instead of touching the controller right away); otherwise every
 * matching entry is unlinked, with INVALID_PARAMS if none matched.
 * Finishes by running an HCI request to refresh class/EIR, completing
 * immediately on -ENODATA.
 */
2181 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2184 struct mgmt_cp_remove_uuid *cp = data;
2185 struct pending_cmd *cmd;
2186 struct bt_uuid *match, *tmp;
2187 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2188 struct hci_request req;
2191 BT_DBG("request for %s", hdev->name);
2195 if (pending_eir_or_class(hdev)) {
2196 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2201 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2202 hci_uuids_clear(hdev);
2204 if (enable_service_cache(hdev)) {
2205 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2206 0, hdev->dev_class, 3);
/* _safe iteration: entries are deleted while walking the list */
2215 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2216 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2219 list_del(&match->list);
2225 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2226 MGMT_STATUS_INVALID_PARAMS);
2231 hci_req_init(&req, hdev);
2236 err = hci_req_run(&req, remove_uuid_complete);
2238 if (err != -ENODATA)
2241 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2242 hdev->dev_class, 3);
2246 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2255 hci_dev_unlock(hdev);
/* HCI request callback for Set Device Class — delegate to the common
 * class-command completion handler.
 */
2259 static void set_class_complete(struct hci_dev *hdev, u8 status)
2261 BT_DBG("status 0x%02x", status);
2263 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for the Set Device Class management command. Validates that
 * the reserved bits of major/minor are zero, stores the new class,
 * and — when powered — flushes any pending service cache before
 * pushing the updated class (and EIR) to the controller via an HCI
 * request completed by set_class_complete().
 */
2266 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2269 struct mgmt_cp_set_dev_class *cp = data;
2270 struct pending_cmd *cmd;
2271 struct hci_request req;
2274 BT_DBG("request for %s", hdev->name);
2276 if (!lmp_bredr_capable(hdev))
2277 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2278 MGMT_STATUS_NOT_SUPPORTED);
2282 if (pending_eir_or_class(hdev)) {
2283 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* minor low 2 bits and major top 3 bits are reserved, must be 0 */
2288 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2289 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2290 MGMT_STATUS_INVALID_PARAMS);
2294 hdev->major_class = cp->major;
2295 hdev->minor_class = cp->minor;
2297 if (!hdev_is_powered(hdev)) {
2298 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2299 hdev->dev_class, 3);
2303 hci_req_init(&req, hdev);
/* Drop the lock while cancelling: the service_cache work itself
 * takes hdev's lock, so cancel_delayed_work_sync would deadlock.
 */
2305 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2306 hci_dev_unlock(hdev);
2307 cancel_delayed_work_sync(&hdev->service_cache);
2314 err = hci_req_run(&req, set_class_complete);
2316 if (err != -ENODATA)
2319 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2320 hdev->dev_class, 3);
2324 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2333 hci_dev_unlock(hdev);
/* Handler for the Load Link Keys management command: validate the
 * payload length against key_count, validate every key (BR/EDR
 * address type, key type <= 0x08), replace the stored key list
 * wholesale, update the HCI_DEBUG_KEYS flag per cp->debug_keys, and
 * acknowledge with an empty command complete.
 */
2337 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2340 struct mgmt_cp_load_link_keys *cp = data;
2341 u16 key_count, expected_len;
2345 BT_DBG("request for %s", hdev->name);
2347 if (!lmp_bredr_capable(hdev))
2348 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2349 MGMT_STATUS_NOT_SUPPORTED);
2351 key_count = __le16_to_cpu(cp->key_count);
/* Guard against a payload shorter (or longer) than key_count claims */
2353 expected_len = sizeof(*cp) + key_count *
2354 sizeof(struct mgmt_link_key_info);
2355 if (expected_len != len) {
2356 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2358 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2359 MGMT_STATUS_INVALID_PARAMS);
2362 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2363 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2364 MGMT_STATUS_INVALID_PARAMS);
2366 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the existing key store */
2369 for (i = 0; i < key_count; i++) {
2370 struct mgmt_link_key_info *key = &cp->keys[i];
2372 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2373 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2374 MGMT_STATUS_INVALID_PARAMS);
2379 hci_link_keys_clear(hdev);
2382 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2384 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2387 new_settings(hdev, NULL);
2389 for (i = 0; i < key_count; i++) {
2390 struct mgmt_link_key_info *key = &cp->keys[i];
2392 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2393 key->type, key->pin_len);
2396 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2398 hci_dev_unlock(hdev);
/* Emit a Device Unpaired management event for the given address to
 * all mgmt sockets except @skip_sk (the requester, which gets the
 * command reply instead).
 */
2403 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2404 u8 addr_type, struct sock *skip_sk)
2406 struct mgmt_ev_device_unpaired ev;
2408 bacpy(&ev.addr.bdaddr, bdaddr);
2409 ev.addr.type = addr_type;
2411 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for the Unpair Device management command: remove the stored
 * keys for the address (link key for BR/EDR; IRK, connection params
 * and LTK for LE), optionally disconnect an existing connection, and
 * notify other sockets via device_unpaired(). When a disconnect is
 * issued, completion is deferred through a pending command.
 */
2415 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2418 struct mgmt_cp_unpair_device *cp = data;
2419 struct mgmt_rp_unpair_device rp;
2420 struct hci_cp_disconnect dc;
2421 struct pending_cmd *cmd;
2422 struct hci_conn *conn;
2425 memset(&rp, 0, sizeof(rp));
2426 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2427 rp.addr.type = cp->addr.type;
2429 if (!bdaddr_type_is_valid(cp->addr.type))
2430 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2431 MGMT_STATUS_INVALID_PARAMS,
2434 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2435 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2436 MGMT_STATUS_INVALID_PARAMS,
2441 if (!hdev_is_powered(hdev)) {
2442 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2443 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2447 if (cp->addr.type == BDADDR_BREDR) {
2448 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE path: translate mgmt address type to HCI LE address type */
2452 if (cp->addr.type == BDADDR_LE_PUBLIC)
2453 addr_type = ADDR_LE_DEV_PUBLIC;
2455 addr_type = ADDR_LE_DEV_RANDOM;
2457 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2459 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2461 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2465 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2466 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2470 if (cp->disconnect) {
2471 if (cp->addr.type == BDADDR_BREDR)
2472 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2475 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection: answer now and broadcast the unpair event */
2482 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2484 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2488 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2495 dc.handle = cpu_to_le16(conn->handle);
2496 dc.reason = 0x13; /* Remote User Terminated Connection */
2497 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2499 mgmt_pending_remove(cmd);
2502 hci_dev_unlock(hdev);
/* Handler for the Disconnect management command: look up the live
 * ACL/LE connection for the address, issue an HCI Disconnect with
 * reason "remote user terminated", and track completion through a
 * pending command. Only one Disconnect may be in flight at a time.
 */
2506 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2509 struct mgmt_cp_disconnect *cp = data;
2510 struct mgmt_rp_disconnect rp;
2511 struct hci_cp_disconnect dc;
2512 struct pending_cmd *cmd;
2513 struct hci_conn *conn;
2518 memset(&rp, 0, sizeof(rp));
2519 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2520 rp.addr.type = cp->addr.type;
2522 if (!bdaddr_type_is_valid(cp->addr.type))
2523 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2524 MGMT_STATUS_INVALID_PARAMS,
2529 if (!test_bit(HCI_UP, &hdev->flags)) {
2530 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2531 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2535 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2536 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2537 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2541 if (cp->addr.type == BDADDR_BREDR)
2542 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2545 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2547 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2548 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2549 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2553 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2559 dc.handle = cpu_to_le16(conn->handle);
2560 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2562 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2564 mgmt_pending_remove(cmd);
2567 hci_dev_unlock(hdev);
/* Translate an HCI link type + LE address type pair into the mgmt
 * BDADDR_* address type used on the management interface.
 * NOTE(review): the case labels (LE_LINK etc.) were dropped from this
 * chunk — the visible fallthroughs are incomplete, check upstream.
 */
2571 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2573 switch (link_type) {
2575 switch (addr_type) {
2576 case ADDR_LE_DEV_PUBLIC:
2577 return BDADDR_LE_PUBLIC;
2580 /* Fallback to LE Random address type */
2581 return BDADDR_LE_RANDOM;
2585 /* Fallback to BR/EDR type */
2586 return BDADDR_BREDR;
/* Handler for the Get Connections management command: count mgmt-
 * visible connections, allocate a reply sized for that count, then
 * fill it — skipping SCO/eSCO links — and answer with the (possibly
 * smaller) recalculated length.
 */
2590 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2593 struct mgmt_rp_get_connections *rp;
2603 if (!hdev_is_powered(hdev)) {
2604 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2605 MGMT_STATUS_NOT_POWERED);
/* First pass: count entries so the reply buffer can be sized */
2610 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2611 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2615 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2616 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, filtering out SCO/eSCO links */
2623 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2624 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2626 bacpy(&rp->addr[i].bdaddr, &c->dst);
2627 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2628 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2633 rp->conn_count = cpu_to_le16(i);
2635 /* Recalculate length in case of filtered SCO connections, etc */
2636 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2638 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2644 hci_dev_unlock(hdev);
/* Queue an HCI PIN Code Negative Reply for cp->addr, tracked as a
 * pending MGMT_OP_PIN_CODE_NEG_REPLY command; the pending entry is
 * removed again if sending fails.
 */
2648 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2649 struct mgmt_cp_pin_code_neg_reply *cp)
2651 struct pending_cmd *cmd;
2654 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2659 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2660 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2662 mgmt_pending_remove(cmd);
/* Handler for the PIN Code Reply management command: verify there is
 * a live ACL connection for the address, enforce the 16-byte PIN
 * requirement for high-security connections (converting the reply
 * into a negative one when violated), then forward the PIN to the
 * controller via HCI PIN Code Reply.
 */
2667 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2670 struct hci_conn *conn;
2671 struct mgmt_cp_pin_code_reply *cp = data;
2672 struct hci_cp_pin_code_reply reply;
2673 struct pending_cmd *cmd;
2680 if (!hdev_is_powered(hdev)) {
2681 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2682 MGMT_STATUS_NOT_POWERED);
2686 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2688 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2689 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; shorter PINs are
 * answered with a negative reply to the controller instead.
 */
2693 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2694 struct mgmt_cp_pin_code_neg_reply ncp;
2696 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2698 BT_ERR("PIN code is not 16 bytes long");
2700 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2702 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2703 MGMT_STATUS_INVALID_PARAMS);
2708 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2714 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2715 reply.pin_len = cp->pin_len;
2716 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2718 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2720 mgmt_pending_remove(cmd);
2723 hci_dev_unlock(hdev);
/* Handler for the Set IO Capability management command: store the
 * capability used for future pairing under hdev lock and acknowledge
 * with an empty command complete. No HCI traffic is involved.
 */
2727 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2730 struct mgmt_cp_set_io_capability *cp = data;
2736 hdev->io_capability = cp->io_capability;
2738 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2739 hdev->io_capability);
2741 hci_dev_unlock(hdev);
2743 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is
 * @conn, i.e. the pairing attempt bound to this connection.
 */
2747 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2749 struct hci_dev *hdev = conn->hdev;
2750 struct pending_cmd *cmd;
2752 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2753 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2756 if (cmd->user_data != conn)
/* Finish a Pair Device command: reply to the requester with the peer
 * address and @status, detach all pairing callbacks from the
 * connection so no further events fire, drop the connection reference
 * taken for pairing, and discard the pending command.
 */
2765 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2767 struct mgmt_rp_pair_device rp;
2768 struct hci_conn *conn = cmd->user_data;
2770 bacpy(&rp.addr.bdaddr, &conn->dst);
2771 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2773 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2776 /* So we don't get further callbacks for this connection */
2777 conn->connect_cfm_cb = NULL;
2778 conn->security_cfm_cb = NULL;
2779 conn->disconn_cfm_cb = NULL;
2781 hci_conn_drop(conn);
2783 mgmt_pending_remove(cmd);
/* Entry point called by the SMP layer when LE pairing ends: map the
 * boolean outcome to an mgmt status and complete any pairing command
 * bound to this connection.
 */
2786 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2788 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2789 struct pending_cmd *cmd;
2791 cmd = find_pairing(conn);
2793 pairing_complete(cmd, status);
/* BR/EDR connection callback (connect/security/disconnect): complete
 * the bound pairing command with the HCI status mapped to mgmt.
 */
2796 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2798 struct pending_cmd *cmd;
2800 BT_DBG("status %u", status);
2802 cmd = find_pairing(conn);
2804 BT_DBG("Unable to find a pending command");
2806 pairing_complete(cmd, mgmt_status(status));
/* LE counterpart of pairing_complete_cb(). A guard condition between
 * the BT_DBG and find_pairing was dropped from this chunk (upstream
 * returns early on success, since mere connection does not prove LE
 * pairing finished) — verify before editing.
 */
2809 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2811 struct pending_cmd *cmd;
2813 BT_DBG("status %u", status);
2818 cmd = find_pairing(conn);
2820 BT_DBG("Unable to find a pending command");
2822 pairing_complete(cmd, mgmt_status(status));
/* Handler for the Pair Device management command: initiate an ACL or
 * LE connection with dedicated-bonding authentication, attach pairing
 * callbacks (BR/EDR vs LE variants), record the connection in a
 * pending command, and — if the link is already up and secure —
 * complete immediately. Errors from the connect attempt are mapped to
 * BUSY or CONNECT_FAILED.
 */
2825 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2828 struct mgmt_cp_pair_device *cp = data;
2829 struct mgmt_rp_pair_device rp;
2830 struct pending_cmd *cmd;
2831 u8 sec_level, auth_type;
2832 struct hci_conn *conn;
2837 memset(&rp, 0, sizeof(rp));
2838 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2839 rp.addr.type = cp->addr.type;
2841 if (!bdaddr_type_is_valid(cp->addr.type))
2842 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2843 MGMT_STATUS_INVALID_PARAMS,
2848 if (!hdev_is_powered(hdev)) {
2849 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2850 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2854 sec_level = BT_SECURITY_MEDIUM;
2855 auth_type = HCI_AT_DEDICATED_BONDING;
2857 if (cp->addr.type == BDADDR_BREDR) {
2858 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2863 /* Convert from L2CAP channel address type to HCI address type
2865 if (cp->addr.type == BDADDR_LE_PUBLIC)
2866 addr_type = ADDR_LE_DEV_PUBLIC;
2868 addr_type = ADDR_LE_DEV_RANDOM;
2870 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
2871 sec_level, auth_type);
2877 if (PTR_ERR(conn) == -EBUSY)
2878 status = MGMT_STATUS_BUSY;
2880 status = MGMT_STATUS_CONNECT_FAILED;
2882 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* Existing callbacks mean another pairing owns this connection */
2888 if (conn->connect_cfm_cb) {
2889 hci_conn_drop(conn);
2890 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2891 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2895 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2898 hci_conn_drop(conn);
2902 /* For LE, just connecting isn't a proof that the pairing finished */
2903 if (cp->addr.type == BDADDR_BREDR) {
2904 conn->connect_cfm_cb = pairing_complete_cb;
2905 conn->security_cfm_cb = pairing_complete_cb;
2906 conn->disconn_cfm_cb = pairing_complete_cb;
2908 conn->connect_cfm_cb = le_pairing_complete_cb;
2909 conn->security_cfm_cb = le_pairing_complete_cb;
2910 conn->disconn_cfm_cb = le_pairing_complete_cb;
2913 conn->io_capability = cp->io_cap;
2914 cmd->user_data = conn;
2916 if (conn->state == BT_CONNECTED &&
2917 hci_conn_security(conn, sec_level, auth_type))
2918 pairing_complete(cmd, 0);
2923 hci_dev_unlock(hdev);
/* Handler for the Cancel Pair Device management command: locate the
 * in-flight Pair Device command, verify its peer address matches the
 * request, fail it with MGMT_STATUS_CANCELLED, and acknowledge the
 * cancellation to the caller.
 */
2927 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2930 struct mgmt_addr_info *addr = data;
2931 struct pending_cmd *cmd;
2932 struct hci_conn *conn;
2939 if (!hdev_is_powered(hdev)) {
2940 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2941 MGMT_STATUS_NOT_POWERED);
2945 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2947 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2948 MGMT_STATUS_INVALID_PARAMS);
2952 conn = cmd->user_data;
2954 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2955 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2956 MGMT_STATUS_INVALID_PARAMS);
2960 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2962 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2963 addr, sizeof(*addr));
2965 hci_dev_unlock(hdev);
/* Shared backend for all user pairing responses (PIN neg reply, user
 * confirm pos/neg, passkey pos/neg). For LE addresses the reply is
 * routed through SMP; for BR/EDR the corresponding HCI command
 * (@hci_op) is sent — passkey replies carry the passkey, all others
 * just the bdaddr — tracked by a pending command for @mgmt_op.
 */
2969 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2970 struct mgmt_addr_info *addr, u16 mgmt_op,
2971 u16 hci_op, __le32 passkey)
2973 struct pending_cmd *cmd;
2974 struct hci_conn *conn;
2979 if (!hdev_is_powered(hdev)) {
2980 err = cmd_complete(sk, hdev->id, mgmt_op,
2981 MGMT_STATUS_NOT_POWERED, addr,
2986 if (addr->type == BDADDR_BREDR)
2987 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2989 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2992 err = cmd_complete(sk, hdev->id, mgmt_op,
2993 MGMT_STATUS_NOT_CONNECTED, addr,
2998 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2999 /* Continue with pairing via SMP */
3000 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3003 err = cmd_complete(sk, hdev->id, mgmt_op,
3004 MGMT_STATUS_SUCCESS, addr,
3007 err = cmd_complete(sk, hdev->id, mgmt_op,
3008 MGMT_STATUS_FAILED, addr,
3014 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3020 /* Continue with pairing via HCI */
3021 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3022 struct hci_cp_user_passkey_reply cp;
3024 bacpy(&cp.bdaddr, &addr->bdaddr);
3025 cp.passkey = passkey;
3026 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3028 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3032 mgmt_pending_remove(cmd);
3035 hci_dev_unlock(hdev);
/* Thin wrapper: dispatch PIN Code Negative Reply via the shared
 * user_pairing_resp() backend (no passkey needed).
 */
3039 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3040 void *data, u16 len)
3042 struct mgmt_cp_pin_code_neg_reply *cp = data;
3046 return user_pairing_resp(sk, hdev, &cp->addr,
3047 MGMT_OP_PIN_CODE_NEG_REPLY,
3048 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Thin wrapper for User Confirmation Reply: validate exact payload
 * size (this command is only ever the bare address struct) and
 * dispatch via user_pairing_resp().
 */
3051 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3054 struct mgmt_cp_user_confirm_reply *cp = data;
3058 if (len != sizeof(*cp))
3059 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3060 MGMT_STATUS_INVALID_PARAMS);
3062 return user_pairing_resp(sk, hdev, &cp->addr,
3063 MGMT_OP_USER_CONFIRM_REPLY,
3064 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Thin wrapper: dispatch User Confirmation Negative Reply via the
 * shared user_pairing_resp() backend.
 */
3067 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3068 void *data, u16 len)
3070 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3074 return user_pairing_resp(sk, hdev, &cp->addr,
3075 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3076 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: supply the passkey entered by the
 * user. Delegates to user_pairing_resp(), forwarding cp->passkey.
 * NOTE(review): fragment is elided in this view (braces/locals missing).
 */
3079 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3082 struct mgmt_cp_user_passkey_reply *cp = data;
3086 return user_pairing_resp(sk, hdev, &cp->addr,
3087 MGMT_OP_USER_PASSKEY_REPLY,
3088 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject a passkey request.
 * Delegates to user_pairing_resp() with a zero passkey.
 * NOTE(review): fragment is elided in this view (braces/locals missing).
 */
3091 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3092 void *data, u16 len)
3094 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3098 return user_pairing_resp(sk, hdev, &cp->addr,
3099 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3100 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command into @req, copying the current
 * hdev->dev_name into the command parameter block.
 * NOTE(review): fragment is elided in this view (braces missing).
 */
3103 static void update_name(struct hci_request *req)
3105 struct hci_dev *hdev = req->hdev;
3106 struct hci_cp_write_local_name cp;
3108 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3110 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-completion callback for Set Local Name: look up the pending
 * mgmt command and answer it with either a status (on HCI failure) or a
 * command-complete event, then drop the pending entry.
 * NOTE(review): fragment is elided in this view — the branches between
 * cmd_status()/cmd_complete() and the matching lock call are missing;
 * confirm against the full file.
 */
3113 static void set_name_complete(struct hci_dev *hdev, u8 status)
3115 struct mgmt_cp_set_local_name *cp;
3116 struct pending_cmd *cmd;
3118 BT_DBG("status 0x%02x", status);
3122 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3129 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3130 mgmt_status(status));
3132 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3135 mgmt_pending_remove(cmd);
3138 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name and short name.
 * Fast paths: unchanged names, or an unpowered controller (store the name
 * and emit MGMT_EV_LOCAL_NAME_CHANGED without touching HCI). Otherwise a
 * pending command is queued and an HCI request is built, completed by
 * set_name_complete().
 * NOTE(review): fragment is elided in this view (error paths, gotos and
 * lock calls missing); confirm control flow against the full file.
 */
3141 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3144 struct mgmt_cp_set_local_name *cp = data;
3145 struct pending_cmd *cmd;
3146 struct hci_request req;
3153 /* If the old values are the same as the new ones just return a
3154 * direct command complete event.
3156 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3157 !memcmp(hdev->short_name, cp->short_name,
3158 sizeof(hdev->short_name))) {
3159 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name is always stored; it is not sent over HCI itself. */
3164 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3166 if (!hdev_is_powered(hdev)) {
3167 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3169 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3174 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3180 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3186 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3188 hci_req_init(&req, hdev);
3190 if (lmp_bredr_capable(hdev)) {
3195 /* The name is stored in the scan response data and so
3196 * no need to update the advertising data here.
3198 if (lmp_le_capable(hdev))
3199 update_scan_rsp_data(&req);
3201 err = hci_req_run(&req, set_name_complete);
3203 mgmt_pending_remove(cmd);
3206 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: fetch local out-of-band pairing
 * data from the controller. Fails when unpowered, when SSP is not
 * supported, or when an identical request is already pending. Sends the
 * extended OOB read when Secure Connections is enabled.
 * NOTE(review): fragment is elided in this view (gotos/locks missing).
 */
3210 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3211 void *data, u16 data_len)
3213 struct pending_cmd *cmd;
3216 BT_DBG("%s", hdev->name);
3220 if (!hdev_is_powered(hdev)) {
3221 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3222 MGMT_STATUS_NOT_POWERED);
3226 if (!lmp_ssp_capable(hdev)) {
3227 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3228 MGMT_STATUS_NOT_SUPPORTED);
3232 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3233 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3238 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
/* SC-capable controllers return both P-192 and P-256 data. */
3244 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3245 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3248 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3251 mgmt_pending_remove(cmd);
3254 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store a remote device's OOB hash
 * and randomizer. Dispatches on command length to support both the plain
 * (P-192) and extended (P-192 + P-256) payload formats; any other length
 * is rejected as invalid parameters.
 * NOTE(review): fragment is elided in this view (braces/else missing).
 */
3258 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3259 void *data, u16 len)
3263 BT_DBG("%s ", hdev->name);
3267 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3268 struct mgmt_cp_add_remote_oob_data *cp = data;
3271 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3272 cp->hash, cp->randomizer);
3274 status = MGMT_STATUS_FAILED;
3276 status = MGMT_STATUS_SUCCESS;
3278 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3279 status, &cp->addr, sizeof(cp->addr));
3280 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3281 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3284 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3290 status = MGMT_STATUS_FAILED;
3292 status = MGMT_STATUS_SUCCESS;
3294 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3295 status, &cp->addr, sizeof(cp->addr));
3297 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3298 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3299 MGMT_STATUS_INVALID_PARAMS);
3302 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored OOB data for a
 * remote address; an hci_remove_remote_oob_data() failure is reported as
 * invalid parameters (no such entry).
 * NOTE(review): fragment is elided in this view (braces/locks missing).
 */
3306 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3307 void *data, u16 len)
3309 struct mgmt_cp_remove_remote_oob_data *cp = data;
3313 BT_DBG("%s", hdev->name);
3317 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3319 status = MGMT_STATUS_INVALID_PARAMS;
3321 status = MGMT_STATUS_SUCCESS;
3323 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3324 status, &cp->addr, sizeof(cp->addr));
3326 hci_dev_unlock(hdev);
/* Report failure of a Start Discovery request: reset discovery state to
 * STOPPED and complete the pending MGMT_OP_START_DISCOVERY command with
 * the mapped HCI status and the discovery type as response payload.
 * NOTE(review): fragment is elided in this view (NULL check/return
 * missing).
 */
3330 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3332 struct pending_cmd *cmd;
3336 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3338 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3342 type = hdev->discovery.type;
3344 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3345 &type, sizeof(type));
3346 mgmt_pending_remove(cmd);
/* Request-completion callback for Start Discovery: on failure report via
 * mgmt_start_discovery_failed(); on success move to DISCOVERY_FINDING and
 * arm the delayed LE-scan-disable work with a type-specific timeout
 * (BR/EDR inquiry needs none — the controller stops by itself).
 * NOTE(review): fragment is elided in this view (break statements and the
 * timeout guard are missing); confirm against the full file.
 */
3351 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3353 unsigned long timeout = 0;
3355 BT_DBG("status %d", status);
3359 mgmt_start_discovery_failed(hdev, status);
3360 hci_dev_unlock(hdev);
3365 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3366 hci_dev_unlock(hdev);
3368 switch (hdev->discovery.type) {
3369 case DISCOV_TYPE_LE:
3370 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3373 case DISCOV_TYPE_INTERLEAVED:
3374 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3377 case DISCOV_TYPE_BREDR:
3381 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3387 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT_OP_START_DISCOVERY handler. Validates preconditions (powered, no
 * periodic inquiry, discovery currently stopped), records the requested
 * type, then builds an HCI request:
 *  - DISCOV_TYPE_BREDR: flush the inquiry cache and queue an Inquiry with
 *    the General Inquiry Access Code.
 *  - DISCOV_TYPE_LE / INTERLEAVED: stop any background scan, pick a
 *    random/own address type, and queue LE scan param + enable commands.
 * On success the request runs with start_discovery_complete() and the
 * state moves to DISCOVERY_STARTING.
 * NOTE(review): fragment is elided in this view (gotos, breaks, locks and
 * several status lines are missing); confirm against the full file.
 */
3390 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3391 void *data, u16 len)
3393 struct mgmt_cp_start_discovery *cp = data;
3394 struct pending_cmd *cmd;
3395 struct hci_cp_le_set_scan_param param_cp;
3396 struct hci_cp_le_set_scan_enable enable_cp;
3397 struct hci_cp_inquiry inq_cp;
3398 struct hci_request req;
3399 /* General inquiry access code (GIAC) */
3400 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3401 u8 status, own_addr_type;
3404 BT_DBG("%s", hdev->name);
3408 if (!hdev_is_powered(hdev)) {
3409 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3410 MGMT_STATUS_NOT_POWERED);
3414 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3415 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3420 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3421 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3426 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3432 hdev->discovery.type = cp->type;
3434 hci_req_init(&req, hdev);
3436 switch (hdev->discovery.type) {
3437 case DISCOV_TYPE_BREDR:
3438 status = mgmt_bredr_support(hdev);
3440 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3442 mgmt_pending_remove(cmd);
3446 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3447 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3449 mgmt_pending_remove(cmd);
3453 hci_inquiry_cache_flush(hdev);
3455 memset(&inq_cp, 0, sizeof(inq_cp));
3456 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3457 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3458 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3461 case DISCOV_TYPE_LE:
3462 case DISCOV_TYPE_INTERLEAVED:
3463 status = mgmt_le_support(hdev);
3465 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3467 mgmt_pending_remove(cmd);
/* Interleaved discovery needs BR/EDR enabled as well as LE. */
3471 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3472 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3473 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3474 MGMT_STATUS_NOT_SUPPORTED);
3475 mgmt_pending_remove(cmd);
3479 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3480 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3481 MGMT_STATUS_REJECTED);
3482 mgmt_pending_remove(cmd);
3486 /* If controller is scanning, it means the background scanning
3487 * is running. Thus, we should temporarily stop it in order to
3488 * set the discovery scanning parameters.
3490 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3491 hci_req_add_le_scan_disable(&req);
/* NOTE(review): "¶m_cp" below is mojibake for "&param_cp"
 * ("&para" was rendered as the pilcrow sign) — restore the
 * address-of expression when fixing up this file.
 */
3493 memset(¶m_cp, 0, sizeof(param_cp));
3495 /* All active scans will be done with either a resolvable
3496 * private address (when privacy feature has been enabled)
3497 * or unresolvable private address.
3499 err = hci_update_random_address(&req, true, &own_addr_type);
3501 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3502 MGMT_STATUS_FAILED);
3503 mgmt_pending_remove(cmd);
3507 param_cp.type = LE_SCAN_ACTIVE;
3508 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3509 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3510 param_cp.own_address_type = own_addr_type;
3511 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3514 memset(&enable_cp, 0, sizeof(enable_cp));
3515 enable_cp.enable = LE_SCAN_ENABLE;
3516 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3517 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3522 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3523 MGMT_STATUS_INVALID_PARAMS);
3524 mgmt_pending_remove(cmd);
3528 err = hci_req_run(&req, start_discovery_complete);
3530 mgmt_pending_remove(cmd);
3532 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3535 hci_dev_unlock(hdev);
/* Report failure of a Stop Discovery request: complete the pending
 * MGMT_OP_STOP_DISCOVERY command with the mapped HCI status and the
 * current discovery type as payload, then drop the pending entry.
 * NOTE(review): fragment is elided in this view (NULL check missing).
 */
3539 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3541 struct pending_cmd *cmd;
3544 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3548 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3549 &hdev->discovery.type, sizeof(hdev->discovery.type));
3550 mgmt_pending_remove(cmd);
/* Request-completion callback for Stop Discovery: on failure report via
 * mgmt_stop_discovery_failed(), otherwise set DISCOVERY_STOPPED.
 * NOTE(review): fragment is elided in this view (lock/branch missing).
 */
3555 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3557 BT_DBG("status %d", status);
3562 mgmt_stop_discovery_failed(hdev, status);
3566 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3569 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler. Rejects the request when discovery is
 * not active or the type does not match the running one. Otherwise builds
 * an HCI request according to the current discovery state:
 *  - FINDING: cancel the inquiry or disable the LE scan.
 *  - RESOLVING: cancel the in-flight remote name request, or — if no
 *    entry is being resolved — complete immediately and go to STOPPED.
 * On success the request runs with stop_discovery_complete() and the
 * state moves to DISCOVERY_STOPPING.
 * NOTE(review): fragment is elided in this view (gotos, breaks, locks
 * missing); confirm control flow against the full file.
 */
3572 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3575 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3576 struct pending_cmd *cmd;
3577 struct hci_cp_remote_name_req_cancel cp;
3578 struct inquiry_entry *e;
3579 struct hci_request req;
3582 BT_DBG("%s", hdev->name);
3586 if (!hci_discovery_active(hdev)) {
3587 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3588 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3589 sizeof(mgmt_cp->type));
3593 if (hdev->discovery.type != mgmt_cp->type) {
3594 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3595 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3596 sizeof(mgmt_cp->type));
3600 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3606 hci_req_init(&req, hdev);
3608 switch (hdev->discovery.state) {
3609 case DISCOVERY_FINDING:
3610 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3611 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3613 cancel_delayed_work(&hdev->le_scan_disable);
3615 hci_req_add_le_scan_disable(&req);
3620 case DISCOVERY_RESOLVING:
3621 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3624 mgmt_pending_remove(cmd);
3625 err = cmd_complete(sk, hdev->id,
3626 MGMT_OP_STOP_DISCOVERY, 0,
3628 sizeof(mgmt_cp->type));
3629 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3633 bacpy(&cp.bdaddr, &e->data.bdaddr);
3634 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3640 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3642 mgmt_pending_remove(cmd);
3643 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3644 MGMT_STATUS_FAILED, &mgmt_cp->type,
3645 sizeof(mgmt_cp->type));
3649 err = hci_req_run(&req, stop_discovery_complete);
3651 mgmt_pending_remove(cmd);
3653 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3656 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: user space confirms whether it already
 * knows the name of a discovered device. Looks up the inquiry-cache entry
 * with unknown name state; marks it NAME_KNOWN or NAME_NEEDED (the latter
 * re-queues it for name resolution) and replies with the address.
 * NOTE(review): fragment is elided in this view (gotos/locks missing).
 */
3660 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3663 struct mgmt_cp_confirm_name *cp = data;
3664 struct inquiry_entry *e;
3667 BT_DBG("%s", hdev->name);
3671 if (!hci_discovery_active(hdev)) {
3672 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3673 MGMT_STATUS_FAILED, &cp->addr,
3678 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3680 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3681 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3686 if (cp->name_known) {
3687 e->name_state = NAME_KNOWN;
3690 e->name_state = NAME_NEEDED;
3691 hci_inquiry_cache_update_resolve(hdev, e);
3694 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3698 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the controller's
 * blacklist after validating the address type. An hci_blacklist_add()
 * failure maps to MGMT_STATUS_FAILED.
 * NOTE(review): fragment is elided in this view (braces/locks missing).
 */
3702 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3705 struct mgmt_cp_block_device *cp = data;
3709 BT_DBG("%s", hdev->name);
3711 if (!bdaddr_type_is_valid(cp->addr.type))
3712 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3713 MGMT_STATUS_INVALID_PARAMS,
3714 &cp->addr, sizeof(cp->addr));
3718 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3720 status = MGMT_STATUS_FAILED;
3722 status = MGMT_STATUS_SUCCESS;
3724 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3725 &cp->addr, sizeof(cp->addr));
3727 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the blacklist.
 * A delete failure maps to MGMT_STATUS_INVALID_PARAMS (entry not found),
 * mirroring block_device() above.
 * NOTE(review): fragment is elided in this view (braces/locks missing).
 */
3732 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3735 struct mgmt_cp_unblock_device *cp = data;
3739 BT_DBG("%s", hdev->name);
3741 if (!bdaddr_type_is_valid(cp->addr.type))
3742 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3743 MGMT_STATUS_INVALID_PARAMS,
3744 &cp->addr, sizeof(cp->addr));
3748 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3750 status = MGMT_STATUS_INVALID_PARAMS;
3752 status = MGMT_STATUS_SUCCESS;
3754 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3755 &cp->addr, sizeof(cp->addr));
3757 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source,
 * vendor, product, version). Source values above 0x0002 are invalid per
 * the Device ID profile. Afterwards an HCI request is run to refresh the
 * EIR data that carries the Device ID.
 * NOTE(review): fragment is elided in this view (the update_eir() call
 * between hci_req_init and hci_req_run appears to be missing).
 */
3762 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3765 struct mgmt_cp_set_device_id *cp = data;
3766 struct hci_request req;
3770 BT_DBG("%s", hdev->name);
3772 source = __le16_to_cpu(cp->source);
3774 if (source > 0x0002)
3775 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3776 MGMT_STATUS_INVALID_PARAMS);
3780 hdev->devid_source = source;
3781 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3782 hdev->devid_product = __le16_to_cpu(cp->product);
3783 hdev->devid_version = __le16_to_cpu(cp->version);
3785 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3787 hci_req_init(&req, hdev);
3789 hci_req_run(&req, NULL);
3791 hci_dev_unlock(hdev);
/* Request-completion callback for Set Advertising: on failure answer all
 * pending MGMT_OP_SET_ADVERTISING commands with the mapped error; on
 * success answer them with the new settings and broadcast New Settings.
 * NOTE(review): fragment is elided in this view (branch/locks missing).
 */
3796 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3798 struct cmd_lookup match = { NULL, hdev };
3801 u8 mgmt_err = mgmt_status(status);
3803 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3804 cmd_status_rsp, &mgmt_err);
3808 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3811 new_settings(hdev, match.sk);
/* MGMT_OP_SET_ADVERTISING handler: enable/disable LE advertising.
 * When the controller is unpowered, the value is unchanged, or an LE
 * connection exists, only the HCI_ADVERTISING flag is toggled and a
 * settings response (plus New Settings on change) is sent — no HCI
 * traffic. Otherwise an HCI request enabling or disabling advertising is
 * queued, completed by set_advertising_complete().
 * NOTE(review): fragment is elided in this view (val assignment, gotos
 * and locks missing); confirm against the full file.
 */
3817 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3820 struct mgmt_mode *cp = data;
3821 struct pending_cmd *cmd;
3822 struct hci_request req;
3823 u8 val, enabled, status;
3826 BT_DBG("request for %s", hdev->name);
3828 status = mgmt_le_support(hdev);
3830 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3833 if (cp->val != 0x00 && cp->val != 0x01)
3834 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3835 MGMT_STATUS_INVALID_PARAMS);
3840 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3842 /* The following conditions are ones which mean that we should
3843 * not do any HCI communication but directly send a mgmt
3844 * response to user space (after toggling the flag if
3847 if (!hdev_is_powered(hdev) || val == enabled ||
3848 hci_conn_num(hdev, LE_LINK) > 0) {
3849 bool changed = false;
3851 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3852 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3856 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3861 err = new_settings(hdev, sk);
3866 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3867 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3868 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3873 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3879 hci_req_init(&req, hdev);
3882 enable_advertising(&req);
3884 disable_advertising(&req);
3886 err = hci_req_run(&req, set_advertising_complete);
3888 mgmt_pending_remove(cmd);
3891 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address. Only allowed when LE-capable and powered off. A non-ANY
 * address must not be BDADDR_NONE and must have the two most significant
 * bits set, as required for static random addresses by the Core spec.
 * NOTE(review): fragment is elided in this view (locks missing).
 */
3895 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3896 void *data, u16 len)
3898 struct mgmt_cp_set_static_address *cp = data;
3901 BT_DBG("%s", hdev->name);
3903 if (!lmp_le_capable(hdev))
3904 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3905 MGMT_STATUS_NOT_SUPPORTED);
3907 if (hdev_is_powered(hdev))
3908 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3909 MGMT_STATUS_REJECTED);
3911 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3912 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3913 return cmd_status(sk, hdev->id,
3914 MGMT_OP_SET_STATIC_ADDRESS,
3915 MGMT_STATUS_INVALID_PARAMS);
3917 /* Two most significant bits shall be set */
3918 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3919 return cmd_status(sk, hdev->id,
3920 MGMT_OP_SET_STATIC_ADDRESS,
3921 MGMT_STATUS_INVALID_PARAMS);
3926 bacpy(&hdev->static_addr, &cp->bdaddr);
3928 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3930 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: set the LE scan interval and window.
 * Both must lie in the spec range 0x0004..0x4000 and the window must not
 * exceed the interval. If a background (passive) scan is running while
 * discovery is stopped, it is restarted so the new parameters apply.
 * NOTE(review): fragment is elided in this view (locks missing).
 */
3935 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3936 void *data, u16 len)
3938 struct mgmt_cp_set_scan_params *cp = data;
3939 __u16 interval, window;
3942 BT_DBG("%s", hdev->name);
3944 if (!lmp_le_capable(hdev))
3945 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3946 MGMT_STATUS_NOT_SUPPORTED);
3948 interval = __le16_to_cpu(cp->interval);
3950 if (interval < 0x0004 || interval > 0x4000)
3951 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3952 MGMT_STATUS_INVALID_PARAMS);
3954 window = __le16_to_cpu(cp->window);
3956 if (window < 0x0004 || window > 0x4000)
3957 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3958 MGMT_STATUS_INVALID_PARAMS);
3960 if (window > interval)
3961 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3962 MGMT_STATUS_INVALID_PARAMS);
3966 hdev->le_scan_interval = interval;
3967 hdev->le_scan_window = window;
3969 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3971 /* If background scan is running, restart it so new parameters are
3974 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
3975 hdev->discovery.state == DISCOVERY_STOPPED) {
3976 struct hci_request req;
3978 hci_req_init(&req, hdev);
3980 hci_req_add_le_scan_disable(&req);
3981 hci_req_add_le_passive_scan(&req);
3983 hci_req_run(&req, NULL);
3986 hci_dev_unlock(hdev);
/* Request-completion callback for Set Fast Connectable: on failure send a
 * command status; on success update HCI_FAST_CONNECTABLE to match the
 * requested mode, answer the pending command and broadcast New Settings.
 * NOTE(review): fragment is elided in this view (branch/locks missing).
 */
3991 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3993 struct pending_cmd *cmd;
3995 BT_DBG("status 0x%02x", status);
3999 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4004 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4005 mgmt_status(status));
4007 struct mgmt_mode *cp = cmd->param;
4010 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4012 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4014 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4015 new_settings(hdev, cmd->sk);
4018 mgmt_pending_remove(cmd);
4021 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast-connectable page-scan
 * parameters. Requires BR/EDR enabled and controller version >= 1.2, a
 * powered controller and the connectable setting; a no-op request is
 * answered directly. Otherwise write_fast_connectable() is queued and
 * fast_connectable_complete() finishes the command.
 * NOTE(review): fragment is elided in this view (gotos/locks missing).
 */
4024 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4025 void *data, u16 len)
4027 struct mgmt_mode *cp = data;
4028 struct pending_cmd *cmd;
4029 struct hci_request req;
4032 BT_DBG("%s", hdev->name);
4034 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4035 hdev->hci_ver < BLUETOOTH_VER_1_2)
4036 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4037 MGMT_STATUS_NOT_SUPPORTED);
4039 if (cp->val != 0x00 && cp->val != 0x01)
4040 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4041 MGMT_STATUS_INVALID_PARAMS);
4043 if (!hdev_is_powered(hdev))
4044 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4045 MGMT_STATUS_NOT_POWERED);
4047 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4048 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4049 MGMT_STATUS_REJECTED);
4053 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4054 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4059 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4060 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4065 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4072 hci_req_init(&req, hdev);
4074 write_fast_connectable(&req, cp->val);
4076 err = hci_req_run(&req, fast_connectable_complete);
4078 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4079 MGMT_STATUS_FAILED);
4080 mgmt_pending_remove(cmd);
4084 hci_dev_unlock(hdev);
/* Queue a Write Scan Enable command reflecting the current connectable /
 * discoverable flags, after first forcing fast connectable off.
 * NOTE(review): fragment is elided in this view (scan variable and
 * SCAN_PAGE assignment missing); confirm against the full file.
 */
4089 static void set_bredr_scan(struct hci_request *req)
4091 struct hci_dev *hdev = req->hdev;
4094 /* Ensure that fast connectable is disabled. This function will
4095 * not do anything if the page scan parameters are already what
4098 write_fast_connectable(req, false);
4100 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4102 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4103 scan |= SCAN_INQUIRY;
4106 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request-completion callback for Set BR/EDR: on HCI failure roll back
 * the HCI_BREDR_ENABLED flag (it was flipped optimistically before the
 * request ran) and report the error; on success answer the pending
 * command and broadcast New Settings.
 * NOTE(review): fragment is elided in this view (branch/locks missing).
 */
4109 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4111 struct pending_cmd *cmd;
4113 BT_DBG("status 0x%02x", status);
4117 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4122 u8 mgmt_err = mgmt_status(status);
4124 /* We need to restore the flag if related HCI commands
4127 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4129 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4131 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4132 new_settings(hdev, cmd->sk);
4135 mgmt_pending_remove(cmd);
4138 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller. Requires both BR/EDR and LE capability and LE enabled.
 * Unpowered: toggle flags directly (disabling also clears the dependent
 * discoverable/SSP/link-security/fast-connectable/HS flags). Powered:
 * only enabling is allowed; the HCI_BREDR_ENABLED flag is flipped up
 * front so update_adv_data() emits correct flags, then scan enable and
 * advertising data commands run with set_bredr_complete().
 * NOTE(review): fragment is elided in this view (gotos, locks and the
 * `if (cp->val)` guards are missing); confirm against the full file.
 */
4141 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4143 struct mgmt_mode *cp = data;
4144 struct pending_cmd *cmd;
4145 struct hci_request req;
4148 BT_DBG("request for %s", hdev->name);
4150 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4151 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4152 MGMT_STATUS_NOT_SUPPORTED);
4154 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4155 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4156 MGMT_STATUS_REJECTED);
4158 if (cp->val != 0x00 && cp->val != 0x01)
4159 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4160 MGMT_STATUS_INVALID_PARAMS);
4164 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4165 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4169 if (!hdev_is_powered(hdev)) {
4171 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4172 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4173 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4174 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4175 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4178 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4180 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4184 err = new_settings(hdev, sk);
4188 /* Reject disabling when powered on */
4190 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4191 MGMT_STATUS_REJECTED);
4195 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4196 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4201 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4207 /* We need to flip the bit already here so that update_adv_data
4208 * generates the correct flags.
4210 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4212 hci_req_init(&req, hdev);
4214 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4215 set_bredr_scan(&req);
4217 /* Since only the advertising data flags will change, there
4218 * is no need to update the scan response data.
4220 update_adv_data(&req);
4222 err = hci_req_run(&req, set_bredr_complete);
4224 mgmt_pending_remove(cmd);
4227 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections mode.
 * val: 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only mode. Requires
 * BR/EDR support and either an SC-capable controller or the force-SC
 * debug flag. Unpowered: toggle HCI_SC_ENABLED / HCI_SC_ONLY directly.
 * Powered: send HCI Write Secure Connections Host Support and update
 * HCI_SC_ONLY on success.
 * NOTE(review): fragment is elided in this view (val derivation, gotos
 * and locks missing); confirm against the full file.
 */
4231 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4232 void *data, u16 len)
4234 struct mgmt_mode *cp = data;
4235 struct pending_cmd *cmd;
4239 BT_DBG("request for %s", hdev->name);
4241 status = mgmt_bredr_support(hdev);
4243 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4246 if (!lmp_sc_capable(hdev) &&
4247 !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
4248 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4249 MGMT_STATUS_NOT_SUPPORTED);
4251 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4252 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4253 MGMT_STATUS_INVALID_PARAMS);
4257 if (!hdev_is_powered(hdev)) {
4261 changed = !test_and_set_bit(HCI_SC_ENABLED,
4263 if (cp->val == 0x02)
4264 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4266 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4268 changed = test_and_clear_bit(HCI_SC_ENABLED,
4270 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4273 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4278 err = new_settings(hdev, sk);
4283 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4284 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4291 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4292 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4293 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4297 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4303 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4305 mgmt_pending_remove(cmd);
4309 if (cp->val == 0x02)
4310 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4312 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4315 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: toggle acceptance of debug link keys by
 * flipping HCI_DEBUG_KEYS. Purely a flag change; a settings response is
 * sent and New Settings broadcast when the flag actually changed.
 * NOTE(review): fragment is elided in this view (branch/locks missing).
 */
4319 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4320 void *data, u16 len)
4322 struct mgmt_mode *cp = data;
4326 BT_DBG("request for %s", hdev->name);
4328 if (cp->val != 0x00 && cp->val != 0x01)
4329 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4330 MGMT_STATUS_INVALID_PARAMS);
4335 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4337 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4339 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4344 err = new_settings(hdev, sk);
4347 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy. Only allowed
 * while powered off. Enabling stores the supplied IRK and marks the RPA
 * expired so a fresh resolvable private address is generated; disabling
 * wipes the IRK. Any use of this command implies user space handles IRKs,
 * so HCI_RPA_RESOLVING is set unconditionally.
 * NOTE(review): fragment is elided in this view (locks missing).
 */
4351 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4354 struct mgmt_cp_set_privacy *cp = cp_data;
4358 BT_DBG("request for %s", hdev->name);
4360 if (!lmp_le_capable(hdev))
4361 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4362 MGMT_STATUS_NOT_SUPPORTED);
4364 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4365 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4366 MGMT_STATUS_INVALID_PARAMS);
4368 if (hdev_is_powered(hdev))
4369 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4370 MGMT_STATUS_REJECTED);
4374 /* If user space supports this command it is also expected to
4375 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4377 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4380 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4381 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4382 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4384 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4385 memset(hdev->irk, 0, sizeof(hdev->irk));
4386 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4389 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4394 err = new_settings(hdev, sk);
4397 hci_dev_unlock(hdev);
/* Validate one IRK entry from Load IRKs: the address must be LE public,
 * or LE random with the two most significant bits set (static address).
 * NOTE(review): fragment is elided in this view (the return true/false
 * statements are missing); confirm against the full file.
 */
4401 static bool irk_is_valid(struct mgmt_irk_info *irk)
4403 switch (irk->addr.type) {
4404 case BDADDR_LE_PUBLIC:
4407 case BDADDR_LE_RANDOM:
4408 /* Two most significant bits shall be set */
4409 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4417 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4420 struct mgmt_cp_load_irks *cp = cp_data;
4421 u16 irk_count, expected_len;
4424 BT_DBG("request for %s", hdev->name);
4426 if (!lmp_le_capable(hdev))
4427 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4428 MGMT_STATUS_NOT_SUPPORTED);
4430 irk_count = __le16_to_cpu(cp->irk_count);
4432 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4433 if (expected_len != len) {
4434 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4436 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4437 MGMT_STATUS_INVALID_PARAMS);
4440 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4442 for (i = 0; i < irk_count; i++) {
4443 struct mgmt_irk_info *key = &cp->irks[i];
4445 if (!irk_is_valid(key))
4446 return cmd_status(sk, hdev->id,
4448 MGMT_STATUS_INVALID_PARAMS);
4453 hci_smp_irks_clear(hdev);
4455 for (i = 0; i < irk_count; i++) {
4456 struct mgmt_irk_info *irk = &cp->irks[i];
4459 if (irk->addr.type == BDADDR_LE_PUBLIC)
4460 addr_type = ADDR_LE_DEV_PUBLIC;
4462 addr_type = ADDR_LE_DEV_RANDOM;
4464 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4468 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4470 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4472 hci_dev_unlock(hdev);
/* Validate one LTK entry from Load Long Term Keys: the master field must
 * be 0/1, and the address must be LE public, or LE random with the two
 * most significant bits set (static address).
 * NOTE(review): fragment is elided in this view (the return true/false
 * statements are missing); confirm against the full file.
 */
4477 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4479 if (key->master != 0x00 && key->master != 0x01)
4482 switch (key->addr.type) {
4483 case BDADDR_LE_PUBLIC:
4486 case BDADDR_LE_RANDOM:
4487 /* Two most significant bits shall be set */
4488 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4496 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4497 void *cp_data, u16 len)
4499 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4500 u16 key_count, expected_len;
4503 BT_DBG("request for %s", hdev->name);
4505 if (!lmp_le_capable(hdev))
4506 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4507 MGMT_STATUS_NOT_SUPPORTED);
4509 key_count = __le16_to_cpu(cp->key_count);
4511 expected_len = sizeof(*cp) + key_count *
4512 sizeof(struct mgmt_ltk_info);
4513 if (expected_len != len) {
4514 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4516 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4517 MGMT_STATUS_INVALID_PARAMS);
4520 BT_DBG("%s key_count %u", hdev->name, key_count);
4522 for (i = 0; i < key_count; i++) {
4523 struct mgmt_ltk_info *key = &cp->keys[i];
4525 if (!ltk_is_valid(key))
4526 return cmd_status(sk, hdev->id,
4527 MGMT_OP_LOAD_LONG_TERM_KEYS,
4528 MGMT_STATUS_INVALID_PARAMS);
4533 hci_smp_ltks_clear(hdev);
4535 for (i = 0; i < key_count; i++) {
4536 struct mgmt_ltk_info *key = &cp->keys[i];
4537 u8 type, addr_type, authenticated;
4539 if (key->addr.type == BDADDR_LE_PUBLIC)
4540 addr_type = ADDR_LE_DEV_PUBLIC;
4542 addr_type = ADDR_LE_DEV_RANDOM;
4547 type = HCI_SMP_LTK_SLAVE;
4549 switch (key->type) {
4550 case MGMT_LTK_UNAUTHENTICATED:
4551 authenticated = 0x00;
4553 case MGMT_LTK_AUTHENTICATED:
4554 authenticated = 0x01;
4560 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4561 authenticated, key->val, key->enc_size, key->ediv,
4565 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4568 hci_dev_unlock(hdev);
/* Lookup context passed through mgmt_pending_foreach() when completing
 * Get Connection Info requests: matches pending commands against @conn
 * and carries whether the cached TX power values are valid.
 * NOTE(review): fragment is elided in this view — the mgmt_status field
 * (read by get_conn_info_complete) and closing brace are missing.
 */
4573 struct cmd_conn_lookup {
4574 struct hci_conn *conn;
4575 bool valid_tx_power;
/* mgmt_pending_foreach() callback: complete one pending
 * MGMT_OP_GET_CONN_INFO command whose connection matches the lookup
 * context. On success fill in RSSI and — when the refresh produced valid
 * data — current/max TX power, otherwise mark TX power invalid. Drops the
 * connection reference taken when the command was queued.
 * NOTE(review): fragment is elided in this view (the early return on
 * mismatch is implied but its statement is missing).
 */
4579 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4581 struct cmd_conn_lookup *match = data;
4582 struct mgmt_cp_get_conn_info *cp;
4583 struct mgmt_rp_get_conn_info rp;
4584 struct hci_conn *conn = cmd->user_data;
4586 if (conn != match->conn)
4589 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
4591 memset(&rp, 0, sizeof(rp));
4592 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4593 rp.addr.type = cp->addr.type;
4595 if (!match->mgmt_status) {
4596 rp.rssi = conn->rssi;
4598 if (match->valid_tx_power) {
4599 rp.tx_power = conn->tx_power;
4600 rp.max_tx_power = conn->max_tx_power;
4602 rp.tx_power = HCI_TX_POWER_INVALID;
4603 rp.max_tx_power = HCI_TX_POWER_INVALID;
4607 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4608 match->mgmt_status, &rp, sizeof(rp));
4610 hci_conn_drop(conn);
4612 mgmt_pending_remove(cmd);
/* Request-completion callback for the RSSI/TX-power refresh issued by
 * get_conn_info(). Recovers the connection handle from the last sent
 * Read RSSI or Read Transmit Power Level command (both start with the
 * handle, so the same struct cast is safe), then answers every matching
 * pending MGMT_OP_GET_CONN_INFO command via get_conn_info_complete().
 * NOTE(review): fragment is elided in this view (handle variable, gotos
 * and status override on the TX-power path are missing).
 */
4615 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4617 struct hci_cp_read_rssi *cp;
4618 struct hci_conn *conn;
4619 struct cmd_conn_lookup match;
4622 BT_DBG("status 0x%02x", status);
4626 /* TX power data is valid in case request completed successfully,
4627 * otherwise we assume it's not valid. At the moment we assume that
4628 * either both or none of current and max values are valid to keep code
4631 match.valid_tx_power = !status;
4633 /* Commands sent in request are either Read RSSI or Read Transmit Power
4634 * Level so we check which one was last sent to retrieve connection
4635 * handle. Both commands have handle as first parameter so it's safe to
4636 * cast data on the same command struct.
4638 * First command sent is always Read RSSI and we fail only if it fails.
4639 * In other case we simply override error to indicate success as we
4640 * already remembered if TX power value is actually valid.
4642 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4644 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4649 BT_ERR("invalid sent_cmd in response");
4653 handle = __le16_to_cpu(cp->handle);
4654 conn = hci_conn_hash_lookup_handle(hdev, handle);
4656 BT_ERR("unknown handle (%d) in response", handle);
4661 match.mgmt_status = mgmt_status(status);
4663 /* Cache refresh is complete, now reply for mgmt request for given
4666 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4667 get_conn_info_complete, &match);
4670 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: report RSSI and TX power for an active
 * connection. Serves values from the hci_conn cache when fresh enough,
 * otherwise queues an HCI request to refresh them and defers the reply to
 * conn_info_refresh_complete().
 */
4673 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4676 struct mgmt_cp_get_conn_info *cp = data;
4677 struct mgmt_rp_get_conn_info rp;
4678 struct hci_conn *conn;
4679 unsigned long conn_info_age;
4682 BT_DBG("%s", hdev->name);
4684 memset(&rp, 0, sizeof(rp));
4685 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4686 rp.addr.type = cp->addr.type;
/* Reject malformed address types before touching any device state. */
4688 if (!bdaddr_type_is_valid(cp->addr.type))
4689 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4690 MGMT_STATUS_INVALID_PARAMS,
4695 if (!hdev_is_powered(hdev)) {
4696 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4697 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* BR/EDR and LE connections live in different hash buckets. */
4701 if (cp->addr.type == BDADDR_BREDR)
4702 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4705 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4707 if (!conn || conn->state != BT_CONNECTED) {
4708 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4709 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
4713 /* To avoid client trying to guess when to poll again for information we
4714 * calculate conn info age as random value between min/max set in hdev.
4716 conn_info_age = hdev->conn_info_min_age +
4717 prandom_u32_max(hdev->conn_info_max_age -
4718 hdev->conn_info_min_age);
4720 /* Query controller to refresh cached values if they are too old or were
4723 if (time_after(jiffies, conn->conn_info_timestamp +
4724 msecs_to_jiffies(conn_info_age)) ||
4725 !conn->conn_info_timestamp) {
4726 struct hci_request req;
4727 struct hci_cp_read_tx_power req_txp_cp;
4728 struct hci_cp_read_rssi req_rssi_cp;
4729 struct pending_cmd *cmd;
/* Read RSSI is always the first command in the refresh request. */
4731 hci_req_init(&req, hdev);
4732 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4733 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4736 /* For LE links TX power does not change thus we don't need to
4737 * query for it once value is known.
4739 if (!bdaddr_type_is_le(cp->addr.type) ||
4740 conn->tx_power == HCI_TX_POWER_INVALID) {
4741 req_txp_cp.handle = cpu_to_le16(conn->handle);
4742 req_txp_cp.type = 0x00;
4743 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4744 sizeof(req_txp_cp), &req_txp_cp);
4747 /* Max TX power needs to be read only once per connection */
4748 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
4749 req_txp_cp.handle = cpu_to_le16(conn->handle);
4750 req_txp_cp.type = 0x01;
4751 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4752 sizeof(req_txp_cp), &req_txp_cp);
4755 err = hci_req_run(&req, conn_info_refresh_complete);
/* Reply is deferred: hold the connection until the refresh completes
 * (released in get_conn_info_complete()).
 */
4759 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
4766 hci_conn_hold(conn);
4767 cmd->user_data = conn;
4769 conn->conn_info_timestamp = jiffies;
4771 /* Cache is valid, just reply with values cached in hci_conn */
4772 rp.rssi = conn->rssi;
4773 rp.tx_power = conn->tx_power;
4774 rp.max_tx_power = conn->max_tx_power;
4776 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4777 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4781 hci_dev_unlock(hdev);
/* Dispatch table indexed by mgmt opcode. Each entry names the handler, a
 * var_len flag (true = command carries variable-length trailing data, so
 * data_len is a minimum rather than an exact size -- enforced in
 * mgmt_control()), and the expected parameter size.
 */
4785 static const struct mgmt_handler {
4786 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4790 } mgmt_handlers[] = {
4791 { NULL }, /* 0x0000 (no command) */
4792 { read_version, false, MGMT_READ_VERSION_SIZE },
4793 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4794 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4795 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4796 { set_powered, false, MGMT_SETTING_SIZE },
4797 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4798 { set_connectable, false, MGMT_SETTING_SIZE },
4799 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4800 { set_pairable, false, MGMT_SETTING_SIZE },
4801 { set_link_security, false, MGMT_SETTING_SIZE },
4802 { set_ssp, false, MGMT_SETTING_SIZE },
4803 { set_hs, false, MGMT_SETTING_SIZE },
4804 { set_le, false, MGMT_SETTING_SIZE },
4805 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4806 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4807 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4808 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4809 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4810 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4811 { disconnect, false, MGMT_DISCONNECT_SIZE },
4812 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4813 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4814 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4815 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4816 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4817 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4818 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4819 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4820 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4821 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4822 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4823 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4824 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4825 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4826 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4827 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4828 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4829 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4830 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4831 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4832 { set_advertising, false, MGMT_SETTING_SIZE },
4833 { set_bredr, false, MGMT_SETTING_SIZE },
4834 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4835 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
4836 { set_secure_conn, false, MGMT_SETTING_SIZE },
4837 { set_debug_keys, false, MGMT_SETTING_SIZE },
4838 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
4839 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
4840 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
/* Entry point for mgmt commands arriving on the HCI control socket.
 * Copies the message from userspace, validates header/length/index, looks
 * up the handler in mgmt_handlers[] and dispatches. Commands below
 * MGMT_OP_READ_INFO are global (no controller index); all others require
 * a valid, usable hdev.
 */
4844 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4848 struct mgmt_hdr *hdr;
4849 u16 opcode, index, len;
4850 struct hci_dev *hdev = NULL;
4851 const struct mgmt_handler *handler;
4854 BT_DBG("got %zu bytes", msglen);
4856 if (msglen < sizeof(*hdr))
4859 buf = kmalloc(msglen, GFP_KERNEL);
4863 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
/* All header fields are little-endian on the wire. */
4869 opcode = __le16_to_cpu(hdr->opcode);
4870 index = __le16_to_cpu(hdr->index);
4871 len = __le16_to_cpu(hdr->len);
/* Declared payload length must match what was actually received. */
4873 if (len != msglen - sizeof(*hdr)) {
4878 if (index != MGMT_INDEX_NONE) {
4879 hdev = hci_dev_get(index);
4881 err = cmd_status(sk, index, opcode,
4882 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup, or bound to a user channel, are not
 * addressable through the mgmt interface.
 */
4886 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4887 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4888 err = cmd_status(sk, index, opcode,
4889 MGMT_STATUS_INVALID_INDEX);
4894 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4895 mgmt_handlers[opcode].func == NULL) {
4896 BT_DBG("Unknown op %u", opcode);
4897 err = cmd_status(sk, index, opcode,
4898 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global commands must not carry an index; per-device commands must. */
4902 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4903 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4904 err = cmd_status(sk, index, opcode,
4905 MGMT_STATUS_INVALID_INDEX);
4909 handler = &mgmt_handlers[opcode];
/* var_len handlers take data_len as a minimum, others as an exact size. */
4911 if ((handler->var_len && len < handler->data_len) ||
4912 (!handler->var_len && len != handler->data_len)) {
4913 err = cmd_status(sk, index, opcode,
4914 MGMT_STATUS_INVALID_PARAMS);
4919 mgmt_init_hdev(sk, hdev);
4921 cp = buf + sizeof(*hdr);
4923 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered BR/EDR controller to mgmt listeners.
 * AMP-only controllers are not exposed over the mgmt interface.
 */
4937 void mgmt_index_added(struct hci_dev *hdev)
4939 if (hdev->dev_type != HCI_BREDR)
4942 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce controller removal: fail all pending mgmt commands for this
 * device with INVALID_INDEX, then emit the Index Removed event.
 */
4945 void mgmt_index_removed(struct hci_dev *hdev)
4947 u8 status = MGMT_STATUS_INVALID_INDEX;
4949 if (hdev->dev_type != HCI_BREDR)
/* Opcode 0 matches every pending command regardless of opcode. */
4952 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4954 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4957 /* This function requires the caller holds hdev->lock */
/* Re-arm pending LE auto-connections for every stored connection
 * parameter entry marked HCI_AUTO_CONN_ALWAYS (e.g. after power-on).
 */
4958 static void restart_le_auto_conns(struct hci_dev *hdev)
4960 struct hci_conn_params *p;
4962 list_for_each_entry(p, &hdev->le_conn_params, list) {
4963 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4964 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
/* Completion handler for the power-on HCI request built by
 * powered_update_hci(): restart LE auto-connections, answer pending
 * Set Powered commands and broadcast the new settings.
 */
4968 static void powered_complete(struct hci_dev *hdev, u8 status)
4970 struct cmd_lookup match = { NULL, hdev };
4972 BT_DBG("status 0x%02x", status);
4976 restart_le_auto_conns(hdev);
4978 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4980 new_settings(hdev, match.sk);
4982 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings controller state in sync with
 * the mgmt-level settings after power-on: SSP mode, LE host support,
 * advertising data, auth enable and scan mode. Returns the result of
 * hci_req_run() (completion routed to powered_complete()).
 */
4988 static int powered_update_hci(struct hci_dev *hdev)
4990 struct hci_request req;
4993 hci_req_init(&req, hdev);
4995 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4996 !lmp_host_ssp_capable(hdev)) {
4999 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5002 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5003 lmp_bredr_capable(hdev)) {
5004 struct hci_cp_write_le_host_supported cp;
5007 cp.simul = lmp_le_br_capable(hdev);
5009 /* Check first if we already have the right
5010 * host state (host features set)
5012 if (cp.le != lmp_host_le_capable(hdev) ||
5013 cp.simul != lmp_host_le_br_capable(hdev))
5014 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5018 if (lmp_le_capable(hdev)) {
5019 /* Make sure the controller has a good default for
5020 * advertising data. This also applies to the case
5021 * where BR/EDR was toggled during the AUTO_OFF phase.
5023 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5024 update_adv_data(&req);
5025 update_scan_rsp_data(&req);
5028 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5029 enable_advertising(&req);
/* Only write auth enable if it differs from the current HCI state. */
5032 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5033 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5034 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5035 sizeof(link_sec), &link_sec);
5037 if (lmp_bredr_capable(hdev)) {
5038 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5039 set_bredr_scan(&req);
5045 return hci_req_run(&req, powered_complete);
/* Notify the mgmt layer of a power state change. On power-up, kick off
 * the sync request (replies deferred to powered_complete()); on power-down
 * answer pending commands, fail everything else with NOT_POWERED and emit
 * a class-of-device-cleared event when needed.
 */
5048 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5050 struct cmd_lookup match = { NULL, hdev };
5051 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5052 u8 zero_cod[] = { 0, 0, 0 };
5055 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* If the HCI request was queued successfully, replies come later. */
5059 if (powered_update_hci(hdev) == 0)
5062 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
5067 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5068 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
5070 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
5071 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
5072 zero_cod, sizeof(zero_cod), NULL);
5075 err = new_settings(hdev, match.sk);
/* Fail a pending Set Powered command; -ERFKILL is mapped to the dedicated
 * RFKILLED status so userspace can distinguish it from generic failure.
 */
5083 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5085 struct pending_cmd *cmd;
5088 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5092 if (err == -ERFKILL)
5093 status = MGMT_STATUS_RFKILLED;
5095 status = MGMT_STATUS_FAILED;
5097 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5099 mgmt_pending_remove(cmd);
/* Discoverable timer expiry: drop the (limited) discoverable flags, turn
 * inquiry scan off (keeping page scan for BR/EDR), refresh advertising
 * data and broadcast the settings change.
 */
5102 void mgmt_discoverable_timeout(struct hci_dev *hdev)
5104 struct hci_request req;
5108 /* When discoverable timeout triggers, then just make sure
5109 * the limited discoverable flag is cleared. Even in the case
5110 * of a timeout triggered from general discoverable, it is
5111 * safe to unconditionally clear the flag.
5113 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5114 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5116 hci_req_init(&req, hdev);
5117 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
5118 u8 scan = SCAN_PAGE;
5119 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
5120 sizeof(scan), &scan);
/* Advertising flags may carry discoverable bits -- refresh them too. */
5123 update_adv_data(&req);
5124 hci_req_run(&req, NULL);
5126 hdev->discov_timeout = 0;
5128 new_settings(hdev, NULL);
5130 hci_dev_unlock(hdev);
/* Sync the HCI_DISCOVERABLE flag with an externally observed scan-mode
 * change and broadcast new settings when the flag actually flipped.
 */
5133 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5137 /* Nothing needed here if there's a pending command since that
5138 * commands request completion callback takes care of everything
5141 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5144 /* Powering off may clear the scan mode - don't let that interfere */
5145 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5149 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5151 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5152 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5156 struct hci_request req;
5158 /* In case this change in discoverable was triggered by
5159 * a disabling of connectable there could be a need to
5160 * update the advertising flags.
5162 hci_req_init(&req, hdev);
5163 update_adv_data(&req);
5164 hci_req_run(&req, NULL);
5166 new_settings(hdev, NULL);
/* Sync the HCI_CONNECTABLE flag with an externally observed scan-mode
 * change; broadcast settings only when the flag actually changed.
 */
5170 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5174 /* Nothing needed here if there's a pending command since that
5175 * commands request completion callback takes care of everything
5178 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5181 /* Powering off may clear the scan mode - don't let that interfere */
5182 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5186 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5188 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5191 new_settings(hdev, NULL);
/* Mirror the controller's advertising state into HCI_ADVERTISING. */
5194 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5196 /* Powering off may stop advertising - don't let that interfere */
5197 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5201 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5203 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
/* A Write Scan Enable command failed: fail the pending Set Connectable
 * and/or Set Discoverable commands matching the attempted scan bits.
 */
5206 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5208 u8 mgmt_err = mgmt_status(status);
5210 if (scan & SCAN_PAGE)
5211 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5212 cmd_status_rsp, &mgmt_err);
5214 if (scan & SCAN_INQUIRY)
5215 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5216 cmd_status_rsp, &mgmt_err);
/* Emit a New Link Key event for a freshly created BR/EDR link key;
 * store_hint tells userspace whether to persist it.
 */
5219 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5222 struct mgmt_ev_new_link_key ev;
5224 memset(&ev, 0, sizeof(ev));
5226 ev.store_hint = persistent;
5227 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5228 ev.key.addr.type = BDADDR_BREDR;
5229 ev.key.type = key->type;
5230 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5231 ev.key.pin_len = key->pin_len;
5233 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK's authenticated flag to the mgmt LTK type constant. */
5236 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5238 if (ltk->authenticated)
5239 return MGMT_LTK_AUTHENTICATED;
5241 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event. Keys from devices using non-identity
 * (resolvable/non-resolvable) random addresses get store_hint 0 since
 * their address will not be valid next time.
 */
5244 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5246 struct mgmt_ev_new_long_term_key ev;
5248 memset(&ev, 0, sizeof(ev));
5250 /* Devices using resolvable or non-resolvable random addresses
5251 * without providing an indentity resolving key don't require
5252 * to store long term keys. Their addresses will change the
5255 * Only when a remote device provides an identity address
5256 * make sure the long term key is stored. If the remote
5257 * identity is known, the long term keys are internally
5258 * mapped to the identity address. So allow static random
5259 * and public addresses here.
/* Top two address bits 11 = static random; anything else random is
 * resolvable/non-resolvable and not worth persisting.
 */
5261 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5262 (key->bdaddr.b[5] & 0xc0) != 0xc0)
5263 ev.store_hint = 0x00;
5265 ev.store_hint = persistent;
5267 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5268 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5269 ev.key.type = mgmt_ltk_type(key);
5270 ev.key.enc_size = key->enc_size;
5271 ev.key.ediv = key->ediv;
5272 ev.key.rand = key->rand;
5274 if (key->type == HCI_SMP_LTK)
5277 memcpy(ev.key.val, key->val, sizeof(key->val));
5279 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event. Storage is only suggested when the device is
 * actually using a resolvable private address (irk->rpa set).
 */
5282 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5284 struct mgmt_ev_new_irk ev;
5286 memset(&ev, 0, sizeof(ev));
5288 /* For identity resolving keys from devices that are already
5289 * using a public address or static random address, do not
5290 * ask for storing this key. The identity resolving key really
5291 * is only mandatory for devices using resovlable random
5294 * Storing all identity resolving keys has the downside that
5295 * they will be also loaded on next boot of they system. More
5296 * identity resolving keys, means more time during scanning is
5297 * needed to actually resolve these addresses.
5299 if (bacmp(&irk->rpa, BDADDR_ANY))
5300 ev.store_hint = 0x01;
5302 ev.store_hint = 0x00;
5304 bacpy(&ev.rpa, &irk->rpa);
5305 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5306 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5307 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5309 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK event; same identity-address storage policy as LTKs. */
5312 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5315 struct mgmt_ev_new_csrk ev;
5317 memset(&ev, 0, sizeof(ev));
5319 /* Devices using resolvable or non-resolvable random addresses
5320 * without providing an indentity resolving key don't require
5321 * to store signature resolving keys. Their addresses will change
5322 * the next time around.
5324 * Only when a remote device provides an identity address
5325 * make sure the signature resolving key is stored. So allow
5326 * static random and public addresses here.
5328 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5329 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
5330 ev.store_hint = 0x00;
5332 ev.store_hint = persistent;
5334 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
5335 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
5336 ev.key.master = csrk->master;
5337 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
5339 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
5342 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5345 eir[eir_len++] = sizeof(type) + data_len;
5346 eir[eir_len++] = type;
5347 memcpy(&eir[eir_len], data, data_len);
5348 eir_len += data_len;
/* Emit a Device Connected event, packing the remote name and (non-zero)
 * class of device as EIR fields after the fixed event header.
 */
5353 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5354 u8 addr_type, u32 flags, u8 *name, u8 name_len,
5358 struct mgmt_ev_device_connected *ev = (void *) buf;
5361 bacpy(&ev->addr.bdaddr, bdaddr);
5362 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5364 ev->flags = __cpu_to_le32(flags);
5367 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* An all-zero class of device carries no information -- skip it. */
5370 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
5371 eir_len = eir_append_data(ev->eir, eir_len,
5372 EIR_CLASS_OF_DEV, dev_class, 3);
5374 ev->eir_len = cpu_to_le16(eir_len);
5376 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
5377 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending Disconnect command
 * successfully, echoing the requested address back to the caller.
 */
5380 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5382 struct mgmt_cp_disconnect *cp = cmd->param;
5383 struct sock **sk = data;
5384 struct mgmt_rp_disconnect rp;
5386 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5387 rp.addr.type = cp->addr.type;
5389 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5395 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending Unpair Device command
 * after the link dropped -- emit the Device Unpaired event and complete
 * the command with success.
 */
5398 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5400 struct hci_dev *hdev = data;
5401 struct mgmt_cp_unpair_device *cp = cmd->param;
5402 struct mgmt_rp_unpair_device rp;
5404 memset(&rp, 0, sizeof(rp));
5405 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5406 rp.addr.type = cp->addr.type;
5408 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5410 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5412 mgmt_pending_remove(cmd);
/* Handle an HCI-level disconnection: possibly expedite a pending
 * power-off, answer pending Disconnect commands, emit the Device
 * Disconnected event and finish any pending Unpair Device commands.
 */
5415 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
5416 u8 link_type, u8 addr_type, u8 reason,
5417 bool mgmt_connected)
5419 struct mgmt_ev_device_disconnected ev;
5420 struct pending_cmd *power_off;
5421 struct sock *sk = NULL;
5423 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5425 struct mgmt_mode *cp = power_off->param;
5427 /* The connection is still in hci_conn_hash so test for 1
5428 * instead of 0 to know if this is the last one.
5430 if (!cp->val && hci_conn_count(hdev) == 1) {
5431 cancel_delayed_work(&hdev->power_off);
5432 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Only connections previously reported to userspace need an event. */
5436 if (!mgmt_connected)
5439 if (link_type != ACL_LINK && link_type != LE_LINK)
/* disconnect_rsp() captures the requesting socket in sk so the event
 * below can be skipped for it.
 */
5442 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
5444 bacpy(&ev.addr.bdaddr, bdaddr);
5445 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5448 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
5453 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnection failed: still finish pending Unpair Device
 * commands, then complete the matching pending Disconnect command with
 * the translated HCI status.
 */
5457 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5458 u8 link_type, u8 addr_type, u8 status)
5460 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5461 struct mgmt_cp_disconnect *cp;
5462 struct mgmt_rp_disconnect rp;
5463 struct pending_cmd *cmd;
5465 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5468 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only reply if the pending command targets this exact address/type. */
5474 if (bacmp(bdaddr, &cp->addr.bdaddr))
5477 if (cp->addr.type != bdaddr_type)
5480 bacpy(&rp.addr.bdaddr, bdaddr);
5481 rp.addr.type = bdaddr_type;
5483 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5484 mgmt_status(status), &rp, sizeof(rp));
5486 mgmt_pending_remove(cmd);
/* An outgoing connection attempt failed: possibly expedite a pending
 * power-off (mirrors mgmt_device_disconnected()), then emit the Connect
 * Failed event with the translated HCI status.
 */
5489 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5490 u8 addr_type, u8 status)
5492 struct mgmt_ev_connect_failed ev;
5493 struct pending_cmd *power_off;
5495 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5497 struct mgmt_mode *cp = power_off->param;
5499 /* The connection is still in hci_conn_hash so test for 1
5500 * instead of 0 to know if this is the last one.
5502 if (!cp->val && hci_conn_count(hdev) == 1) {
5503 cancel_delayed_work(&hdev->power_off);
5504 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5508 bacpy(&ev.addr.bdaddr, bdaddr);
5509 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5510 ev.status = mgmt_status(status);
5512 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a controller PIN code request to userspace (BR/EDR only). */
5515 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5517 struct mgmt_ev_pin_code_request ev;
5519 bacpy(&ev.addr.bdaddr, bdaddr);
5520 ev.addr.type = BDADDR_BREDR;
5523 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the HCI result. */
5526 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5529 struct pending_cmd *cmd;
5530 struct mgmt_rp_pin_code_reply rp;
5532 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5536 bacpy(&rp.addr.bdaddr, bdaddr);
5537 rp.addr.type = BDADDR_BREDR;
5539 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5540 mgmt_status(status), &rp, sizeof(rp));
5542 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command with the HCI result. */
5545 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5548 struct pending_cmd *cmd;
5549 struct mgmt_rp_pin_code_reply rp;
5551 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5555 bacpy(&rp.addr.bdaddr, bdaddr);
5556 rp.addr.type = BDADDR_BREDR;
5558 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5559 mgmt_status(status), &rp, sizeof(rp));
5561 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a pairing numeric comparison value. */
5564 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5565 u8 link_type, u8 addr_type, u32 value,
5568 struct mgmt_ev_user_confirm_request ev;
5570 BT_DBG("%s", hdev->name);
5572 bacpy(&ev.addr.bdaddr, bdaddr);
5573 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5574 ev.confirm_hint = confirm_hint;
5575 ev.value = cpu_to_le32(value);
5577 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey for pairing. */
5581 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5582 u8 link_type, u8 addr_type)
5584 struct mgmt_ev_user_passkey_request ev;
5586 BT_DBG("%s", hdev->name);
5588 bacpy(&ev.addr.bdaddr, bdaddr);
5589 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5591 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * commands: find the pending command for @opcode and complete it with the
 * translated HCI status and the remote address.
 */
5595 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5596 u8 link_type, u8 addr_type, u8 status,
5599 struct pending_cmd *cmd;
5600 struct mgmt_rp_user_confirm_reply rp;
5603 cmd = mgmt_pending_find(opcode, hdev);
5607 bacpy(&rp.addr.bdaddr, bdaddr);
5608 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5609 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5612 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending User Confirm Reply. */
5617 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5618 u8 link_type, u8 addr_type, u8 status)
5620 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5621 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending User Confirm Negative Reply. */
5624 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5625 u8 link_type, u8 addr_type, u8 status)
5627 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5629 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending User Passkey Reply. */
5632 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5633 u8 link_type, u8 addr_type, u8 status)
5635 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5636 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending User Passkey Negative Reply. */
5639 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5640 u8 link_type, u8 addr_type, u8 status)
5642 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5644 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Notify userspace of the passkey to display and keypress progress. */
5647 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5648 u8 link_type, u8 addr_type, u32 passkey,
5651 struct mgmt_ev_passkey_notify ev;
5653 BT_DBG("%s", hdev->name);
5655 bacpy(&ev.addr.bdaddr, bdaddr);
5656 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5657 ev.passkey = __cpu_to_le32(passkey);
5658 ev.entered = entered;
5660 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an Authentication Failed event with the translated HCI status. */
5663 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5664 u8 addr_type, u8 status)
5666 struct mgmt_ev_auth_failed ev;
5668 bacpy(&ev.addr.bdaddr, bdaddr);
5669 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5670 ev.status = mgmt_status(status);
5672 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Write Auth Enable completed: on error fail pending Set Link Security
 * commands; on success sync HCI_LINK_SECURITY with the HCI_AUTH flag,
 * answer pending commands and broadcast settings if anything changed.
 */
5675 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
5677 struct cmd_lookup match = { NULL, hdev };
5681 u8 mgmt_err = mgmt_status(status);
5682 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
5683 cmd_status_rsp, &mgmt_err);
5687 if (test_bit(HCI_AUTH, &hdev->flags))
5688 changed = !test_and_set_bit(HCI_LINK_SECURITY,
5691 changed = test_and_clear_bit(HCI_LINK_SECURITY,
5694 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
5698 new_settings(hdev, match.sk);
/* Queue a Write EIR command that clears the controller's extended inquiry
 * response data (and the cached copy in hdev->eir). No-op on controllers
 * without extended inquiry support.
 */
5704 static void clear_eir(struct hci_request *req)
5706 struct hci_dev *hdev = req->hdev;
5707 struct hci_cp_write_eir cp;
5709 if (!lmp_ext_inq_capable(hdev))
5712 memset(hdev->eir, 0, sizeof(hdev->eir));
5714 memset(&cp, 0, sizeof(cp));
5716 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Write SSP Mode completed: on error roll back HCI_SSP_ENABLED (and the
 * dependent HCI_HS_ENABLED) and fail pending Set SSP commands; on success
 * sync flags, reply, broadcast settings and refresh/clear the EIR data.
 */
5719 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5721 struct cmd_lookup match = { NULL, hdev };
5722 struct hci_request req;
5723 bool changed = false;
5726 u8 mgmt_err = mgmt_status(status);
5728 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
5729 &hdev->dev_flags)) {
/* High Speed requires SSP, so it must be cleared alongside. */
5730 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5731 new_settings(hdev, NULL);
5734 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
5740 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5742 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5744 changed = test_and_clear_bit(HCI_HS_ENABLED,
5747 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5750 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
5753 new_settings(hdev, match.sk);
/* EIR data is only meaningful with SSP enabled. */
5758 hci_req_init(&req, hdev);
5760 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
5765 hci_req_run(&req, NULL);
/* Write Secure Connections support completed: on error roll back
 * HCI_SC_ENABLED/HCI_SC_ONLY and fail pending Set Secure Connections
 * commands; on success sync flags, reply and broadcast settings.
 */
5768 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5770 struct cmd_lookup match = { NULL, hdev };
5771 bool changed = false;
5774 u8 mgmt_err = mgmt_status(status);
5777 if (test_and_clear_bit(HCI_SC_ENABLED,
5779 new_settings(hdev, NULL);
5780 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5783 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5784 cmd_status_rsp, &mgmt_err);
5789 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5791 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5792 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5795 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5796 settings_rsp, &match);
5799 new_settings(hdev, match.sk);
/* mgmt_pending_foreach() callback: capture (and hold a reference to) the
 * first pending command's socket so the caller can skip it when
 * broadcasting the resulting event.
 */
5805 static void sk_lookup(struct pending_cmd *cmd, void *data)
5807 struct cmd_lookup *match = data;
5809 if (match->sk == NULL) {
5810 match->sk = cmd->sk;
5811 sock_hold(match->sk);
/* Class of Device update finished: find the originating socket among the
 * pending Set Dev Class / Add UUID / Remove UUID commands and broadcast
 * the Class Of Device Changed event (skipping that socket).
 */
5815 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5818 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
5820 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5821 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5822 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
5825 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Local name write finished: update the cached name and emit a Local Name
 * Changed event, unless the write was part of a power-on sequence (a
 * pending Set Powered command), in which case no event is sent.
 */
5832 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
5834 struct mgmt_cp_set_local_name ev;
5835 struct pending_cmd *cmd;
5840 memset(&ev, 0, sizeof(ev));
5841 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
5842 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
5844 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
5846 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
5848 /* If this is a HCI command related to powering on the
5849 * HCI dev don't send any mgmt signals.
5851 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5855 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
5856 cmd ? cmd->sk : NULL);
/* Read Local OOB Data finished: complete the pending command with either
 * the extended reply (192- and 256-bit values, when Secure Connections is
 * enabled and the 256-bit data is available) or the legacy 192-bit reply.
 */
5859 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5860 u8 *randomizer192, u8 *hash256,
5861 u8 *randomizer256, u8 status)
5863 struct pending_cmd *cmd;
5865 BT_DBG("%s status %u", hdev->name, status);
5867 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
5872 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5873 mgmt_status(status));
5875 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
5876 hash256 && randomizer256) {
5877 struct mgmt_rp_read_local_oob_ext_data rp;
5879 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
5880 memcpy(rp.randomizer192, randomizer192,
5881 sizeof(rp.randomizer192));
5883 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
5884 memcpy(rp.randomizer256, randomizer256,
5885 sizeof(rp.randomizer256));
5887 cmd_complete(cmd->sk, hdev->id,
5888 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5891 struct mgmt_rp_read_local_oob_data rp;
5893 memcpy(rp.hash, hash192, sizeof(rp.hash));
5894 memcpy(rp.randomizer, randomizer192,
5895 sizeof(rp.randomizer));
5897 cmd_complete(cmd->sk, hdev->id,
5898 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5903 mgmt_pending_remove(cmd);
/* Report a discovered remote device to user space as DEVICE_FOUND.
 *
 * Only emitted while discovery is active.  The event address is the
 * identity address when an IRK match resolves the (possibly random)
 * advertising address; otherwise the address is reported as seen.
 * The EIR data is copied in, a Class of Device field is appended when
 * one was reported out-of-band and not already present in the EIR
 * (hence the "+ 5" slack: 1 len + 1 type + 3 CoD bytes), and any scan
 * response data is concatenated after it.
 *
 * NOTE(review): the excerpt omits the buf declaration, the irk NULL
 * branch, and the cfm_name/ssp flag conditions — confirm details
 * against the full source.
 */
5906 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5907 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
5908 u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
5912 struct mgmt_ev_device_found *ev = (void *) buf;
5913 struct smp_irk *irk;
/* Don't leak stray advertising reports outside discovery. */
5916 if (!hci_discovery_active(hdev))
5919 /* Make sure that the buffer is big enough. The 5 extra bytes
5920 * are for the potential CoD field.
5922 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
5925 memset(buf, 0, sizeof(buf));
/* Prefer the resolved identity address over the on-air address. */
5927 irk = hci_get_irk(hdev, bdaddr, addr_type);
5929 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
5930 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
5932 bacpy(&ev->addr.bdaddr, bdaddr);
5933 ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* CONFIRM_NAME: user space should resolve the name; LEGACY_PAIRING:
 * peer lacks SSP support (flag conditions omitted in excerpt). */
5938 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5940 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5943 memcpy(ev->eir, eir, eir_len);
/* Append CoD only if the inquiry gave one and EIR doesn't have it. */
5945 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5946 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5949 if (scan_rsp_len > 0)
5950 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
5952 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
5953 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
5955 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote device name to user space.
 *
 * Reuses the DEVICE_FOUND event, with the name packed as a single
 * EIR_NAME_COMPLETE field.  The stack buffer leaves room for the
 * event header plus the name field (2 bytes EIR len/type overhead).
 */
5958 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5959 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5961 struct mgmt_ev_device_found *ev;
5962 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5965 ev = (struct mgmt_ev_device_found *) buf;
5967 memset(buf, 0, sizeof(buf));
5969 bacpy(&ev->addr.bdaddr, bdaddr);
5970 ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* Encode the name as the event's only EIR field. */
5973 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5976 ev->eir_len = cpu_to_le16(eir_len);
5978 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Notify user space of a discovery state change.
 *
 * First completes whichever command drove the transition — a pending
 * START_DISCOVERY (when discovery begins) or, failing that, a pending
 * STOP_DISCOVERY — replying with the discovery type.  Then broadcasts
 * a DISCOVERING event with the type and new on/off state.
 *
 * NOTE(review): the conditions selecting which pending command to
 * look up, and the guard around the cmd_complete, are omitted from
 * this excerpt.
 */
5981 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5983 struct mgmt_ev_discovering ev;
5984 struct pending_cmd *cmd;
5986 BT_DBG("%s discovering %u", hdev->name, discovering);
5989 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5991 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5994 u8 type = hdev->discovery.type;
/* Reply to the initiating command with the discovery type. */
5996 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5998 mgmt_pending_remove(cmd);
/* Broadcast the new discovery state to all mgmt listeners. */
6001 memset(&ev, 0, sizeof(ev));
6002 ev.type = hdev->discovery.type;
6003 ev.discovering = discovering;
6005 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a DEVICE_BLOCKED event for the given address/type.
 *
 * If a BLOCK_DEVICE command is pending, its socket is passed as the
 * skip-socket so the initiator does not receive its own broadcast
 * (it gets a direct command reply instead).  Returns the result of
 * mgmt_event().
 */
6008 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6010 struct pending_cmd *cmd;
6011 struct mgmt_ev_device_blocked ev;
6013 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
6015 bacpy(&ev.addr.bdaddr, bdaddr);
6016 ev.addr.type = type;
6018 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
6019 cmd ? cmd->sk : NULL);
/* Broadcast a DEVICE_UNBLOCKED event for the given address/type.
 *
 * Mirror image of mgmt_device_blocked(): skips the socket of any
 * pending UNBLOCK_DEVICE command so the initiator is not notified
 * twice.  Returns the result of mgmt_event().
 */
6022 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6024 struct pending_cmd *cmd;
6025 struct mgmt_ev_device_unblocked ev;
6027 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6029 bacpy(&ev.addr.bdaddr, bdaddr);
6030 ev.addr.type = type;
6032 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6033 cmd ? cmd->sk : NULL);
/* Request-completion callback for re-enabling LE advertising
 * (used by mgmt_reenable_advertising below).
 *
 * If the HCI request failed, drop the HCI_ADVERTISING setting and
 * push a new-settings event so user space learns advertising is off.
 *
 * NOTE(review): the `if (status)` guard line is omitted from this
 * excerpt; the clear/notify pair runs only on failure.
 */
6036 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6038 BT_DBG("%s status %u", hdev->name, status);
6040 /* Clear the advertising mgmt setting if we failed to re-enable it */
6042 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6043 new_settings(hdev, NULL);
6047 void mgmt_reenable_advertising(struct hci_dev *hdev)
6049 struct hci_request req;
/* Advertising is auto-disabled while an LE connection exists;
 * only re-enable once no LE links remain. */
6051 if (hci_conn_num(hdev, LE_LINK) > 0)
6054 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6057 hci_req_init(&req, hdev);
6058 enable_advertising(&req);
6060 /* If this fails we have no option but to let user space know
6061 * that we've disabled advertising.
6063 if (hci_req_run(&req, adv_enable_complete) < 0) {
6064 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6065 new_settings(hdev, NULL);