2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
/* NOTE(review): this extract drops some original lines (the embedded
 * numbers are the upstream line numbers and show gaps) — several array
 * entries and the closing braces are missing from view. Code kept
 * byte-identical.
 */
/* Opcodes accepted by the mgmt interface, reported via
 * MGMT_OP_READ_COMMANDS.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
/* Events the mgmt interface can emit, reported alongside the command
 * list in the MGMT_OP_READ_COMMANDS reply.
 */
82 static const u16 mgmt_events[] = {
83 MGMT_EV_CONTROLLER_ERROR,
85 MGMT_EV_INDEX_REMOVED,
87 MGMT_EV_CLASS_OF_DEV_CHANGED,
88 MGMT_EV_LOCAL_NAME_CHANGED,
90 MGMT_EV_NEW_LONG_TERM_KEY,
91 MGMT_EV_DEVICE_CONNECTED,
92 MGMT_EV_DEVICE_DISCONNECTED,
93 MGMT_EV_CONNECT_FAILED,
94 MGMT_EV_PIN_CODE_REQUEST,
95 MGMT_EV_USER_CONFIRM_REQUEST,
96 MGMT_EV_USER_PASSKEY_REQUEST,
100 MGMT_EV_DEVICE_BLOCKED,
101 MGMT_EV_DEVICE_UNBLOCKED,
102 MGMT_EV_DEVICE_UNPAIRED,
103 MGMT_EV_PASSKEY_NOTIFY,
/* How long cached service (EIR/CoD) updates are deferred: 2 seconds. */
106 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* Powered from mgmt's point of view: HCI_UP set and not in the
 * auto-power-off window.
 */
108 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
109 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
/* NOTE(review): fragment — the opening of the struct this member
 * belongs to (presumably struct pending_cmd) is not in this extract.
 */
112 struct list_head list;
120 /* HCI to MGMT error code conversion table */
/* Indexed by HCI status code; see mgmt_status() below. Entries past the
 * end of the table map to MGMT_STATUS_FAILED.
 */
121 static u8 mgmt_status_table[] = {
123 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
124 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
125 MGMT_STATUS_FAILED, /* Hardware Failure */
126 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
127 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
128 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
129 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
130 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
131 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
132 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
133 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
134 MGMT_STATUS_BUSY, /* Command Disallowed */
135 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
136 MGMT_STATUS_REJECTED, /* Rejected Security */
137 MGMT_STATUS_REJECTED, /* Rejected Personal */
138 MGMT_STATUS_TIMEOUT, /* Host Timeout */
139 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
140 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
141 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
142 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
143 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
144 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
145 MGMT_STATUS_BUSY, /* Repeated Attempts */
146 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
147 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
148 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
149 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
150 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
151 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
153 MGMT_STATUS_FAILED, /* Unspecified Error */
154 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
155 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
156 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
157 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
158 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
159 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
160 MGMT_STATUS_FAILED, /* Unit Link Key Used */
161 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
162 MGMT_STATUS_TIMEOUT, /* Instant Passed */
163 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
164 MGMT_STATUS_FAILED, /* Transaction Collision */
165 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
166 MGMT_STATUS_REJECTED, /* QoS Rejected */
167 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
168 MGMT_STATUS_REJECTED, /* Insufficient Security */
169 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
170 MGMT_STATUS_BUSY, /* Role Switch Pending */
171 MGMT_STATUS_FAILED, /* Slot Violation */
172 MGMT_STATUS_FAILED, /* Role Switch Failed */
173 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
174 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
175 MGMT_STATUS_BUSY, /* Host Busy Pairing */
176 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
177 MGMT_STATUS_BUSY, /* Controller Busy */
178 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
179 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
180 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
181 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
182 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
185 bool mgmt_valid_hdev(struct hci_dev *hdev)
187 return hdev->dev_type == HCI_BREDR;
190 static u8 mgmt_status(u8 hci_status)
192 if (hci_status < ARRAY_SIZE(mgmt_status_table))
193 return mgmt_status_table[hci_status];
195 return MGMT_STATUS_FAILED;
/* Queue a MGMT_EV_CMD_STATUS event on the requesting socket, reporting
 * an error status for command 'cmd' on controller 'index'.
 * NOTE(review): the allocation-failure and error-cleanup lines are
 * missing from this extract (number gaps); code kept byte-identical.
 */
198 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
201 struct mgmt_hdr *hdr;
202 struct mgmt_ev_cmd_status *ev;
205 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
207 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
/* Build the mgmt header followed by the cmd_status event payload. */
211 hdr = (void *) skb_put(skb, sizeof(*hdr));
213 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
214 hdr->index = cpu_to_le16(index);
215 hdr->len = cpu_to_le16(sizeof(*ev));
217 ev = (void *) skb_put(skb, sizeof(*ev));
219 ev->opcode = cpu_to_le16(cmd);
221 err = sock_queue_rcv_skb(sk, skb);
/* Queue a MGMT_EV_CMD_COMPLETE event carrying rp/rp_len as the
 * command-specific return parameters.
 */
228 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
229 void *rp, size_t rp_len)
232 struct mgmt_hdr *hdr;
233 struct mgmt_ev_cmd_complete *ev;
236 BT_DBG("sock %p", sk);
/* Event payload is variable length: fixed part plus rp_len bytes. */
238 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
242 hdr = (void *) skb_put(skb, sizeof(*hdr));
244 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
245 hdr->index = cpu_to_le16(index);
246 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
248 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
249 ev->opcode = cpu_to_le16(cmd);
253 memcpy(ev->data, rp, rp_len);
255 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION: report the mgmt interface version/revision.
 * Index-less command, answered with MGMT_INDEX_NONE.
 */
262 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
265 struct mgmt_rp_read_version rp;
267 BT_DBG("sock %p", sk);
269 rp.version = MGMT_VERSION;
270 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
272 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS: report the supported command and event opcode
 * lists as little-endian u16 arrays, commands first then events.
 */
276 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
279 struct mgmt_rp_read_commands *rp;
280 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
281 const u16 num_events = ARRAY_SIZE(mgmt_events);
286 BT_DBG("sock %p", sk);
288 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
290 rp = kmalloc(rp_size, GFP_KERNEL);
294 rp->num_commands = __constant_cpu_to_le16(num_commands);
295 rp->num_events = __constant_cpu_to_le16(num_events);
/* opcodes[] is packed/unaligned, hence put_unaligned_le16(). The
 * same cursor continues into the events that follow the commands.
 */
297 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
298 put_unaligned_le16(mgmt_commands[i], opcode);
300 for (i = 0; i < num_events; i++, opcode++)
301 put_unaligned_le16(mgmt_events[i], opcode);
303 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST: report the ids of all mgmt-visible
 * controllers. Two passes under hci_dev_list_lock: first count valid
 * devices to size the reply, then fill in the ids, skipping devices
 * still in setup or bound to a user channel.
 */
310 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
313 struct mgmt_rp_read_index_list *rp;
319 BT_DBG("sock %p", sk);
321 read_lock(&hci_dev_list_lock);
/* First pass: count devices to allocate a big enough reply. */
324 list_for_each_entry(d, &hci_dev_list, list) {
325 if (!mgmt_valid_hdev(d))
331 rp_len = sizeof(*rp) + (2 * count);
/* GFP_ATOMIC: allocation happens while holding the read lock. */
332 rp = kmalloc(rp_len, GFP_ATOMIC);
334 read_unlock(&hci_dev_list_lock);
/* Second pass: record each eligible controller id. */
339 list_for_each_entry(d, &hci_dev_list, list) {
340 if (test_bit(HCI_SETUP, &d->dev_flags))
343 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
346 if (!mgmt_valid_hdev(d))
349 rp->index[count++] = cpu_to_le16(d->id);
350 BT_DBG("Added hci%u", d->id);
/* Recompute rp_len: the second pass may have skipped devices that
 * were counted in the first pass.
 */
353 rp->num_controllers = cpu_to_le16(count);
354 rp_len = sizeof(*rp) + (2 * count);
356 read_unlock(&hci_dev_list_lock);
358 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Compute the MGMT_SETTING_* bitmask of what this controller could
 * support, based on its LMP feature bits.
 */
366 static u32 get_supported_settings(struct hci_dev *hdev)
370 settings |= MGMT_SETTING_POWERED;
371 settings |= MGMT_SETTING_PAIRABLE;
373 if (lmp_ssp_capable(hdev))
374 settings |= MGMT_SETTING_SSP;
376 if (lmp_bredr_capable(hdev)) {
377 settings |= MGMT_SETTING_CONNECTABLE;
/* Fast connectable requires page-scan parameters from 1.2 on. */
378 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
379 settings |= MGMT_SETTING_FAST_CONNECTABLE;
380 settings |= MGMT_SETTING_DISCOVERABLE;
381 settings |= MGMT_SETTING_BREDR;
382 settings |= MGMT_SETTING_LINK_SECURITY;
386 settings |= MGMT_SETTING_HS;
388 if (lmp_le_capable(hdev)) {
389 settings |= MGMT_SETTING_LE;
390 settings |= MGMT_SETTING_ADVERTISING;
/* Compute the MGMT_SETTING_* bitmask of what is currently enabled,
 * derived from the hdev flag bits.
 */
396 static u32 get_current_settings(struct hci_dev *hdev)
400 if (hdev_is_powered(hdev))
401 settings |= MGMT_SETTING_POWERED;
403 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
404 settings |= MGMT_SETTING_CONNECTABLE;
406 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
407 settings |= MGMT_SETTING_FAST_CONNECTABLE;
409 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_DISCOVERABLE;
412 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_PAIRABLE;
415 if (lmp_bredr_capable(hdev))
416 settings |= MGMT_SETTING_BREDR;
418 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
419 settings |= MGMT_SETTING_LE;
421 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
422 settings |= MGMT_SETTING_LINK_SECURITY;
424 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
425 settings |= MGMT_SETTING_SSP;
427 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
428 settings |= MGMT_SETTING_HS;
430 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
431 settings |= MGMT_SETTING_ADVERTISING;
/* Device ID (PnP Information) service class; excluded from the EIR
 * UUID list because it gets its own EIR_DEVICE_ID entry.
 */
436 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the registered 16-bit UUIDs into 'data'
 * (at most 'len' bytes). uuids_start[0] holds the running field length,
 * uuids_start[1] the field type, downgraded from _ALL to _SOME when the
 * list is truncated for lack of space.
 */
438 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
440 u8 *ptr = data, *uuids_start = NULL;
441 struct bt_uuid *uuid;
446 list_for_each_entry(uuid, &hdev->uuids, list) {
449 if (uuid->size != 16)
/* 16-bit UUIDs live in bytes 12-13 of the 128-bit form. */
452 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
456 if (uuid16 == PNP_INFO_SVCLASS_ID)
462 uuids_start[1] = EIR_UUID16_ALL;
466 /* Stop if not enough space to put next UUID */
467 if ((ptr - data) + sizeof(u16) > len) {
468 uuids_start[1] = EIR_UUID16_SOME;
/* Emit little-endian and grow the field length byte. */
472 *ptr++ = (uuid16 & 0x00ff);
473 *ptr++ = (uuid16 & 0xff00) >> 8;
474 uuids_start[0] += sizeof(uuid16);
/* Same as create_uuid16_list() but for 32-bit UUIDs. */
480 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
482 u8 *ptr = data, *uuids_start = NULL;
483 struct bt_uuid *uuid;
488 list_for_each_entry(uuid, &hdev->uuids, list) {
489 if (uuid->size != 32)
495 uuids_start[1] = EIR_UUID32_ALL;
499 /* Stop if not enough space to put next UUID */
500 if ((ptr - data) + sizeof(u32) > len) {
501 uuids_start[1] = EIR_UUID32_SOME;
505 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
507 uuids_start[0] += sizeof(u32);
/* Same as create_uuid16_list() but for full 128-bit UUIDs. */
513 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515 u8 *ptr = data, *uuids_start = NULL;
516 struct bt_uuid *uuid;
521 list_for_each_entry(uuid, &hdev->uuids, list) {
522 if (uuid->size != 128)
528 uuids_start[1] = EIR_UUID128_ALL;
532 /* Stop if not enough space to put next UUID */
533 if ((ptr - data) + 16 > len) {
534 uuids_start[1] = EIR_UUID128_SOME;
538 memcpy(ptr, uuid->uuid, 16);
540 uuids_start[0] += 16;
/* Build the Extended Inquiry Response payload in 'data': local name,
 * TX power, Device ID and the three UUID lists.
 */
546 static void create_eir(struct hci_dev *hdev, u8 *data)
551 name_len = strlen(hdev->dev_name);
/* EIR_NAME_SHORT when the name was truncated, _COMPLETE otherwise. */
557 ptr[1] = EIR_NAME_SHORT;
559 ptr[1] = EIR_NAME_COMPLETE;
561 /* EIR Data length */
562 ptr[0] = name_len + 1;
564 memcpy(ptr + 2, hdev->dev_name, name_len);
566 ptr += (name_len + 2);
/* Only advertise TX power when the controller reported a valid one. */
569 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
571 ptr[1] = EIR_TX_POWER;
572 ptr[2] = (u8) hdev->inq_tx_power;
/* devid_source > 0 means a Device ID record was configured via
 * MGMT_OP_SET_DEVICE_ID.
 */
577 if (hdev->devid_source > 0) {
579 ptr[1] = EIR_DEVICE_ID;
581 put_unaligned_le16(hdev->devid_source, ptr + 2);
582 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
583 put_unaligned_le16(hdev->devid_product, ptr + 6);
584 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* Each list helper gets the space remaining in the EIR buffer. */
589 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
590 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
591 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command onto 'req' if the freshly built EIR
 * differs from what the controller already has. No-op when powered
 * off, EIR-incapable, SSP disabled, or while the service cache holds
 * updates back.
 */
594 static void update_eir(struct hci_request *req)
596 struct hci_dev *hdev = req->hdev;
597 struct hci_cp_write_eir cp;
599 if (!hdev_is_powered(hdev))
602 if (!lmp_ext_inq_capable(hdev))
605 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
608 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
611 memset(&cp, 0, sizeof(cp));
613 create_eir(hdev, cp.data)
615 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
618 memcpy(hdev->eir, cp.data, sizeof(cp.data));
620 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
623 static u8 get_service_classes(struct hci_dev *hdev)
625 struct bt_uuid *uuid;
628 list_for_each_entry(uuid, &hdev->uuids, list)
629 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command onto 'req' when the
 * computed class (minor, major, service classes) differs from the
 * current one. Skipped while powered off or service cache active.
 */
634 static void update_class(struct hci_request *req)
636 struct hci_dev *hdev = req->hdev;
639 BT_DBG("%s", hdev->name);
641 if (!hdev_is_powered(hdev))
644 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
647 cod[0] = hdev->minor_class;
648 cod[1] = hdev->major_class;
649 cod[2] = get_service_classes(hdev);
651 if (memcmp(cod, hdev->dev_class, 3) == 0)
654 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Delayed work: when the service cache timer fires, clear the cache
 * flag and flush the pending EIR/CoD updates to the controller.
 */
657 static void service_cache_off(struct work_struct *work)
659 struct hci_dev *hdev = container_of(work, struct hci_dev,
661 struct hci_request req;
663 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
666 hci_req_init(&req, hdev);
673 hci_dev_unlock(hdev);
675 hci_req_run(&req, NULL);
/* First-time mgmt setup for a controller: runs only once per hdev
 * (guarded by test_and_set of HCI_MGMT).
 */
678 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
680 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
683 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
685 /* Non-mgmt controlled devices get this bit set
686 * implicitly so that pairing works for them, however
687 * for mgmt we require user-space to explicitly enable
690 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO: report the controller's static and current
 * properties (address, version, settings, class, names).
 */
693 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
694 void *data, u16 data_len)
696 struct mgmt_rp_read_info rp;
698 BT_DBG("sock %p %s", sk, hdev->name);
702 memset(&rp, 0, sizeof(rp));
704 bacpy(&rp.bdaddr, &hdev->bdaddr);
706 rp.version = hdev->hci_ver;
707 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
709 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
710 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
712 memcpy(rp.dev_class, hdev->dev_class, 3);
714 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
715 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
717 hci_dev_unlock(hdev);
719 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command (body not visible in this extract). */
723 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate and register a pending mgmt command on hdev->mgmt_pending,
 * copying the request parameters so they survive until completion.
 */
730 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
731 struct hci_dev *hdev, void *data,
734 struct pending_cmd *cmd;
736 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
740 cmd->opcode = opcode;
741 cmd->index = hdev->id;
743 cmd->param = kmalloc(len, GFP_KERNEL);
750 memcpy(cmd->param, data, len);
755 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke 'cb' for every pending command matching 'opcode' (opcode 0
 * matches all). Safe iteration: callbacks may remove entries.
 */
760 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
761 void (*cb)(struct pending_cmd *cmd,
765 struct pending_cmd *cmd, *tmp;
767 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
768 if (opcode > 0 && cmd->opcode != opcode)
775 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
777 struct pending_cmd *cmd;
779 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
780 if (cmd->opcode == opcode)
787 static void mgmt_pending_remove(struct pending_cmd *cmd)
789 list_del(&cmd->list);
790 mgmt_pending_free(cmd);
793 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
795 __le32 settings = cpu_to_le32(get_current_settings(hdev));
797 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED: power the controller up or down. Validates the
 * boolean parameter, rejects a second concurrent request, handles the
 * auto-off window specially, and otherwise defers the actual work to
 * the power_on/power_off work items.
 */
801 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
804 struct mgmt_mode *cp = data;
805 struct pending_cmd *cmd;
808 BT_DBG("request for %s", hdev->name);
810 if (cp->val != 0x00 && cp->val != 0x01)
811 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
812 MGMT_STATUS_INVALID_PARAMS);
816 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
817 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Device was in the auto-off grace period: cancel the timer and
 * report the power state directly.
 */
822 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
823 cancel_delayed_work(&hdev->power_off);
826 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
828 err = mgmt_powered(hdev, 1);
/* Already in the requested state: just echo current settings. */
833 if (!!cp->val == hdev_is_powered(hdev)) {
834 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
838 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
845 queue_work(hdev->req_workqueue, &hdev->power_on);
847 queue_work(hdev->req_workqueue, &hdev->power_off.work);
852 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all control sockets except 'skip_sk'.
 * hdev may be NULL for index-less events (MGMT_INDEX_NONE).
 */
856 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
857 struct sock *skip_sk)
860 struct mgmt_hdr *hdr;
862 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
866 hdr = (void *) skb_put(skb, sizeof(*hdr));
867 hdr->opcode = cpu_to_le16(event);
869 hdr->index = cpu_to_le16(hdev->id);
871 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
872 hdr->len = cpu_to_le16(data_len);
875 memcpy(skb_put(skb, data_len), data, data_len);
878 __net_timestamp(skb);
880 hci_send_to_control(skb, skip_sk);
886 static int new_settings(struct hci_dev *hdev, struct sock *skip)
890 ev = cpu_to_le32(get_current_settings(hdev));
892 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* NOTE(review): fragment — the opening of the struct this member
 * belongs to (presumably struct cmd_lookup) is not in this extract.
 */
897 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with
 * the current settings and remember the first socket in the lookup so
 * the caller can skip it when broadcasting new settings.
 */
901 static void settings_rsp(struct pending_cmd *cmd, void *data)
903 struct cmd_lookup *match = data;
905 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
907 list_del(&cmd->list);
909 if (match->sk == NULL) {
911 sock_hold(match->sk);
914 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail each pending command with the
 * status passed via 'data' and drop it.
 */
917 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
921 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
922 mgmt_pending_remove(cmd);
/* MGMT_OP_SET_DISCOVERABLE: toggle inquiry scan, optionally with a
 * timeout after which discoverable mode is turned off again. Requires
 * BR/EDR, a boolean value, and (for a timeout) a powered, connectable
 * controller; concurrent discoverable/connectable changes are rejected
 * as busy.
 */
925 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
928 struct mgmt_cp_set_discoverable *cp = data;
929 struct pending_cmd *cmd;
934 BT_DBG("request for %s", hdev->name);
936 if (!lmp_bredr_capable(hdev))
937 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
938 MGMT_STATUS_NOT_SUPPORTED);
940 if (cp->val != 0x00 && cp->val != 0x01)
941 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
942 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense when enabling discoverable mode. */
944 timeout = __le16_to_cpu(cp->timeout);
945 if (!cp->val && timeout > 0)
946 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
947 MGMT_STATUS_INVALID_PARAMS);
951 if (!hdev_is_powered(hdev) && timeout > 0) {
952 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
953 MGMT_STATUS_NOT_POWERED);
957 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
958 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
959 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be enabled first. */
964 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
965 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
966 MGMT_STATUS_REJECTED);
/* Powered off: only flip the flag, no HCI traffic needed. */
970 if (!hdev_is_powered(hdev)) {
971 bool changed = false;
973 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
974 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
978 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
983 err = new_settings(hdev, sk);
/* Already in the requested state: just refresh the timeout. */
988 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
989 if (hdev->discov_timeout > 0) {
990 cancel_delayed_work(&hdev->discov_off);
991 hdev->discov_timeout = 0;
994 if (cp->val && timeout > 0) {
995 hdev->discov_timeout = timeout;
996 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
997 msecs_to_jiffies(hdev->discov_timeout * 1000));
1000 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1004 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1013 scan |= SCAN_INQUIRY;
1015 cancel_delayed_work(&hdev->discov_off);
1017 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1019 mgmt_pending_remove(cmd);
1022 hdev->discov_timeout = timeout;
1025 hci_dev_unlock(hdev);
/* Queue page-scan activity/type HCI commands onto 'req' to enable or
 * disable fast connectable mode. No-op on controllers older than 1.2
 * or when the parameters already match.
 */
1029 static void write_fast_connectable(struct hci_request *req, bool enable)
1031 struct hci_dev *hdev = req->hdev;
1032 struct hci_cp_write_page_scan_activity acp;
1035 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1039 type = PAGE_SCAN_TYPE_INTERLACED;
1041 /* 160 msec page scan interval */
1042 acp.interval = __constant_cpu_to_le16(0x0100);
1044 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1046 /* default 1.28 sec page scan */
1047 acp.interval = __constant_cpu_to_le16(0x0800);
/* Page scan window stays the same in both modes. */
1050 acp.window = __constant_cpu_to_le16(0x0012);
1052 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1053 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1054 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1057 if (hdev->page_scan_type != type)
1058 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* Completion callback for the set_connectable HCI request: answer the
 * pending mgmt command (if still there) with current settings.
 */
1061 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1063 struct pending_cmd *cmd;
1065 BT_DBG("status 0x%02x", status);
1069 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1073 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1075 mgmt_pending_remove(cmd);
1078 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE: toggle page scan. Requires BR/EDR and a
 * boolean value; disabling connectable also clears discoverable. While
 * powered off only the flags change; otherwise an HCI request is built
 * and completed asynchronously in set_connectable_complete().
 */
1081 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1084 struct mgmt_mode *cp = data;
1085 struct pending_cmd *cmd;
1086 struct hci_request req;
1090 BT_DBG("request for %s", hdev->name);
1092 if (!lmp_bredr_capable(hdev))
1093 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1094 MGMT_STATUS_NOT_SUPPORTED);
1096 if (cp->val != 0x00 && cp->val != 0x01)
1097 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1098 MGMT_STATUS_INVALID_PARAMS);
1102 if (!hdev_is_powered(hdev)) {
1103 bool changed = false;
1105 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1109 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Turning connectable off implies discoverable off too. */
1111 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1112 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1115 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1120 err = new_settings(hdev, sk);
1125 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1126 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1127 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
/* Scan state already matches: nothing to send. */
1132 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1133 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1137 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1148 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1149 hdev->discov_timeout > 0)
1150 cancel_delayed_work(&hdev->discov_off);
1153 hci_req_init(&req, hdev);
1155 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1157 /* If we're going from non-connectable to connectable or
1158 * vice-versa when fast connectable is enabled ensure that fast
1159 * connectable gets disabled. write_fast_connectable won't do
1160 * anything if the page scan parameters are already what they
1163 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1164 write_fast_connectable(&req, false);
1166 err = hci_req_run(&req, set_connectable_complete);
1168 mgmt_pending_remove(cmd);
1171 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE: purely a host-side flag, no HCI traffic.
 * Validates the boolean, flips HCI_PAIRABLE and broadcasts the new
 * settings.
 */
1175 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1178 struct mgmt_mode *cp = data;
1181 BT_DBG("request for %s", hdev->name);
1183 if (cp->val != 0x00 && cp->val != 0x01)
1184 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1185 MGMT_STATUS_INVALID_PARAMS);
1190 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1192 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1194 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1198 err = new_settings(hdev, sk);
1201 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY: toggle legacy link-level authentication
 * (HCI Write Auth Enable). Flag-only while powered off; rejects a
 * concurrent request; skips the HCI command when the controller's
 * auth state already matches.
 */
1205 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1208 struct mgmt_mode *cp = data;
1209 struct pending_cmd *cmd;
1213 BT_DBG("request for %s", hdev->name);
1215 if (!lmp_bredr_capable(hdev))
1216 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1217 MGMT_STATUS_NOT_SUPPORTED);
1219 if (cp->val != 0x00 && cp->val != 0x01)
1220 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1221 MGMT_STATUS_INVALID_PARAMS);
1225 if (!hdev_is_powered(hdev)) {
1226 bool changed = false;
1228 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1229 &hdev->dev_flags)) {
1230 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1234 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1239 err = new_settings(hdev, sk);
1244 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1245 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1252 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1253 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1257 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1263 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1265 mgmt_pending_remove(cmd);
1270 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP: toggle Secure Simple Pairing mode (HCI Write SSP
 * Mode). Requires SSP-capable hardware; flag-only while powered off;
 * rejects concurrent requests and no-ops when already in the requested
 * state.
 */
1274 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1276 struct mgmt_mode *cp = data;
1277 struct pending_cmd *cmd;
1281 BT_DBG("request for %s", hdev->name);
1283 if (!lmp_ssp_capable(hdev))
1284 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1285 MGMT_STATUS_NOT_SUPPORTED);
1287 if (cp->val != 0x00 && cp->val != 0x01)
1288 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1289 MGMT_STATUS_INVALID_PARAMS);
1295 if (!hdev_is_powered(hdev)) {
1296 bool changed = false;
1298 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1299 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1303 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1308 err = new_settings(hdev, sk);
1313 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1314 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1319 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1320 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1324 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1330 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1332 mgmt_pending_remove(cmd);
1337 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS: toggle the High Speed (AMP) host flag. Flag-only:
 * no HCI traffic involved.
 */
1341 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1343 struct mgmt_mode *cp = data;
1345 BT_DBG("request for %s", hdev->name);
1348 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1349 MGMT_STATUS_NOT_SUPPORTED);
1351 if (cp->val != 0x00 && cp->val != 0x01)
1352 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1353 MGMT_STATUS_INVALID_PARAMS);
1356 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1358 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1360 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* Completion callback for the set_le HCI request: on error fail all
 * pending SET_LE commands with the mapped status, otherwise answer
 * them with current settings and broadcast NEW_SETTINGS.
 */
1363 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1365 struct cmd_lookup match = { NULL, hdev };
1368 u8 mgmt_err = mgmt_status(status);
1370 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1375 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1377 new_settings(hdev, match.sk);
/* MGMT_OP_SET_LE: toggle LE host support (HCI Write LE Host
 * Supported). LE-only controllers cannot toggle it; flag-only when
 * powered off or already in the requested host state; disabling LE
 * also turns advertising off first.
 */
1383 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1385 struct mgmt_mode *cp = data;
1386 struct hci_cp_write_le_host_supported hci_cp;
1387 struct pending_cmd *cmd;
1388 struct hci_request req;
1392 BT_DBG("request for %s", hdev->name);
1394 if (!lmp_le_capable(hdev))
1395 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1396 MGMT_STATUS_NOT_SUPPORTED);
1398 if (cp->val != 0x00 && cp->val != 0x01)
1399 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1400 MGMT_STATUS_INVALID_PARAMS);
1402 /* LE-only devices do not allow toggling LE on/off */
1403 if (!lmp_bredr_capable(hdev))
1404 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1405 MGMT_STATUS_REJECTED);
1410 enabled = lmp_host_le_capable(hdev);
1412 if (!hdev_is_powered(hdev) || val == enabled) {
1413 bool changed = false;
1415 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1416 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Disabling LE implies disabling LE advertising too. */
1420 if (!val && test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
1421 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
1425 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1430 err = new_settings(hdev, sk);
1435 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1436 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1437 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1442 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1448 memset(&hci_cp, 0, sizeof(hci_cp));
1452 hci_cp.simul = lmp_le_br_capable(hdev);
1455 hci_req_init(&req, hdev);
/* Turn advertising off before dropping LE host support. */
1457 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags) && !val)
1458 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(val), &val);
1460 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1463 err = hci_req_run(&req, le_enable_complete);
1465 mgmt_pending_remove(cmd);
1468 hci_dev_unlock(hdev);
1472 /* This is a helper function to test for pending mgmt commands that can
1473 * cause CoD or EIR HCI commands. We can only allow one such pending
1474 * mgmt command at a time since otherwise we cannot easily track what
1475 * the current values are, will be, and based on that calculate if a new
1476 * HCI command needs to be sent and if yes with what value.
1478 static bool pending_eir_or_class(struct hci_dev *hdev)
1480 struct pending_cmd *cmd;
1482 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1483 switch (cmd->opcode) {
1484 case MGMT_OP_ADD_UUID:
1485 case MGMT_OP_REMOVE_UUID:
1486 case MGMT_OP_SET_DEV_CLASS:
1487 case MGMT_OP_SET_POWERED:
/* The Bluetooth base UUID in little-endian byte order; 16- and 32-bit
 * UUIDs are the base UUID with only bytes 12-15 differing.
 */
1495 static const u8 bluetooth_base_uuid[] = {
1496 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1497 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1500 static u8 get_uuid_size(const u8 *uuid)
1504 if (memcmp(uuid, bluetooth_base_uuid, 12))
1507 val = get_unaligned_le32(&uuid[12]);
1514 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1516 struct pending_cmd *cmd;
1520 cmd = mgmt_pending_find(mgmt_op, hdev);
1524 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1525 hdev->dev_class, 3);
1527 mgmt_pending_remove(cmd);
1530 hci_dev_unlock(hdev);
1533 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1535 BT_DBG("status 0x%02x", status);
1537 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: record a service UUID and refresh CoD/EIR.
 * Only one CoD/EIR-affecting command may be pending at a time (see
 * pending_eir_or_class()).  If the HCI request queues no commands
 * (-ENODATA), the command completes immediately with the current class.
 * NOTE(review): error-path lines (kmalloc failure, goto labels, braces)
 * are missing from this extract.
 */
1540 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1542 struct mgmt_cp_add_uuid *cp = data;
1543 struct pending_cmd *cmd;
1544 struct hci_request req;
1545 struct bt_uuid *uuid;
1548 BT_DBG("request for %s", hdev->name);
1552 if (pending_eir_or_class(hdev)) {
1553 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1558 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1564 memcpy(uuid->uuid, cp->uuid, 16);
1565 uuid->svc_hint = cp->svc_hint;
1566 uuid->size = get_uuid_size(cp->uuid);
1568 list_add_tail(&uuid->list, &hdev->uuids);
1570 hci_req_init(&req, hdev);
1575 err = hci_req_run(&req, add_uuid_complete);
/* -ENODATA means no HCI commands were queued: answer right away. */
1577 if (err != -ENODATA)
1580 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1581 hdev->dev_class, 3);
/* Otherwise defer the reply until add_uuid_complete() fires. */
1585 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1594 hci_dev_unlock(hdev);
/* Arm the service-cache delayed work unless it is already armed or the
 * controller is powered off.  NOTE(review): return statements and the
 * delay argument are missing from this extract.
 */
1598 static bool enable_service_cache(struct hci_dev *hdev)
1600 if (!hdev_is_powered(hdev))
/* test_and_set_bit(): only queue the work on the 0 -> 1 transition. */
1603 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1604 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_request completion callback for MGMT_OP_REMOVE_UUID. */
1612 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1614 BT_DBG("status 0x%02x", status);
1616 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one UUID, or clear all of them
 * when the all-zero wildcard UUID is given.  Mirrors add_uuid(): replies
 * immediately when the HCI request is empty (-ENODATA), otherwise defers
 * to remove_uuid_complete().
 * NOTE(review): several lines (found-counter, goto labels, braces) are
 * missing from this extract.
 */
1619 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1622 struct mgmt_cp_remove_uuid *cp = data;
1623 struct pending_cmd *cmd;
1624 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as a wildcard meaning "remove everything". */
1625 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1626 struct hci_request req;
1629 BT_DBG("request for %s", hdev->name);
1633 if (pending_eir_or_class(hdev)) {
1634 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1639 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1640 err = hci_uuids_clear(hdev);
/* When the service cache is (re)armed, reply directly. */
1642 if (enable_service_cache(hdev)) {
1643 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1644 0, hdev->dev_class, 3);
/* Single-UUID path: unlink every entry matching the given UUID. */
1653 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1654 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1657 list_del(&match->list);
/* Presumably reached when no entry matched -- verify in full source. */
1663 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1664 MGMT_STATUS_INVALID_PARAMS);
1669 hci_req_init(&req, hdev);
1674 err = hci_req_run(&req, remove_uuid_complete);
1676 if (err != -ENODATA)
1679 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1680 hdev->dev_class, 3);
1684 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1693 hci_dev_unlock(hdev);
/* hci_request completion callback for MGMT_OP_SET_DEV_CLASS. */
1697 static void set_class_complete(struct hci_dev *hdev, u8 status)
1699 BT_DBG("status 0x%02x", status);
1701 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: update major/minor device class.
 * BR/EDR only.  The low two bits of minor and the high three bits of
 * major are reserved and must be zero.  When powered off the values are
 * just stored and the command completes immediately.
 * NOTE(review): goto labels, relock after cancel_delayed_work_sync() and
 * closing braces are missing from this extract.
 */
1704 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1707 struct mgmt_cp_set_dev_class *cp = data;
1708 struct pending_cmd *cmd;
1709 struct hci_request req;
1712 BT_DBG("request for %s", hdev->name);
1714 if (!lmp_bredr_capable(hdev))
1715 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1716 MGMT_STATUS_NOT_SUPPORTED);
1720 if (pending_eir_or_class(hdev)) {
1721 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved bits: minor[1:0] and major[7:5] must be zero. */
1726 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1727 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1728 MGMT_STATUS_INVALID_PARAMS);
1732 hdev->major_class = cp->major;
1733 hdev->minor_class = cp->minor;
1735 if (!hdev_is_powered(hdev)) {
1736 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1737 hdev->dev_class, 3);
1741 hci_req_init(&req, hdev);
/* Flush the service cache synchronously before touching CoD/EIR;
 * the dev lock is dropped because the work itself takes it.
 */
1743 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1744 hci_dev_unlock(hdev);
1745 cancel_delayed_work_sync(&hdev->service_cache);
1752 err = hci_req_run(&req, set_class_complete);
1754 if (err != -ENODATA)
1757 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1758 hdev->dev_class, 3);
1762 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1771 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the controller's stored BR/EDR
 * link keys with the supplied list and set/clear the debug-keys flag.
 * The payload length must exactly match the declared key_count.
 * NOTE(review): expected_len is computed as u16; with a large key_count
 * the multiplication could wrap -- confirm an upper bound on key_count
 * against the full source / later upstream hardening.
 */
1775 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1778 struct mgmt_cp_load_link_keys *cp = data;
1779 u16 key_count, expected_len;
1782 key_count = __le16_to_cpu(cp->key_count);
1784 expected_len = sizeof(*cp) + key_count *
1785 sizeof(struct mgmt_link_key_info);
1786 if (expected_len != len) {
1787 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1789 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1790 MGMT_STATUS_INVALID_PARAMS);
/* debug_keys is a strict boolean. */
1793 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1794 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1795 MGMT_STATUS_INVALID_PARAMS);
1797 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every address type before mutating any state. */
1800 for (i = 0; i < key_count; i++) {
1801 struct mgmt_link_key_info *key = &cp->keys[i];
1803 if (key->addr.type != BDADDR_BREDR)
1804 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1805 MGMT_STATUS_INVALID_PARAMS);
/* Old keys are dropped wholesale before loading the new set. */
1810 hci_link_keys_clear(hdev);
1813 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1815 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1817 for (i = 0; i < key_count; i++) {
1818 struct mgmt_link_key_info *key = &cp->keys[i];
1820 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1821 key->type, key->pin_len);
1824 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1826 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_UNPAIRED event for @bdaddr/@addr_type,
 * skipping delivery to @skip_sk (the socket that requested the unpair).
 */
1831 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1832 u8 addr_type, struct sock *skip_sk)
1834 struct mgmt_ev_device_unpaired ev;
1836 bacpy(&ev.addr.bdaddr, bdaddr);
1837 ev.addr.type = addr_type;
1839 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored link key (BR/EDR) or
 * LTK (LE) for the given address, optionally disconnecting the device
 * first.  When a disconnect is issued the mgmt reply is deferred until
 * the disconnect completes; otherwise it is sent immediately together
 * with a Device Unpaired event.
 * NOTE(review): goto labels, the !conn early path and closing braces are
 * missing from this extract of the file.
 */
1843 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1846 struct mgmt_cp_unpair_device *cp = data;
1847 struct mgmt_rp_unpair_device rp;
1848 struct hci_cp_disconnect dc;
1849 struct pending_cmd *cmd;
1850 struct hci_conn *conn;
/* The reply always echoes the target address back to the caller. */
1853 memset(&rp, 0, sizeof(rp));
1854 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1855 rp.addr.type = cp->addr.type;
1857 if (!bdaddr_type_is_valid(cp->addr.type))
1858 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1859 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a strict boolean. */
1862 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1863 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1864 MGMT_STATUS_INVALID_PARAMS,
1869 if (!hdev_is_powered(hdev)) {
1870 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1871 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Pick the key store matching the transport of the address. */
1875 if (cp->addr.type == BDADDR_BREDR)
1876 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1878 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1881 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1882 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1886 if (cp->disconnect) {
1887 if (cp->addr.type == BDADDR_BREDR)
1888 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1891 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection: finish now and notify other mgmt sockets. */
1898 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1900 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Defer the reply until the HCI disconnect completes. */
1904 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1911 dc.handle = cpu_to_le16(conn->handle);
1912 dc.reason = HCI_ERROR_REMOTE_USER_TERM; /* 0x13, as in disconnect() */
1913 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1915 mgmt_pending_remove(cmd);
1918 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: tear down the ACL (BR/EDR) or LE link to
 * the given address.  The reply is deferred until the HCI disconnect
 * completes; only one Disconnect command may be pending at a time.
 * NOTE(review): goto labels and closing braces are missing from this
 * extract.
 */
1922 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1925 struct mgmt_cp_disconnect *cp = data;
1926 struct mgmt_rp_disconnect rp;
1927 struct hci_cp_disconnect dc;
1928 struct pending_cmd *cmd;
1929 struct hci_conn *conn;
/* The reply echoes the target address back to the caller. */
1934 memset(&rp, 0, sizeof(rp));
1935 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1936 rp.addr.type = cp->addr.type;
1938 if (!bdaddr_type_is_valid(cp->addr.type))
1939 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1940 MGMT_STATUS_INVALID_PARAMS,
1945 if (!test_bit(HCI_UP, &hdev->flags)) {
1946 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1947 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Reject if another Disconnect is already in flight. */
1951 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1952 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1953 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1957 if (cp->addr.type == BDADDR_BREDR)
1958 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1961 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1963 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1964 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1965 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1969 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1975 dc.handle = cpu_to_le16(conn->handle);
1976 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1978 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1980 mgmt_pending_remove(cmd);
1983 hci_dev_unlock(hdev);
/* Map an HCI link type + LE address type to the mgmt BDADDR_* address
 * type used on the management interface.
 * NOTE(review): the "case LE_LINK:" / "default:" labels and braces are
 * missing from this extract.
 */
1987 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1989 switch (link_type) {
1991 switch (addr_type) {
1992 case ADDR_LE_DEV_PUBLIC:
1993 return BDADDR_LE_PUBLIC;
1996 /* Fallback to LE Random address type */
1997 return BDADDR_LE_RANDOM;
2001 /* Fallback to BR/EDR type */
2002 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report all mgmt-visible connections.
 * Two passes over the connection hash: first to size the reply buffer,
 * then to fill it.  SCO/eSCO links are skipped in the fill pass, so the
 * length is recomputed before sending.
 * NOTE(review): kmalloc-failure handling, the i counter handling and
 * closing braces are missing from this extract.
 */
2006 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2009 struct mgmt_rp_get_connections *rp;
2019 if (!hdev_is_powered(hdev)) {
2020 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2021 MGMT_STATUS_NOT_POWERED);
/* Pass 1: count connections that mgmt has been told about. */
2026 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2027 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2031 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2032 rp = kmalloc(rp_len, GFP_KERNEL);
/* Pass 2: fill in addresses, skipping SCO/eSCO links. */
2039 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2040 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2042 bacpy(&rp->addr[i].bdaddr, &c->dst);
2043 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2044 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2049 rp->conn_count = cpu_to_le16(i);
2051 /* Recalculate length in case of filtered SCO connections, etc */
2052 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2054 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2060 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY mgmt command and send the matching
 * HCI negative reply; the pending entry is dropped on send failure.
 */
2064 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2065 struct mgmt_cp_pin_code_neg_reply *cp)
2067 struct pending_cmd *cmd;
2070 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
/* The HCI command payload is just the remote BD_ADDR. */
2075 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2076 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2078 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller.  For connections requiring high security a 16-digit
 * PIN is mandatory; anything shorter is converted into a negative reply.
 * NOTE(review): goto labels and closing braces are missing from this
 * extract.
 */
2083 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2086 struct hci_conn *conn;
2087 struct mgmt_cp_pin_code_reply *cp = data;
2088 struct hci_cp_pin_code_reply reply;
2089 struct pending_cmd *cmd;
2096 if (!hdev_is_powered(hdev)) {
2097 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2098 MGMT_STATUS_NOT_POWERED);
2102 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2104 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2105 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise NAK the request. */
2109 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2110 struct mgmt_cp_pin_code_neg_reply ncp;
2112 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2114 BT_ERR("PIN code is not 16 bytes long");
2116 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2118 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2119 MGMT_STATUS_INVALID_PARAMS);
2124 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2130 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2131 reply.pin_len = cp->pin_len;
2132 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2134 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2136 mgmt_pending_remove(cmd);
2139 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * subsequent pairing attempts.  Always succeeds.
 */
2143 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2146 struct mgmt_cp_set_io_capability *cp = data;
2152 hdev->io_capability = cp->io_capability;
2154 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2155 hdev->io_capability);
2157 hci_dev_unlock(hdev);
2159 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is @conn.
 * NOTE(review): the "continue"/"return" lines and braces are missing
 * from this extract; presumably returns NULL when nothing matches.
 */
2163 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2165 struct hci_dev *hdev = conn->hdev;
2166 struct pending_cmd *cmd;
2168 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2169 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2172 if (cmd->user_data != conn)
/* Finish a Pair Device operation: reply to the issuing socket with the
 * peer address and @status, detach all connection callbacks so no
 * further events reach this command, drop the connection reference and
 * remove the pending command.
 */
2181 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2183 struct mgmt_rp_pair_device rp;
2184 struct hci_conn *conn = cmd->user_data;
2186 bacpy(&rp.addr.bdaddr, &conn->dst);
2187 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2189 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2192 /* So we don't get further callbacks for this connection */
2193 conn->connect_cfm_cb = NULL;
2194 conn->security_cfm_cb = NULL;
2195 conn->disconn_cfm_cb = NULL;
/* Release the reference taken when pairing was started. */
2197 hci_conn_drop(conn);
2199 mgmt_pending_remove(cmd);
/* hci_conn callback: complete the matching Pair Device command with the
 * translated HCI status.
 */
2202 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2204 struct pending_cmd *cmd;
2206 BT_DBG("status %u", status);
2208 cmd = find_pairing(conn);
2210 BT_DBG("Unable to find a pending command");
2212 pairing_complete(cmd, mgmt_status(status));
/* LE connect callback for pairing: unlike BR/EDR, a successful LE
 * connection alone does not finish pairing (SMP continues afterwards);
 * a guard for that case is presumably in the lines missing from this
 * extract.
 */
2215 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2217 struct pending_cmd *cmd;
2219 BT_DBG("status %u", status);
2224 cmd = find_pairing(conn);
2226 BT_DBG("Unable to find a pending command");
2228 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a remote
 * device over BR/EDR or LE.  The reply is deferred and delivered by
 * pairing_complete() via the connection callbacks; io_cap 0x03
 * (NoInputNoOutput) selects bonding without MITM protection.
 * NOTE(review): goto labels, IS_ERR(conn) guard and closing braces are
 * missing from this extract.
 */
2231 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2234 struct mgmt_cp_pair_device *cp = data;
2235 struct mgmt_rp_pair_device rp;
2236 struct pending_cmd *cmd;
2237 u8 sec_level, auth_type;
2238 struct hci_conn *conn;
/* The reply echoes the target address back to the caller. */
2243 memset(&rp, 0, sizeof(rp));
2244 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2245 rp.addr.type = cp->addr.type;
2247 if (!bdaddr_type_is_valid(cp->addr.type))
2248 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2249 MGMT_STATUS_INVALID_PARAMS,
2254 if (!hdev_is_powered(hdev)) {
2255 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2256 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 = NoInputNoOutput: bonding without MITM. */
2260 sec_level = BT_SECURITY_MEDIUM;
2261 if (cp->io_cap == 0x03)
2262 auth_type = HCI_AT_DEDICATED_BONDING;
2264 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2266 if (cp->addr.type == BDADDR_BREDR)
2267 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2268 cp->addr.type, sec_level, auth_type);
2270 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2271 cp->addr.type, sec_level, auth_type);
2276 if (PTR_ERR(conn) == -EBUSY)
2277 status = MGMT_STATUS_BUSY;
2279 status = MGMT_STATUS_CONNECT_FAILED;
2281 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A callback already installed means another pairing is in progress. */
2287 if (conn->connect_cfm_cb) {
2288 hci_conn_drop(conn);
2289 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2290 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2294 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2297 hci_conn_drop(conn);
2301 /* For LE, just connecting isn't a proof that the pairing finished */
2302 if (cp->addr.type == BDADDR_BREDR)
2303 conn->connect_cfm_cb = pairing_complete_cb;
2305 conn->connect_cfm_cb = le_connect_complete_cb;
2307 conn->security_cfm_cb = pairing_complete_cb;
2308 conn->disconn_cfm_cb = pairing_complete_cb;
2309 conn->io_capability = cp->io_cap;
2310 cmd->user_data = conn;
/* Already connected and secure enough: finish immediately. */
2312 if (conn->state == BT_CONNECTED &&
2313 hci_conn_security(conn, sec_level, auth_type))
2314 pairing_complete(cmd, 0);
2319 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command for the given address, completing it with status Cancelled.
 * NOTE(review): goto labels and closing braces are missing from this
 * extract.
 */
2323 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2326 struct mgmt_addr_info *addr = data;
2327 struct pending_cmd *cmd;
2328 struct hci_conn *conn;
2335 if (!hdev_is_powered(hdev)) {
2336 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2337 MGMT_STATUS_NOT_POWERED);
2341 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2343 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2344 MGMT_STATUS_INVALID_PARAMS);
2348 conn = cmd->user_data;
/* The address must match the one the pending pairing targets. */
2350 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2351 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2352 MGMT_STATUS_INVALID_PARAMS);
2356 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2358 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2359 addr, sizeof(*addr));
2361 hci_dev_unlock(hdev);
/* Shared implementation for all user confirm/passkey (neg) replies.
 * LE addresses are handed to SMP directly; BR/EDR replies are queued as
 * a pending mgmt command and forwarded as the given HCI command, where
 * only HCI_OP_USER_PASSKEY_REPLY carries a passkey payload.
 * NOTE(review): goto labels and closing braces are missing from this
 * extract.
 */
2365 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2366 struct mgmt_addr_info *addr, u16 mgmt_op,
2367 u16 hci_op, __le32 passkey)
2369 struct pending_cmd *cmd;
2370 struct hci_conn *conn;
2375 if (!hdev_is_powered(hdev)) {
2376 err = cmd_complete(sk, hdev->id, mgmt_op,
2377 MGMT_STATUS_NOT_POWERED, addr,
2382 if (addr->type == BDADDR_BREDR)
2383 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2385 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2388 err = cmd_complete(sk, hdev->id, mgmt_op,
2389 MGMT_STATUS_NOT_CONNECTED, addr,
2394 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2395 /* Continue with pairing via SMP */
2396 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2399 err = cmd_complete(sk, hdev->id, mgmt_op,
2400 MGMT_STATUS_SUCCESS, addr,
2403 err = cmd_complete(sk, hdev->id, mgmt_op,
2404 MGMT_STATUS_FAILED, addr,
2410 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2416 /* Continue with pairing via HCI */
2417 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2418 struct hci_cp_user_passkey_reply cp;
2420 bacpy(&cp.bdaddr, &addr->bdaddr);
2421 cp.passkey = passkey;
2422 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* All other replies carry only the BD_ADDR. */
2424 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2428 mgmt_pending_remove(cmd);
2431 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp() (no passkey).
 */
2435 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2436 void *data, u16 len)
2438 struct mgmt_cp_pin_code_neg_reply *cp = data;
2442 return user_pairing_resp(sk, hdev, &cp->addr,
2443 MGMT_OP_PIN_CODE_NEG_REPLY,
2444 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validates the fixed payload size,
 * then defers to user_pairing_resp() (no passkey).
 */
2447 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2450 struct mgmt_cp_user_confirm_reply *cp = data;
2454 if (len != sizeof(*cp))
2455 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2456 MGMT_STATUS_INVALID_PARAMS);
2458 return user_pairing_resp(sk, hdev, &cp->addr,
2459 MGMT_OP_USER_CONFIRM_REPLY,
2460 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp() (no passkey).
 */
2463 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2464 void *data, u16 len)
2466 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2470 return user_pairing_resp(sk, hdev, &cp->addr,
2471 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2472 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forwards the user-entered passkey
 * via user_pairing_resp().
 */
2475 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2478 struct mgmt_cp_user_passkey_reply *cp = data;
2482 return user_pairing_resp(sk, hdev, &cp->addr,
2483 MGMT_OP_USER_PASSKEY_REPLY,
2484 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp() (no passkey).
 */
2487 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2488 void *data, u16 len)
2490 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2494 return user_pairing_resp(sk, hdev, &cp->addr,
2495 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2496 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto
 * the given request.
 */
2499 static void update_name(struct hci_request *req)
2501 struct hci_dev *hdev = req->hdev;
2502 struct hci_cp_write_local_name cp;
2504 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2506 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* hci_request completion callback for MGMT_OP_SET_LOCAL_NAME: reply to
 * the pending command with either an error status or the name data.
 * NOTE(review): the cmd NULL-check, the cmd->param access and the
 * success-reply payload lines are missing from this extract.
 */
2509 static void set_name_complete(struct hci_dev *hdev, u8 status)
2511 struct mgmt_cp_set_local_name *cp;
2512 struct pending_cmd *cmd;
2514 BT_DBG("status 0x%02x", status);
2518 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2525 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2526 mgmt_status(status));
2528 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2531 mgmt_pending_remove(cmd);
2534 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update short and full device name.
 * Unchanged names and powered-off controllers complete immediately
 * (the latter also broadcasts Local Name Changed); otherwise the name
 * is written to the controller and, if LE-capable, advertising data is
 * refreshed too.
 * NOTE(review): goto labels and closing braces are missing from this
 * extract.
 */
2537 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2540 struct mgmt_cp_set_local_name *cp = data;
2541 struct pending_cmd *cmd;
2542 struct hci_request req;
2549 /* If the old values are the same as the new ones just return a
2550 * direct command complete event.
2552 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2553 !memcmp(hdev->short_name, cp->short_name,
2554 sizeof(hdev->short_name))) {
2555 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name takes effect immediately; it is host-side only. */
2560 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2562 if (!hdev_is_powered(hdev)) {
2563 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2565 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2570 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2576 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2582 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2584 hci_req_init(&req, hdev);
2586 if (lmp_bredr_capable(hdev)) {
/* Keep LE advertising data in sync with the new name. */
2591 if (lmp_le_capable(hdev))
2592 hci_update_ad(&req);
2594 err = hci_req_run(&req, set_name_complete);
2596 mgmt_pending_remove(cmd);
2599 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask the controller for its local
 * OOB data (SSP required).  The reply is deferred until the HCI command
 * completes; only one instance may be pending at a time.
 * NOTE(review): goto labels and closing braces are missing from this
 * extract.
 */
2603 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2604 void *data, u16 data_len)
2606 struct pending_cmd *cmd;
2609 BT_DBG("%s", hdev->name);
2613 if (!hdev_is_powered(hdev)) {
2614 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2615 MGMT_STATUS_NOT_POWERED);
2619 if (!lmp_ssp_capable(hdev)) {
2620 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2621 MGMT_STATUS_NOT_SUPPORTED);
2625 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2626 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2631 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2637 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2639 mgmt_pending_remove(cmd);
2642 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store a remote device's OOB
 * hash/randomizer for later SSP pairing; replies synchronously.
 */
2646 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2647 void *data, u16 len)
2649 struct mgmt_cp_add_remote_oob_data *cp = data;
2653 BT_DBG("%s ", hdev->name);
2657 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2660 status = MGMT_STATUS_FAILED;
2662 status = MGMT_STATUS_SUCCESS;
2664 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2665 &cp->addr, sizeof(cp->addr));
2667 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored OOB data for the
 * given address; a lookup failure maps to Invalid Params.
 */
2671 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2672 void *data, u16 len)
2674 struct mgmt_cp_remove_remote_oob_data *cp = data;
2678 BT_DBG("%s", hdev->name);
2682 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2684 status = MGMT_STATUS_INVALID_PARAMS;
2686 status = MGMT_STATUS_SUCCESS;
2688 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2689 status, &cp->addr, sizeof(cp->addr));
2691 hci_dev_unlock(hdev);
/* Fail a pending Start Discovery: reset discovery state to STOPPED and
 * complete the command with the translated status plus discovery type.
 */
2695 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
2697 struct pending_cmd *cmd;
2701 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2703 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2707 type = hdev->discovery.type;
2709 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2710 &type, sizeof(type));
2711 mgmt_pending_remove(cmd);
/* hci_request completion callback for Start Discovery: on success move
 * to DISCOVERY_FINDING and, for LE/interleaved scans, arm the delayed
 * scan-disable timeout.
 * NOTE(review): the status!=0 guard, timeout constant for DISCOV_TYPE_LE
 * and break statements are missing from this extract.
 */
2716 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
2718 BT_DBG("status %d", status);
2722 mgmt_start_discovery_failed(hdev, status);
2723 hci_dev_unlock(hdev);
2728 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2729 hci_dev_unlock(hdev);
2731 switch (hdev->discovery.type) {
2732 case DISCOV_TYPE_LE:
2733 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2737 case DISCOV_TYPE_INTERLEAVED:
2738 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2739 DISCOV_INTERLEAVED_TIMEOUT);
2742 case DISCOV_TYPE_BREDR:
2746 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
/* MGMT_OP_START_DISCOVERY handler: kick off BR/EDR inquiry, LE scan or
 * interleaved discovery depending on the requested type.  Builds one
 * hci_request and defers the mgmt reply to start_discovery_complete().
 * Fix: "memset(¶m_cp, ...)" was a mis-encoding of "&param_cp"
 * ("&para" collapsed into the '¶' glyph); restored the address-of.
 * NOTE(review): goto labels, some status arguments and closing braces
 * are missing from this extract of the file.
 */
2750 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2751 void *data, u16 len)
2753 struct mgmt_cp_start_discovery *cp = data;
2754 struct pending_cmd *cmd;
2755 struct hci_cp_le_set_scan_param param_cp;
2756 struct hci_cp_le_set_scan_enable enable_cp;
2757 struct hci_cp_inquiry inq_cp;
2758 struct hci_request req;
2759 /* General inquiry access code (GIAC) */
2760 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2763 BT_DBG("%s", hdev->name);
2767 if (!hdev_is_powered(hdev)) {
2768 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2769 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and regular discovery are mutually exclusive. */
2773 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2774 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2779 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2780 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2785 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2791 hdev->discovery.type = cp->type;
2793 hci_req_init(&req, hdev);
2795 switch (hdev->discovery.type) {
2796 case DISCOV_TYPE_BREDR:
2797 if (!lmp_bredr_capable(hdev)) {
2798 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2799 MGMT_STATUS_NOT_SUPPORTED);
2800 mgmt_pending_remove(cmd);
2804 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2805 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2807 mgmt_pending_remove(cmd);
/* Stale inquiry results are dropped before a fresh inquiry. */
2811 hci_inquiry_cache_flush(hdev);
2813 memset(&inq_cp, 0, sizeof(inq_cp));
2814 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
2815 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
2816 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
2819 case DISCOV_TYPE_LE:
2820 case DISCOV_TYPE_INTERLEAVED:
2821 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2822 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2823 MGMT_STATUS_NOT_SUPPORTED);
2824 mgmt_pending_remove(cmd);
/* Interleaved discovery additionally needs BR/EDR support. */
2828 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
2829 !lmp_bredr_capable(hdev)) {
2830 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2831 MGMT_STATUS_NOT_SUPPORTED);
2832 mgmt_pending_remove(cmd);
/* An advertising (peripheral) controller may not scan. */
2836 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
2837 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2838 MGMT_STATUS_REJECTED);
2839 mgmt_pending_remove(cmd);
2843 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
2844 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2846 mgmt_pending_remove(cmd);
/* Active scan with the default discovery interval/window. */
2850 memset(&param_cp, 0, sizeof(param_cp));
2851 param_cp.type = LE_SCAN_ACTIVE;
2852 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
2853 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2854 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2857 memset(&enable_cp, 0, sizeof(enable_cp));
2858 enable_cp.enable = LE_SCAN_ENABLE;
2859 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2860 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2865 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2866 MGMT_STATUS_INVALID_PARAMS);
2867 mgmt_pending_remove(cmd);
2871 err = hci_req_run(&req, start_discovery_complete);
2873 mgmt_pending_remove(cmd);
2875 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2878 hci_dev_unlock(hdev);
/* Fail a pending Stop Discovery: complete the command with the
 * translated status plus the current discovery type.
 */
2882 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
2884 struct pending_cmd *cmd;
2887 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
2891 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2892 &hdev->discovery.type, sizeof(hdev->discovery.type));
2893 mgmt_pending_remove(cmd);
/* hci_request completion callback for Stop Discovery: report failure or
 * move discovery state to STOPPED.
 */
2898 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
2900 BT_DBG("status %d", status);
2905 mgmt_stop_discovery_failed(hdev, status);
2909 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2912 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: cancel whatever phase discovery is in
 * (inquiry, LE scan, or name resolution) and defer the reply to
 * stop_discovery_complete().
 * NOTE(review): goto labels, the else-branch of the FINDING case and
 * closing braces are missing from this extract.
 */
2915 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2918 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2919 struct pending_cmd *cmd;
2920 struct hci_cp_remote_name_req_cancel cp;
2921 struct inquiry_entry *e;
2922 struct hci_request req;
2923 struct hci_cp_le_set_scan_enable enable_cp;
2926 BT_DBG("%s", hdev->name);
2930 if (!hci_discovery_active(hdev)) {
2931 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2932 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2933 sizeof(mgmt_cp->type));
/* The type must match the discovery that is actually running. */
2937 if (hdev->discovery.type != mgmt_cp->type) {
2938 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2939 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2940 sizeof(mgmt_cp->type));
2944 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2950 hci_req_init(&req, hdev);
2952 switch (hdev->discovery.state) {
2953 case DISCOVERY_FINDING:
/* Either a BR/EDR inquiry or an LE scan is active; cancel it. */
2954 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2955 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2957 cancel_delayed_work(&hdev->le_scan_disable);
2959 memset(&enable_cp, 0, sizeof(enable_cp));
2960 enable_cp.enable = LE_SCAN_DISABLE;
2961 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
2962 sizeof(enable_cp), &enable_cp);
2967 case DISCOVERY_RESOLVING:
2968 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* Nothing being resolved: finish synchronously. */
2971 mgmt_pending_remove(cmd);
2972 err = cmd_complete(sk, hdev->id,
2973 MGMT_OP_STOP_DISCOVERY, 0,
2975 sizeof(mgmt_cp->type));
2976 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Cancel the in-flight remote name request. */
2980 bacpy(&cp.bdaddr, &e->data.bdaddr);
2981 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2987 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2989 mgmt_pending_remove(cmd);
2990 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2991 MGMT_STATUS_FAILED, &mgmt_cp->type,
2992 sizeof(mgmt_cp->type));
2996 err = hci_req_run(&req, stop_discovery_complete);
2998 mgmt_pending_remove(cmd);
3000 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3003 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether the name of
 * a discovered device is already known; unknown names are queued for
 * resolution via hci_inquiry_cache_update_resolve().
 * NOTE(review): goto labels and closing braces are missing from this
 * extract.
 */
3007 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3010 struct mgmt_cp_confirm_name *cp = data;
3011 struct inquiry_entry *e;
3014 BT_DBG("%s", hdev->name);
3018 if (!hci_discovery_active(hdev)) {
3019 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3020 MGMT_STATUS_FAILED);
3024 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3026 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3027 MGMT_STATUS_INVALID_PARAMS);
3031 if (cp->name_known) {
3032 e->name_state = NAME_KNOWN;
3035 e->name_state = NAME_NEEDED;
3036 hci_inquiry_cache_update_resolve(hdev, e);
3039 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3043 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the blacklist and
 * reply synchronously; a duplicate entry maps to Failed.
 */
3047 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3050 struct mgmt_cp_block_device *cp = data;
3054 BT_DBG("%s", hdev->name);
3056 if (!bdaddr_type_is_valid(cp->addr.type))
3057 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3058 MGMT_STATUS_INVALID_PARAMS,
3059 &cp->addr, sizeof(cp->addr));
3063 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3065 status = MGMT_STATUS_FAILED;
3067 status = MGMT_STATUS_SUCCESS;
3069 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3070 &cp->addr, sizeof(cp->addr));
3072 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the blacklist
 * and reply synchronously; a missing entry maps to Invalid Params.
 */
3077 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3080 struct mgmt_cp_unblock_device *cp = data;
3084 BT_DBG("%s", hdev->name);
3086 if (!bdaddr_type_is_valid(cp->addr.type))
3087 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3088 MGMT_STATUS_INVALID_PARAMS,
3089 &cp->addr, sizeof(cp->addr));
3093 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3095 status = MGMT_STATUS_INVALID_PARAMS;
3097 status = MGMT_STATUS_SUCCESS;
3099 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3100 &cp->addr, sizeof(cp->addr));
3102 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record fields
 * (source 0x0000-0x0002, vendor, product, version), reply immediately,
 * then run an HCI request -- presumably to refresh the EIR data with
 * the new Device ID; confirm against the full source.
 */
3107 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3110 struct mgmt_cp_set_device_id *cp = data;
3111 struct hci_request req;
3115 BT_DBG("%s", hdev->name);
3117 source = __le16_to_cpu(cp->source);
/* 0x0000 = disabled, 0x0001 = Bluetooth SIG, 0x0002 = USB IF. */
3119 if (source > 0x0002)
3120 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3121 MGMT_STATUS_INVALID_PARAMS);
3125 hdev->devid_source = source;
3126 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3127 hdev->devid_product = __le16_to_cpu(cp->product);
3128 hdev->devid_version = __le16_to_cpu(cp->version);
3130 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3132 hci_req_init(&req, hdev);
/* Fire-and-forget: no completion callback is needed here. */
3134 hci_req_run(&req, NULL);
3136 hci_dev_unlock(hdev);
/* hci_request completion callback for Set Advertising: on failure send
 * a command status to every pending SET_ADVERTISING command; on success
 * send settings replies and broadcast New Settings.
 */
3141 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3143 struct cmd_lookup match = { NULL, hdev };
3146 u8 mgmt_err = mgmt_status(status);
3148 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3149 cmd_status_rsp, &mgmt_err);
3153 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3156 new_settings(hdev, match.sk);
/* MGMT_OP_SET_ADVERTISING handler: toggle LE advertising.  Requires LE
 * support and LE enabled.  When powered off or already in the requested
 * state only the HCI_LE_PERIPHERAL flag is flipped; otherwise an HCI
 * LE Set Advertise Enable command is issued and the reply deferred to
 * set_advertising_complete().
 * NOTE(review): goto labels, the val assignment and closing braces are
 * missing from this extract.
 */
3162 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3164 struct mgmt_mode *cp = data;
3165 struct pending_cmd *cmd;
3166 struct hci_request req;
3170 BT_DBG("request for %s", hdev->name);
3172 if (!lmp_le_capable(hdev))
3173 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3174 MGMT_STATUS_NOT_SUPPORTED);
3176 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3177 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3178 MGMT_STATUS_REJECTED);
/* val is a strict boolean. */
3180 if (cp->val != 0x00 && cp->val != 0x01)
3181 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3182 MGMT_STATUS_INVALID_PARAMS);
3187 enabled = test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
/* Powered off or no change: just update the flag and settings. */
3189 if (!hdev_is_powered(hdev) || val == enabled) {
3190 bool changed = false;
3192 if (val != test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
3193 change_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
3197 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3202 err = new_settings(hdev, sk);
3207 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3208 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3209 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3214 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3220 hci_req_init(&req, hdev)
3222 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(val), &val);
3224 err = hci_req_run(&req, set_advertising_complete);
3226 mgmt_pending_remove(cmd);
3229 hci_dev_unlock(hdev);
3233 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3235 struct pending_cmd *cmd;
3237 BT_DBG("status 0x%02x", status);
3241 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3246 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3247 mgmt_status(status));
3249 struct mgmt_mode *cp = cmd->param;
3252 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3254 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3256 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3257 new_settings(hdev, cmd->sk);
3260 mgmt_pending_remove(cmd);
3263 hci_dev_unlock(hdev);
3266 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3267 void *data, u16 len)
3269 struct mgmt_mode *cp = data;
3270 struct pending_cmd *cmd;
3271 struct hci_request req;
3274 BT_DBG("%s", hdev->name);
3276 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
3277 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3278 MGMT_STATUS_NOT_SUPPORTED);
3280 if (cp->val != 0x00 && cp->val != 0x01)
3281 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3282 MGMT_STATUS_INVALID_PARAMS);
3284 if (!hdev_is_powered(hdev))
3285 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3286 MGMT_STATUS_NOT_POWERED);
3288 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3289 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3290 MGMT_STATUS_REJECTED);
3294 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3295 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3300 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3301 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3306 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3313 hci_req_init(&req, hdev);
3315 write_fast_connectable(&req, cp->val);
3317 err = hci_req_run(&req, fast_connectable_complete);
3319 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3320 MGMT_STATUS_FAILED);
3321 mgmt_pending_remove(cmd);
3325 hci_dev_unlock(hdev);
3330 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3332 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3334 if (key->master != 0x00 && key->master != 0x01)
3336 if (!bdaddr_type_is_le(key->addr.type))
3341 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3342 void *cp_data, u16 len)
3344 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3345 u16 key_count, expected_len;
3348 key_count = __le16_to_cpu(cp->key_count);
3350 expected_len = sizeof(*cp) + key_count *
3351 sizeof(struct mgmt_ltk_info);
3352 if (expected_len != len) {
3353 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3355 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3356 MGMT_STATUS_INVALID_PARAMS);
3359 BT_DBG("%s key_count %u", hdev->name, key_count);
3361 for (i = 0; i < key_count; i++) {
3362 struct mgmt_ltk_info *key = &cp->keys[i];
3364 if (!ltk_is_valid(key))
3365 return cmd_status(sk, hdev->id,
3366 MGMT_OP_LOAD_LONG_TERM_KEYS,
3367 MGMT_STATUS_INVALID_PARAMS);
3372 hci_smp_ltks_clear(hdev);
3374 for (i = 0; i < key_count; i++) {
3375 struct mgmt_ltk_info *key = &cp->keys[i];
3381 type = HCI_SMP_LTK_SLAVE;
3383 hci_add_ltk(hdev, &key->addr.bdaddr,
3384 bdaddr_to_le(key->addr.type),
3385 type, 0, key->authenticated, key->val,
3386 key->enc_size, key->ediv, key->rand);
3389 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3392 hci_dev_unlock(hdev);
3397 static const struct mgmt_handler {
3398 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3402 } mgmt_handlers[] = {
3403 { NULL }, /* 0x0000 (no command) */
3404 { read_version, false, MGMT_READ_VERSION_SIZE },
3405 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3406 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3407 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3408 { set_powered, false, MGMT_SETTING_SIZE },
3409 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3410 { set_connectable, false, MGMT_SETTING_SIZE },
3411 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3412 { set_pairable, false, MGMT_SETTING_SIZE },
3413 { set_link_security, false, MGMT_SETTING_SIZE },
3414 { set_ssp, false, MGMT_SETTING_SIZE },
3415 { set_hs, false, MGMT_SETTING_SIZE },
3416 { set_le, false, MGMT_SETTING_SIZE },
3417 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3418 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3419 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3420 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
3421 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3422 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3423 { disconnect, false, MGMT_DISCONNECT_SIZE },
3424 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3425 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3426 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3427 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3428 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3429 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3430 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3431 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3432 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3433 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3434 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3435 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3436 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3437 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3438 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3439 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3440 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3441 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3442 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3443 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
3444 { set_advertising, false, MGMT_SETTING_SIZE },
3448 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3452 struct mgmt_hdr *hdr;
3453 u16 opcode, index, len;
3454 struct hci_dev *hdev = NULL;
3455 const struct mgmt_handler *handler;
3458 BT_DBG("got %zu bytes", msglen);
3460 if (msglen < sizeof(*hdr))
3463 buf = kmalloc(msglen, GFP_KERNEL);
3467 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3473 opcode = __le16_to_cpu(hdr->opcode);
3474 index = __le16_to_cpu(hdr->index);
3475 len = __le16_to_cpu(hdr->len);
3477 if (len != msglen - sizeof(*hdr)) {
3482 if (index != MGMT_INDEX_NONE) {
3483 hdev = hci_dev_get(index);
3485 err = cmd_status(sk, index, opcode,
3486 MGMT_STATUS_INVALID_INDEX);
3490 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3491 err = cmd_status(sk, index, opcode,
3492 MGMT_STATUS_INVALID_INDEX);
3497 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3498 mgmt_handlers[opcode].func == NULL) {
3499 BT_DBG("Unknown op %u", opcode);
3500 err = cmd_status(sk, index, opcode,
3501 MGMT_STATUS_UNKNOWN_COMMAND);
3505 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3506 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3507 err = cmd_status(sk, index, opcode,
3508 MGMT_STATUS_INVALID_INDEX);
3512 handler = &mgmt_handlers[opcode];
3514 if ((handler->var_len && len < handler->data_len) ||
3515 (!handler->var_len && len != handler->data_len)) {
3516 err = cmd_status(sk, index, opcode,
3517 MGMT_STATUS_INVALID_PARAMS);
3522 mgmt_init_hdev(sk, hdev);
3524 cp = buf + sizeof(*hdr);
3526 err = handler->func(sk, hdev, cp, len);
3540 int mgmt_index_added(struct hci_dev *hdev)
3542 if (!mgmt_valid_hdev(hdev))
3545 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
3548 int mgmt_index_removed(struct hci_dev *hdev)
3550 u8 status = MGMT_STATUS_INVALID_INDEX;
3552 if (!mgmt_valid_hdev(hdev))
3555 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3557 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3560 static void set_bredr_scan(struct hci_request *req)
3562 struct hci_dev *hdev = req->hdev;
3565 /* Ensure that fast connectable is disabled. This function will
3566 * not do anything if the page scan parameters are already what
3569 write_fast_connectable(req, false);
3571 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3573 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3574 scan |= SCAN_INQUIRY;
3577 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3580 static void powered_complete(struct hci_dev *hdev, u8 status)
3582 struct cmd_lookup match = { NULL, hdev };
3584 BT_DBG("status 0x%02x", status);
3588 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3590 new_settings(hdev, match.sk);
3592 hci_dev_unlock(hdev);
3598 static int powered_update_hci(struct hci_dev *hdev)
3600 struct hci_request req;
3603 hci_req_init(&req, hdev);
3605 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3606 !lmp_host_ssp_capable(hdev)) {
3609 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3612 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
3613 lmp_bredr_capable(hdev)) {
3614 struct hci_cp_write_le_host_supported cp;
3617 cp.simul = lmp_le_br_capable(hdev);
3619 /* Check first if we already have the right
3620 * host state (host features set)
3622 if (cp.le != lmp_host_le_capable(hdev) ||
3623 cp.simul != lmp_host_le_br_capable(hdev))
3624 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3628 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
3631 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(adv), &adv);
3634 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3635 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3636 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3637 sizeof(link_sec), &link_sec);
3639 if (lmp_bredr_capable(hdev)) {
3640 set_bredr_scan(&req);
3646 return hci_req_run(&req, powered_complete);
3649 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3651 struct cmd_lookup match = { NULL, hdev };
3652 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3653 u8 zero_cod[] = { 0, 0, 0 };
3656 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3660 if (powered_update_hci(hdev) == 0)
3663 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3668 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3669 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3671 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3672 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3673 zero_cod, sizeof(zero_cod), NULL);
3676 err = new_settings(hdev, match.sk);
3684 int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
3686 struct pending_cmd *cmd;
3689 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
3693 if (err == -ERFKILL)
3694 status = MGMT_STATUS_RFKILLED;
3696 status = MGMT_STATUS_FAILED;
3698 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
3700 mgmt_pending_remove(cmd);
3705 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3707 struct cmd_lookup match = { NULL, hdev };
3708 bool changed = false;
3712 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3715 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3719 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3723 err = new_settings(hdev, match.sk);
3731 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3733 struct pending_cmd *cmd;
3734 bool changed = false;
3738 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3741 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3745 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3748 err = new_settings(hdev, cmd ? cmd->sk : NULL);
3753 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3755 u8 mgmt_err = mgmt_status(status);
3757 if (scan & SCAN_PAGE)
3758 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3759 cmd_status_rsp, &mgmt_err);
3761 if (scan & SCAN_INQUIRY)
3762 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3763 cmd_status_rsp, &mgmt_err);
3768 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3771 struct mgmt_ev_new_link_key ev;
3773 memset(&ev, 0, sizeof(ev));
3775 ev.store_hint = persistent;
3776 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3777 ev.key.addr.type = BDADDR_BREDR;
3778 ev.key.type = key->type;
3779 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3780 ev.key.pin_len = key->pin_len;
3782 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
3785 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3787 struct mgmt_ev_new_long_term_key ev;
3789 memset(&ev, 0, sizeof(ev));
3791 ev.store_hint = persistent;
3792 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3793 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3794 ev.key.authenticated = key->authenticated;
3795 ev.key.enc_size = key->enc_size;
3796 ev.key.ediv = key->ediv;
3798 if (key->type == HCI_SMP_LTK)
3801 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3802 memcpy(ev.key.val, key->val, sizeof(key->val));
3804 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
3808 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3809 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3813 struct mgmt_ev_device_connected *ev = (void *) buf;
3816 bacpy(&ev->addr.bdaddr, bdaddr);
3817 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3819 ev->flags = __cpu_to_le32(flags);
3822 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
3825 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3826 eir_len = eir_append_data(ev->eir, eir_len,
3827 EIR_CLASS_OF_DEV, dev_class, 3);
3829 ev->eir_len = cpu_to_le16(eir_len);
3831 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3832 sizeof(*ev) + eir_len, NULL);
3835 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3837 struct mgmt_cp_disconnect *cp = cmd->param;
3838 struct sock **sk = data;
3839 struct mgmt_rp_disconnect rp;
3841 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3842 rp.addr.type = cp->addr.type;
3844 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3850 mgmt_pending_remove(cmd);
3853 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3855 struct hci_dev *hdev = data;
3856 struct mgmt_cp_unpair_device *cp = cmd->param;
3857 struct mgmt_rp_unpair_device rp;
3859 memset(&rp, 0, sizeof(rp));
3860 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3861 rp.addr.type = cp->addr.type;
3863 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3865 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3867 mgmt_pending_remove(cmd);
3870 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3871 u8 link_type, u8 addr_type, u8 reason)
3873 struct mgmt_ev_device_disconnected ev;
3874 struct sock *sk = NULL;
3877 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3879 bacpy(&ev.addr.bdaddr, bdaddr);
3880 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3883 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3889 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3895 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3896 u8 link_type, u8 addr_type, u8 status)
3898 struct mgmt_rp_disconnect rp;
3899 struct pending_cmd *cmd;
3902 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3905 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3909 bacpy(&rp.addr.bdaddr, bdaddr);
3910 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3912 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3913 mgmt_status(status), &rp, sizeof(rp));
3915 mgmt_pending_remove(cmd);
3920 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3921 u8 addr_type, u8 status)
3923 struct mgmt_ev_connect_failed ev;
3925 bacpy(&ev.addr.bdaddr, bdaddr);
3926 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3927 ev.status = mgmt_status(status);
3929 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
3932 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3934 struct mgmt_ev_pin_code_request ev;
3936 bacpy(&ev.addr.bdaddr, bdaddr);
3937 ev.addr.type = BDADDR_BREDR;
3940 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
3944 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3947 struct pending_cmd *cmd;
3948 struct mgmt_rp_pin_code_reply rp;
3951 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3955 bacpy(&rp.addr.bdaddr, bdaddr);
3956 rp.addr.type = BDADDR_BREDR;
3958 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3959 mgmt_status(status), &rp, sizeof(rp));
3961 mgmt_pending_remove(cmd);
3966 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3969 struct pending_cmd *cmd;
3970 struct mgmt_rp_pin_code_reply rp;
3973 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3977 bacpy(&rp.addr.bdaddr, bdaddr);
3978 rp.addr.type = BDADDR_BREDR;
3980 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3981 mgmt_status(status), &rp, sizeof(rp));
3983 mgmt_pending_remove(cmd);
3988 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3989 u8 link_type, u8 addr_type, __le32 value,
3992 struct mgmt_ev_user_confirm_request ev;
3994 BT_DBG("%s", hdev->name);
3996 bacpy(&ev.addr.bdaddr, bdaddr);
3997 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3998 ev.confirm_hint = confirm_hint;
4001 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
4005 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4006 u8 link_type, u8 addr_type)
4008 struct mgmt_ev_user_passkey_request ev;
4010 BT_DBG("%s", hdev->name);
4012 bacpy(&ev.addr.bdaddr, bdaddr);
4013 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4015 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
4019 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4020 u8 link_type, u8 addr_type, u8 status,
4023 struct pending_cmd *cmd;
4024 struct mgmt_rp_user_confirm_reply rp;
4027 cmd = mgmt_pending_find(opcode, hdev);
4031 bacpy(&rp.addr.bdaddr, bdaddr);
4032 rp.addr.type = link_to_bdaddr(link_type, addr_type);
4033 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
4036 mgmt_pending_remove(cmd);
4041 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4042 u8 link_type, u8 addr_type, u8 status)
4044 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4045 status, MGMT_OP_USER_CONFIRM_REPLY);
4048 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4049 u8 link_type, u8 addr_type, u8 status)
4051 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4053 MGMT_OP_USER_CONFIRM_NEG_REPLY);
4056 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4057 u8 link_type, u8 addr_type, u8 status)
4059 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4060 status, MGMT_OP_USER_PASSKEY_REPLY);
4063 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4064 u8 link_type, u8 addr_type, u8 status)
4066 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4068 MGMT_OP_USER_PASSKEY_NEG_REPLY);
4071 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4072 u8 link_type, u8 addr_type, u32 passkey,
4075 struct mgmt_ev_passkey_notify ev;
4077 BT_DBG("%s", hdev->name);
4079 bacpy(&ev.addr.bdaddr, bdaddr);
4080 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4081 ev.passkey = __cpu_to_le32(passkey);
4082 ev.entered = entered;
4084 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
4087 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4088 u8 addr_type, u8 status)
4090 struct mgmt_ev_auth_failed ev;
4092 bacpy(&ev.addr.bdaddr, bdaddr);
4093 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4094 ev.status = mgmt_status(status);
4096 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
4099 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4101 struct cmd_lookup match = { NULL, hdev };
4102 bool changed = false;
4106 u8 mgmt_err = mgmt_status(status);
4107 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4108 cmd_status_rsp, &mgmt_err);
4112 if (test_bit(HCI_AUTH, &hdev->flags)) {
4113 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4116 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4120 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4124 err = new_settings(hdev, match.sk);
4132 static void clear_eir(struct hci_request *req)
4134 struct hci_dev *hdev = req->hdev;
4135 struct hci_cp_write_eir cp;
4137 if (!lmp_ext_inq_capable(hdev))
4140 memset(hdev->eir, 0, sizeof(hdev->eir));
4142 memset(&cp, 0, sizeof(cp));
4144 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
4147 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4149 struct cmd_lookup match = { NULL, hdev };
4150 struct hci_request req;
4151 bool changed = false;
4155 u8 mgmt_err = mgmt_status(status);
4157 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4159 err = new_settings(hdev, NULL);
4161 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4168 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4171 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4175 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4178 err = new_settings(hdev, match.sk);
4183 hci_req_init(&req, hdev);
4185 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4190 hci_req_run(&req, NULL);
4195 static void sk_lookup(struct pending_cmd *cmd, void *data)
4197 struct cmd_lookup *match = data;
4199 if (match->sk == NULL) {
4200 match->sk = cmd->sk;
4201 sock_hold(match->sk);
4205 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4208 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4211 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4212 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4213 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4216 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
4225 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4227 struct mgmt_cp_set_local_name ev;
4228 struct pending_cmd *cmd;
4233 memset(&ev, 0, sizeof(ev));
4234 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4235 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4237 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4239 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4241 /* If this is a HCI command related to powering on the
4242 * HCI dev don't send any mgmt signals.
4244 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4248 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4249 cmd ? cmd->sk : NULL);
4252 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4253 u8 *randomizer, u8 status)
4255 struct pending_cmd *cmd;
4258 BT_DBG("%s status %u", hdev->name, status);
4260 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4265 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4266 mgmt_status(status));
4268 struct mgmt_rp_read_local_oob_data rp;
4270 memcpy(rp.hash, hash, sizeof(rp.hash));
4271 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4273 err = cmd_complete(cmd->sk, hdev->id,
4274 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
4278 mgmt_pending_remove(cmd);
4283 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4284 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4285 ssp, u8 *eir, u16 eir_len)
4288 struct mgmt_ev_device_found *ev = (void *) buf;
4291 if (!hci_discovery_active(hdev))
4294 /* Leave 5 bytes for a potential CoD field */
4295 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4298 memset(buf, 0, sizeof(buf));
4300 bacpy(&ev->addr.bdaddr, bdaddr);
4301 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4304 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4306 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4309 memcpy(ev->eir, eir, eir_len);
4311 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4312 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4315 ev->eir_len = cpu_to_le16(eir_len);
4316 ev_size = sizeof(*ev) + eir_len;
4318 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
4321 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4322 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4324 struct mgmt_ev_device_found *ev;
4325 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4328 ev = (struct mgmt_ev_device_found *) buf;
4330 memset(buf, 0, sizeof(buf));
4332 bacpy(&ev->addr.bdaddr, bdaddr);
4333 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4336 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4339 ev->eir_len = cpu_to_le16(eir_len);
4341 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4342 sizeof(*ev) + eir_len, NULL);
4345 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4347 struct mgmt_ev_discovering ev;
4348 struct pending_cmd *cmd;
4350 BT_DBG("%s discovering %u", hdev->name, discovering);
4353 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4355 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4358 u8 type = hdev->discovery.type;
4360 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4362 mgmt_pending_remove(cmd);
4365 memset(&ev, 0, sizeof(ev));
4366 ev.type = hdev->discovery.type;
4367 ev.discovering = discovering;
4369 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
4372 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4374 struct pending_cmd *cmd;
4375 struct mgmt_ev_device_blocked ev;
4377 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4379 bacpy(&ev.addr.bdaddr, bdaddr);
4380 ev.addr.type = type;
4382 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4383 cmd ? cmd->sk : NULL);
4386 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4388 struct pending_cmd *cmd;
4389 struct mgmt_ev_device_unblocked ev;
4391 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4393 bacpy(&ev.addr.bdaddr, bdaddr);
4394 ev.addr.type = type;
4396 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4397 cmd ? cmd->sk : NULL);
4400 module_param(enable_hs, bool, 0644);
4401 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");