2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
/* Management interface version/revision reported via MGMT_OP_READ_VERSION */
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 2
/* Opcodes this kernel supports; reported by read_commands(). NOTE(review):
 * this extract is missing some entries (original line numbers show gaps) —
 * verify against the upstream file before relying on completeness. */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
/* Events this kernel can emit; reported by read_commands() alongside the
 * command table. NOTE(review): entries elided in this extract (line-number
 * gaps) — confirm against upstream. */
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
/* LE scan / BR-EDR inquiry tuning constants; timeouts are in milliseconds,
 * inquiry lengths in 1.28 s units per the HCI spec comments below. */
106 * These LE scan and inquiry parameters were chosen according to LE General
107 * Discovery Procedure specification.
109 #define LE_SCAN_TYPE 0x01
110 #define LE_SCAN_WIN 0x12
111 #define LE_SCAN_INT 0x12
112 #define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
113 #define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
115 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
116 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
118 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* "Powered" = HCI_UP set and not inside the auto-power-off grace period */
120 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
121 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
124 struct list_head list;
132 /* HCI to MGMT error code conversion table */
/* Indexed directly by the raw HCI status byte; mgmt_status() bounds-checks
 * before indexing and falls back to MGMT_STATUS_FAILED. */
133 static u8 mgmt_status_table[] = {
135 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
136 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
137 MGMT_STATUS_FAILED, /* Hardware Failure */
138 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
139 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
140 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
141 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
142 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
144 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
145 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
146 MGMT_STATUS_BUSY, /* Command Disallowed */
147 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
148 MGMT_STATUS_REJECTED, /* Rejected Security */
149 MGMT_STATUS_REJECTED, /* Rejected Personal */
150 MGMT_STATUS_TIMEOUT, /* Host Timeout */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
153 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
154 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
155 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
156 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
157 MGMT_STATUS_BUSY, /* Repeated Attempts */
158 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
159 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
161 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
162 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
163 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
165 MGMT_STATUS_FAILED, /* Unspecified Error */
166 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
167 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
168 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
169 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
170 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
171 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
172 MGMT_STATUS_FAILED, /* Unit Link Key Used */
173 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
174 MGMT_STATUS_TIMEOUT, /* Instant Passed */
175 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
176 MGMT_STATUS_FAILED, /* Transaction Collision */
177 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
178 MGMT_STATUS_REJECTED, /* QoS Rejected */
179 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
180 MGMT_STATUS_REJECTED, /* Insufficient Security */
181 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
182 MGMT_STATUS_BUSY, /* Role Switch Pending */
183 MGMT_STATUS_FAILED, /* Slot Violation */
184 MGMT_STATUS_FAILED, /* Role Switch Failed */
185 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
186 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
187 MGMT_STATUS_BUSY, /* Host Busy Pairing */
188 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
189 MGMT_STATUS_BUSY, /* Controller Busy */
190 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
191 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
192 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
193 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Only BR/EDR controllers are exposed over the management interface */
197 bool mgmt_valid_hdev(struct hci_dev *hdev)
199 return hdev->dev_type == HCI_BREDR;
/* Translate a raw HCI status byte to a MGMT_STATUS_* code via the table
 * above; unknown codes collapse to a generic failure. */
202 static u8 mgmt_status(u8 hci_status)
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
207 return MGMT_STATUS_FAILED;
/* Queue a MGMT_EV_CMD_STATUS event to sk for command 'cmd' on 'index'.
 * NOTE(review): this extract drops several lines (skb NULL check, status
 * assignment, error cleanup) — see upstream for the full body. */
210 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
213 struct mgmt_hdr *hdr;
214 struct mgmt_ev_cmd_status *ev;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
219 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 hdr = (void *) skb_put(skb, sizeof(*hdr));
/* Event opcode is a compile-time constant, hence __constant_cpu_to_le16 */
225 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev));
229 ev = (void *) skb_put(skb, sizeof(*ev));
231 ev->opcode = cpu_to_le16(cmd);
233 err = sock_queue_rcv_skb(sk, skb);
/* Queue a MGMT_EV_CMD_COMPLETE event to sk, carrying rp_len bytes of
 * command-specific response payload (rp may be NULL when rp_len is 0). */
240 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
241 void *rp, size_t rp_len)
244 struct mgmt_hdr *hdr;
245 struct mgmt_ev_cmd_complete *ev;
248 BT_DBG("sock %p", sk);
250 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 hdr = (void *) skb_put(skb, sizeof(*hdr));
256 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index);
/* Header length covers the fixed event struct plus the response payload */
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
260 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
261 ev->opcode = cpu_to_le16(cmd);
265 memcpy(ev->data, rp, rp_len);
267 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with MGMT_VERSION/MGMT_REVISION.
 * Controller-independent, hence MGMT_INDEX_NONE in the reply. */
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_version rp;
279 BT_DBG("sock %p", sk);
281 rp.version = MGMT_VERSION;
282 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: reply with the supported command and
 * event opcode tables, each value written little-endian. */
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 BT_DBG("sock %p", sk);
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
302 rp = kmalloc(rp_size, GFP_KERNEL);
306 rp->num_commands = __constant_cpu_to_le16(num_commands);
307 rp->num_events = __constant_cpu_to_le16(num_events);
/* opcodes array may be unaligned inside the packed reply, hence
 * put_unaligned_le16; events are appended right after the commands */
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: return ids of all mgmt-valid
 * controllers. Walks hci_dev_list twice under hci_dev_list_lock: once to
 * size the reply, once to fill it (skipping devices still in HCI_SETUP). */
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_index_list *rp;
331 BT_DBG("sock %p", sk);
333 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
/* GFP_ATOMIC: still holding the read lock here */
343 rp_len = sizeof(*rp) + (2 * count);
344 rp = kmalloc(rp_len, GFP_ATOMIC);
346 read_unlock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (test_bit(HCI_SETUP, &d->dev_flags))
355 if (!mgmt_valid_hdev(d))
358 rp->index[count++] = cpu_to_le16(d->id);
359 BT_DBG("Added hci%u", d->id);
/* rp_len recomputed: the second pass may have skipped HCI_SETUP devices */
362 rp->num_controllers = cpu_to_le16(count);
363 rp_len = sizeof(*rp) + (2 * count);
365 read_unlock(&hci_dev_list_lock);
367 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the MGMT_SETTING_* mask of what this controller could support,
 * derived from its LMP feature bits. Powered/pairable are always offered. */
375 static u32 get_supported_settings(struct hci_dev *hdev)
379 settings |= MGMT_SETTING_POWERED;
380 settings |= MGMT_SETTING_PAIRABLE;
382 if (lmp_ssp_capable(hdev))
383 settings |= MGMT_SETTING_SSP;
385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
387 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE;
389 settings |= MGMT_SETTING_BREDR;
390 settings |= MGMT_SETTING_LINK_SECURITY;
394 settings |= MGMT_SETTING_HS;
396 if (lmp_le_capable(hdev))
397 settings |= MGMT_SETTING_LE;
/* Build the MGMT_SETTING_* mask reflecting the controller's current state,
 * read from hdev->flags / hdev->dev_flags bits. */
402 static u32 get_current_settings(struct hci_dev *hdev)
406 if (hdev_is_powered(hdev))
407 settings |= MGMT_SETTING_POWERED;
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE;
412 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_DISCOVERABLE;
415 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_PAIRABLE;
418 if (lmp_bredr_capable(hdev))
419 settings |= MGMT_SETTING_BREDR;
421 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_LE;
424 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LINK_SECURITY;
427 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
428 settings |= MGMT_SETTING_SSP;
430 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_HS;
/* PnP Information service class; explicitly excluded from the EIR UUID list */
436 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field of 16-bit service UUIDs to 'data' (at most 'len'
 * bytes); returns the advanced write pointer. The field's length byte
 * (uuids_start[0]) is grown as UUIDs are appended, and the tag is demoted
 * from UUID16_ALL to UUID16_SOME if space runs out. */
438 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
440 u8 *ptr = data, *uuids_start = NULL;
441 struct bt_uuid *uuid;
446 list_for_each_entry(uuid, &hdev->uuids, list) {
449 if (uuid->size != 16)
/* For 128-bit storage, the 16-bit short form lives at bytes 12..13 */
452 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
456 if (uuid16 == PNP_INFO_SVCLASS_ID)
462 uuids_start[1] = EIR_UUID16_ALL;
466 /* Stop if not enough space to put next UUID */
467 if ((ptr - data) + sizeof(u16) > len) {
468 uuids_start[1] = EIR_UUID16_SOME;
/* Little-endian byte order on the wire */
472 *ptr++ = (uuid16 & 0x00ff);
473 *ptr++ = (uuid16 & 0xff00) >> 8;
474 uuids_start[0] += sizeof(uuid16);
/* Same as create_uuid16_list() but for 32-bit UUIDs (copied verbatim from
 * bytes 12..15 of the stored 128-bit value). */
480 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
482 u8 *ptr = data, *uuids_start = NULL;
483 struct bt_uuid *uuid;
488 list_for_each_entry(uuid, &hdev->uuids, list) {
489 if (uuid->size != 32)
495 uuids_start[1] = EIR_UUID32_ALL;
499 /* Stop if not enough space to put next UUID */
500 if ((ptr - data) + sizeof(u32) > len) {
501 uuids_start[1] = EIR_UUID32_SOME;
505 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
507 uuids_start[0] += sizeof(u32);
/* Same as create_uuid16_list() but for full 128-bit UUIDs (all 16 bytes
 * copied as stored). */
513 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515 u8 *ptr = data, *uuids_start = NULL;
516 struct bt_uuid *uuid;
521 list_for_each_entry(uuid, &hdev->uuids, list) {
522 if (uuid->size != 128)
528 uuids_start[1] = EIR_UUID128_ALL;
532 /* Stop if not enough space to put next UUID */
533 if ((ptr - data) + 16 > len) {
534 uuids_start[1] = EIR_UUID128_SOME;
538 memcpy(ptr, uuid->uuid, 16);
540 uuids_start[0] += 16;
/* Assemble the Extended Inquiry Response payload into 'data': local name
 * (complete or shortened), inquiry TX power, Device ID record, then the
 * 16/32/128-bit UUID lists, each bounded by HCI_MAX_EIR_LENGTH. */
546 static void create_eir(struct hci_dev *hdev, u8 *data)
551 name_len = strlen(hdev->dev_name);
557 ptr[1] = EIR_NAME_SHORT;
559 ptr[1] = EIR_NAME_COMPLETE;
561 /* EIR Data length */
562 ptr[0] = name_len + 1;
564 memcpy(ptr + 2, hdev->dev_name, name_len);
566 ptr += (name_len + 2);
569 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
571 ptr[1] = EIR_TX_POWER;
572 ptr[2] = (u8) hdev->inq_tx_power;
/* devid_source > 0 means user space configured a Device ID record */
577 if (hdev->devid_source > 0) {
579 ptr[1] = EIR_DEVICE_ID;
581 put_unaligned_le16(hdev->devid_source, ptr + 2);
582 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
583 put_unaligned_le16(hdev->devid_product, ptr + 6);
584 put_unaligned_le16(hdev->devid_version, ptr + 8);
589 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
590 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
591 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Rebuild the EIR and send HCI_OP_WRITE_EIR if it changed. Early-outs:
 * not powered, no extended-inquiry support, SSP disabled, or the service
 * cache is active (EIR will be refreshed when the cache turns off). */
594 static int update_eir(struct hci_dev *hdev)
596 struct hci_cp_write_eir cp;
598 if (!hdev_is_powered(hdev))
601 if (!lmp_ext_inq_capable(hdev))
604 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
607 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
610 memset(&cp, 0, sizeof(cp));
612 create_eir(hdev, cp.data);
/* Skip the HCI command when the EIR is unchanged */
614 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
617 memcpy(hdev->eir, cp.data, sizeof(cp.data));
619 return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hint bits of every registered UUID; used
 * as the top byte of the Class of Device. */
622 static u8 get_service_classes(struct hci_dev *hdev)
624 struct bt_uuid *uuid;
627 list_for_each_entry(uuid, &hdev->uuids, list)
628 val |= uuid->svc_hint;
/* Send HCI_OP_WRITE_CLASS_OF_DEV if the computed 3-byte class differs from
 * the current one; sets HCI_PENDING_CLASS so completion handlers know a
 * class write is in flight. Skipped while unpowered or service-cached. */
633 static int update_class(struct hci_dev *hdev)
638 BT_DBG("%s", hdev->name);
640 if (!hdev_is_powered(hdev))
643 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
646 cod[0] = hdev->minor_class;
647 cod[1] = hdev->major_class;
648 cod[2] = get_service_classes(hdev);
650 if (memcmp(cod, hdev->dev_class, 3) == 0)
653 err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
655 set_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
/* Delayed-work handler that clears HCI_SERVICE_CACHE once the cache
 * window expires (work scheduled from enable_service_cache()). */
660 static void service_cache_off(struct work_struct *work)
662 struct hci_dev *hdev = container_of(work, struct hci_dev,
665 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
673 hci_dev_unlock(hdev);
/* One-time per-device mgmt setup, guarded by test_and_set of HCI_MGMT:
 * install the service-cache work and clear the implicit pairable bit so
 * user space must opt in explicitly. */
676 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
678 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
681 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off)
683 /* Non-mgmt controlled devices get this bit set
684 * implicitly so that pairing works for them, however
685 * for mgmt we require user-space to explicitly enable
688 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: reply with address, HCI version,
 * manufacturer, supported/current settings, class and names. */
691 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
692 void *data, u16 data_len)
694 struct mgmt_rp_read_info rp;
696 BT_DBG("sock %p %s", sk, hdev->name);
700 memset(&rp, 0, sizeof(rp));
702 bacpy(&rp.bdaddr, &hdev->bdaddr);
704 rp.version = hdev->hci_ver;
705 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
707 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
708 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
710 memcpy(rp.dev_class, hdev->dev_class, 3);
712 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
713 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
715 hci_dev_unlock(hdev);
717 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending_cmd (body elided in this extract — see upstream). */
721 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending_cmd tracking an in-flight mgmt command, copy the
 * request payload into it, and link it onto hdev->mgmt_pending. */
728 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
729 struct hci_dev *hdev, void *data,
732 struct pending_cmd *cmd;
734 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
738 cmd->opcode = opcode;
739 cmd->index = hdev->id;
741 cmd->param = kmalloc(len, GFP_KERNEL);
748 memcpy(cmd->param, data, len);
753 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke cb on every pending command matching 'opcode' (opcode 0 matches
 * all); uses the _safe iterator so cb may remove the entry. */
758 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
759 void (*cb)(struct pending_cmd *cmd,
763 struct list_head *p, *n;
765 list_for_each_safe(p, n, &hdev->mgmt_pending) {
766 struct pending_cmd *cmd;
768 cmd = list_entry(p, struct pending_cmd, list);
770 if (opcode > 0 && cmd->opcode != opcode)
/* Return the first pending command with the given opcode, or NULL. */
777 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
779 struct pending_cmd *cmd;
781 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
782 if (cmd->opcode == opcode)
/* Unlink a pending command from hdev->mgmt_pending and free it. */
789 static void mgmt_pending_remove(struct pending_cmd *cmd)
791 list_del(&cmd->list);
792 mgmt_pending_free(cmd);
/* Reply to 'opcode' with the current settings mask (little-endian u32). */
795 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
797 __le32 settings = cpu_to_le32(get_current_settings(hdev));
799 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler. Validates cp->val (0/1), short-circuits
 * when the requested state already holds or a power command is pending,
 * otherwise queues power_on/power_off work. The HCI_AUTO_OFF path cancels
 * the pending auto-off and reports the device as already powered. */
803 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
806 struct mgmt_mode *cp = data;
807 struct pending_cmd *cmd;
810 BT_DBG("request for %s", hdev->name);
812 if (cp->val != 0x00 && cp->val != 0x01)
813 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
814 MGMT_STATUS_INVALID_PARAMS);
818 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
819 cancel_delayed_work(&hdev->power_off);
822 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
824 err = mgmt_powered(hdev, 1);
/* Already in the requested state: just echo the current settings */
829 if (!!cp->val == hdev_is_powered(hdev)) {
830 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
834 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
835 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
840 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
847 queue_work(hdev->req_workqueue, &hdev->power_on);
849 queue_work(hdev->req_workqueue, &hdev->power_off.work);
854 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all control sockets except skip_sk. hdev may
 * be NULL, in which case the index is MGMT_INDEX_NONE. */
858 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
859 struct sock *skip_sk)
862 struct mgmt_hdr *hdr;
864 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
868 hdr = (void *) skb_put(skb, sizeof(*hdr));
869 hdr->opcode = cpu_to_le16(event);
871 hdr->index = cpu_to_le16(hdev->id);
873 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
874 hdr->len = cpu_to_le16(data_len);
877 memcpy(skb_put(skb, data_len), data, data_len);
879 /* Time stamp */
880 __net_timestamp(skb);
882 hci_send_to_control(skb, skip_sk);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings mask. */
888 static int new_settings(struct hci_dev *hdev, struct sock *skip)
892 ev = cpu_to_le32(get_current_settings(hdev));
894 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* MGMT_OP_SET_DISCOVERABLE handler. Validates val/timeout, rejects if the
 * device is not connectable, handles the unpowered case by toggling the
 * flag only, and otherwise writes the inquiry-scan enable bit, arming or
 * cancelling the discoverable-timeout work as requested. */
897 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
900 struct mgmt_cp_set_discoverable *cp = data;
901 struct pending_cmd *cmd;
906 BT_DBG("request for %s", hdev->name);
908 if (!lmp_bredr_capable(hdev))
909 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
910 MGMT_STATUS_NOT_SUPPORTED);
912 if (cp->val != 0x00 && cp->val != 0x01)
913 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
914 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense when enabling discoverability */
916 timeout = __le16_to_cpu(cp->timeout);
917 if (!cp->val && timeout > 0)
918 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
919 MGMT_STATUS_INVALID_PARAMS);
923 if (!hdev_is_powered(hdev) && timeout > 0) {
924 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
925 MGMT_STATUS_NOT_POWERED);
929 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
930 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
931 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable first */
936 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
937 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
938 MGMT_STATUS_REJECTED);
942 if (!hdev_is_powered(hdev)) {
943 bool changed = false;
945 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
946 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
950 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
955 err = new_settings(hdev, sk);
/* Already in the requested state: just (re)arm or cancel the timeout */
960 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
961 if (hdev->discov_timeout > 0) {
962 cancel_delayed_work(&hdev->discov_off);
963 hdev->discov_timeout = 0;
966 if (cp->val && timeout > 0) {
967 hdev->discov_timeout = timeout;
968 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
969 msecs_to_jiffies(hdev->discov_timeout * 1000));
972 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
976 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
985 scan |= SCAN_INQUIRY;
987 cancel_delayed_work(&hdev->discov_off);
989 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
991 mgmt_pending_remove(cmd);
994 hdev->discov_timeout = timeout;
997 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler. Unpowered: toggle the flag only
 * (disabling connectable also clears discoverable). Powered: write the
 * page-scan enable bit via HCI_OP_WRITE_SCAN_ENABLE, cancelling the
 * discoverable timeout when scanning is being turned off. */
1001 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1004 struct mgmt_mode *cp = data;
1005 struct pending_cmd *cmd;
1009 BT_DBG("request for %s", hdev->name);
1011 if (!lmp_bredr_capable(hdev))
1012 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1013 MGMT_STATUS_NOT_SUPPORTED);
1015 if (cp->val != 0x00 && cp->val != 0x01)
1016 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1017 MGMT_STATUS_INVALID_PARAMS);
1021 if (!hdev_is_powered(hdev)) {
1022 bool changed = false;
1024 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1028 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1030 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Not connectable implies not discoverable either */
1031 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1034 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1039 err = new_settings(hdev, sk);
1044 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1045 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1046 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
/* Page scan already in the requested state: nothing to write */
1051 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1052 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1056 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1067 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1068 hdev->discov_timeout > 0)
1069 cancel_delayed_work(&hdev->discov_off);
1072 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1074 mgmt_pending_remove(cmd);
1077 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: pure flag toggle, no HCI traffic. */
1081 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1084 struct mgmt_mode *cp = data;
1087 BT_DBG("request for %s", hdev->name);
1089 if (cp->val != 0x00 && cp->val != 0x01)
1090 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1091 MGMT_STATUS_INVALID_PARAMS);
1096 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1098 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1100 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1104 err = new_settings(hdev, sk);
1107 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler. Unpowered: toggle the flag only;
 * powered: send HCI_OP_WRITE_AUTH_ENABLE unless the HCI_AUTH flag already
 * matches the requested value. */
1111 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1114 struct mgmt_mode *cp = data;
1115 struct pending_cmd *cmd;
1119 BT_DBG("request for %s", hdev->name);
1121 if (!lmp_bredr_capable(hdev))
1122 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1123 MGMT_STATUS_NOT_SUPPORTED);
1125 if (cp->val != 0x00 && cp->val != 0x01)
1126 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1127 MGMT_STATUS_INVALID_PARAMS);
1131 if (!hdev_is_powered(hdev)) {
1132 bool changed = false;
1134 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1135 &hdev->dev_flags)) {
1136 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1140 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1145 err = new_settings(hdev, sk);
1150 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1151 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1158 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1159 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1163 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1169 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1171 mgmt_pending_remove(cmd);
1176 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler. Same pattern as set_link_security: flag-only
 * when unpowered, otherwise HCI_OP_WRITE_SSP_MODE if the state differs. */
1180 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1182 struct mgmt_mode *cp = data;
1183 struct pending_cmd *cmd;
1187 BT_DBG("request for %s", hdev->name);
1189 if (!lmp_ssp_capable(hdev))
1190 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1191 MGMT_STATUS_NOT_SUPPORTED);
1193 if (cp->val != 0x00 && cp->val != 0x01)
1194 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1195 MGMT_STATUS_INVALID_PARAMS);
1201 if (!hdev_is_powered(hdev)) {
1202 bool changed = false;
1204 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1205 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1209 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1214 err = new_settings(hdev, sk);
1219 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1220 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1225 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1226 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1230 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1236 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1238 mgmt_pending_remove(cmd);
1243 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: toggles the High Speed flag only — no HCI
 * command is involved. */
1247 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1249 struct mgmt_mode *cp = data;
1251 BT_DBG("request for %s", hdev->name);
1254 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1255 MGMT_STATUS_NOT_SUPPORTED);
1257 if (cp->val != 0x00 && cp->val != 0x01)
1258 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1259 MGMT_STATUS_INVALID_PARAMS);
1262 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1264 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1266 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* MGMT_OP_SET_LE handler. Flag-only when unpowered or when the host-LE
 * state already matches; otherwise sends HCI_OP_WRITE_LE_HOST_SUPPORTED
 * (simul set from lmp_le_br_capable()). */
1269 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1271 struct mgmt_mode *cp = data;
1272 struct hci_cp_write_le_host_supported hci_cp;
1273 struct pending_cmd *cmd;
1277 BT_DBG("request for %s", hdev->name);
1279 if (!lmp_le_capable(hdev))
1280 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1281 MGMT_STATUS_NOT_SUPPORTED);
1283 if (cp->val != 0x00 && cp->val != 0x01)
1284 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1285 MGMT_STATUS_INVALID_PARAMS);
1290 enabled = lmp_host_le_capable(hdev);
1292 if (!hdev_is_powered(hdev) || val == enabled) {
1293 bool changed = false;
1295 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1296 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1300 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1305 err = new_settings(hdev, sk);
1310 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1311 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1316 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1322 memset(&hci_cp, 0, sizeof(hci_cp));
1326 hci_cp.simul = lmp_le_br_capable(hdev);
1329 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1332 mgmt_pending_remove(cmd);
1335 hci_dev_unlock(hdev);
/* Bluetooth Base UUID (little-endian byte order); UUIDs sharing the first
 * 12 bytes can be shortened to 16/32 bits — see get_uuid_size(). */
1339 static const u8 bluetooth_base_uuid[] = {
1340 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1341 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as 16/32/128 bits: anything not built on the
 * base UUID is a full 128-bit value; otherwise the width is judged from
 * the 32-bit short value at bytes 12..15. */
1344 static u8 get_uuid_size(const u8 *uuid)
1348 if (memcmp(uuid, bluetooth_base_uuid, 12))
1351 val = get_unaligned_le32(&uuid[12]);
/* MGMT_OP_ADD_UUID handler: store the UUID, then refresh the class of
 * device and EIR. Busy while a class write is already pending; if
 * update_class() left one pending, reply is deferred via a pending_cmd. */
1358 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1360 struct mgmt_cp_add_uuid *cp = data;
1361 struct pending_cmd *cmd;
1362 struct bt_uuid *uuid;
1365 BT_DBG("request for %s", hdev->name);
1369 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1370 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1375 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1381 memcpy(uuid->uuid, cp->uuid, 16);
1382 uuid->svc_hint = cp->svc_hint;
1383 uuid->size = get_uuid_size(cp->uuid);
1385 list_add_tail(&uuid->list, &hdev->uuids);
1387 err = update_class(hdev);
1391 err = update_eir(hdev);
1395 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1396 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1397 hdev->dev_class, 3);
1401 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1406 hci_dev_unlock(hdev);
/* Arm the service cache on a powered device: set HCI_SERVICE_CACHE and
 * schedule service_cache_off(); returns whether the cache was (re)armed. */
1410 static bool enable_service_cache(struct hci_dev *hdev)
1412 if (!hdev_is_powered(hdev))
1415 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1416 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* MGMT_OP_REMOVE_UUID handler. An all-zero UUID clears the whole list
 * (possibly just re-arming the service cache); otherwise removes matching
 * entries, then refreshes class and EIR like add_uuid(). */
1424 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1427 struct mgmt_cp_remove_uuid *cp = data;
1428 struct pending_cmd *cmd;
1429 struct bt_uuid *match, *tmp;
1430 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1433 BT_DBG("request for %s", hdev->name);
1437 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1438 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard: drop every stored UUID */
1443 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1444 err = hci_uuids_clear(hdev);
1446 if (enable_service_cache(hdev)) {
1447 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1448 0, hdev->dev_class, 3);
1457 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1458 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1461 list_del(&match->list);
/* No entry matched the requested UUID */
1467 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1468 MGMT_STATUS_INVALID_PARAMS);
1473 err = update_class(hdev);
1477 err = update_eir(hdev);
1481 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1482 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1483 hdev->dev_class, 3);
1487 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1492 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEV_CLASS handler. Validates reserved bits (low 2 of minor,
 * high 3 of major must be zero), stores the new major/minor, flushes any
 * active service cache, then pushes the class via update_class(). */
1496 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1499 struct mgmt_cp_set_dev_class *cp = data;
1500 struct pending_cmd *cmd;
1503 BT_DBG("request for %s", hdev->name);
1505 if (!lmp_bredr_capable(hdev))
1506 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1507 MGMT_STATUS_NOT_SUPPORTED);
1509 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags))
1510 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1513 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0)
1514 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1515 MGMT_STATUS_INVALID_PARAMS);
1519 hdev->major_class = cp->major;
1520 hdev->minor_class = cp->minor;
1522 if (!hdev_is_powered(hdev)) {
1523 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1524 hdev->dev_class, 3);
/* Drop the lock before cancel_delayed_work_sync: the work handler
 * takes hdev_lock itself (see service_cache_off) */
1528 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1529 hci_dev_unlock(hdev);
1530 cancel_delayed_work_sync(&hdev->service_cache);
1535 err = update_class(hdev);
1539 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1540 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1541 hdev->dev_class, 3);
1545 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1550 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler. Validates the payload length against
 * key_count, the debug_keys flag and every address type, then replaces
 * the stored link keys wholesale. */
1554 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1557 struct mgmt_cp_load_link_keys *cp = data;
1558 u16 key_count, expected_len;
1561 key_count = __le16_to_cpu(cp->key_count);
/* Exact-length check guards against a key_count/payload mismatch */
1563 expected_len = sizeof(*cp) + key_count *
1564 sizeof(struct mgmt_link_key_info);
1565 if (expected_len != len) {
1566 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1568 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1569 MGMT_STATUS_INVALID_PARAMS);
1572 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1573 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1574 MGMT_STATUS_INVALID_PARAMS);
1576 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Link keys are BR/EDR-only: reject any non-BR/EDR address up front */
1579 for (i = 0; i < key_count; i++) {
1580 struct mgmt_link_key_info *key = &cp->keys[i];
1582 if (key->addr.type != BDADDR_BREDR)
1583 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1584 MGMT_STATUS_INVALID_PARAMS);
1589 hci_link_keys_clear(hdev);
1591 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1594 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1596 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1598 for (i = 0; i < key_count; i++) {
1599 struct mgmt_link_key_info *key = &cp->keys[i];
1601 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1602 key->type, key->pin_len);
1605 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1607 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for bdaddr/addr_type to all mgmt sockets
 * except skip_sk (typically the socket that requested the unpair and
 * already gets a command reply).
 */
1612 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1613 u8 addr_type, struct sock *skip_sk)
1615 struct mgmt_ev_device_unpaired ev;
1617 bacpy(&ev.addr.bdaddr, bdaddr);
1618 ev.addr.type = addr_type;
1620 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device: remove stored keys for the address (link key for
 * BR/EDR, LTK otherwise) and optionally disconnect the live connection.
 * Reply is deferred behind the HCI Disconnect when a connection exists
 * and disconnect was requested; otherwise completes immediately.
 * NOTE(review): elided excerpt — several branch/return lines missing.
 */
1624 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1627 struct mgmt_cp_unpair_device *cp = data;
1628 struct mgmt_rp_unpair_device rp;
1629 struct hci_cp_disconnect dc;
1630 struct pending_cmd *cmd;
1631 struct hci_conn *conn;
/* Reply always echoes the target address, even on error. */
1634 memset(&rp, 0, sizeof(rp));
1635 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1636 rp.addr.type = cp->addr.type;
1638 if (!bdaddr_type_is_valid(cp->addr.type))
1639 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1640 MGMT_STATUS_INVALID_PARAMS,
1643 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1644 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1645 MGMT_STATUS_INVALID_PARAMS,
1650 if (!hdev_is_powered(hdev)) {
1651 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1652 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Key store is per-transport: link key for BR/EDR, LTK for LE. */
1656 if (cp->addr.type == BDADDR_BREDR)
1657 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1659 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1662 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1663 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1667 if (cp->disconnect) {
1668 if (cp->addr.type == BDADDR_BREDR)
1669 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1672 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No connection (or no disconnect requested): finish now and
 * broadcast the unpaired event to other listeners. */
1679 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1681 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1685 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1692 dc.handle = cpu_to_le16(conn->handle);
1693 dc.reason = 0x13; /* Remote User Terminated Connection */
1694 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1696 mgmt_pending_remove(cmd);
1699 hci_dev_unlock(hdev);
/* MGMT Disconnect: request HCI disconnection of an existing ACL/LE link.
 * Fails with NOT_POWERED, BUSY (another disconnect pending) or
 * NOT_CONNECTED; otherwise queues the reply behind HCI_OP_DISCONNECT.
 * NOTE(review): elided excerpt — goto/return lines missing from view.
 */
1703 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1706 struct mgmt_cp_disconnect *cp = data;
1707 struct mgmt_rp_disconnect rp;
1708 struct hci_cp_disconnect dc;
1709 struct pending_cmd *cmd;
1710 struct hci_conn *conn;
1715 memset(&rp, 0, sizeof(rp));
1716 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1717 rp.addr.type = cp->addr.type;
1719 if (!bdaddr_type_is_valid(cp->addr.type))
1720 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1721 MGMT_STATUS_INVALID_PARAMS,
1726 if (!test_bit(HCI_UP, &hdev->flags)) {
1727 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1728 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one mgmt-initiated disconnect may be outstanding. */
1732 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1733 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1734 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1738 if (cp->addr.type == BDADDR_BREDR)
1739 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1742 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED connections are not usable link handles. */
1744 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1745 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1746 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1750 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1756 dc.handle = cpu_to_le16(conn->handle);
1757 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1759 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1761 mgmt_pending_remove(cmd);
1764 hci_dev_unlock(hdev);
/* Map an HCI link type + LE address type to the mgmt BDADDR_* address
 * type used on the wire: LE links become LE_PUBLIC/LE_RANDOM, anything
 * else is reported as BR/EDR.
 */
1768 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1770 switch (link_type) {
1772 switch (addr_type) {
1773 case ADDR_LE_DEV_PUBLIC:
1774 return BDADDR_LE_PUBLIC;
1777 /* Fallback to LE Random address type */
1778 return BDADDR_LE_RANDOM;
1782 /* Fallback to BR/EDR type */
1783 return BDADDR_BREDR;
/* MGMT Get Connections: return the address of every mgmt-visible
 * connection. Two passes over the conn hash: first to size the reply
 * buffer, second to fill it (SCO/eSCO links are filtered out, so the
 * reply length is recomputed afterwards).
 * NOTE(review): elided excerpt — kmalloc NULL check etc. not visible.
 */
1787 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1790 struct mgmt_rp_get_connections *rp;
1800 if (!hdev_is_powered(hdev)) {
1801 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1802 MGMT_STATUS_NOT_POWERED);
/* Pass 1: count connections to size the variable-length reply. */
1807 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1808 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1812 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1813 rp = kmalloc(rp_len, GFP_KERNEL);
/* Pass 2: fill in the address list, skipping SCO/eSCO links. */
1820 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1821 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1823 bacpy(&rp->addr[i].bdaddr, &c->dst);
1824 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1825 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1830 rp->conn_count = cpu_to_le16(i);
1832 /* Recalculate length in case of filtered SCO connections, etc */
1833 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1835 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1841 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY mgmt command and send the matching
 * HCI negative reply (payload is just the bdaddr). The pending entry is
 * removed again if the HCI send fails.
 */
1845 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1846 struct mgmt_cp_pin_code_neg_reply *cp)
1848 struct pending_cmd *cmd;
1851 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1856 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1857 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
1859 mgmt_pending_remove(cmd);
/* MGMT PIN Code Reply: forward a user-supplied PIN to the controller.
 * If the pending security level demands a 16-digit PIN and the reply is
 * shorter, the PIN is rejected via an automatic negative reply instead.
 * NOTE(review): elided excerpt — error/unlock paths not fully visible.
 */
1864 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
1867 struct hci_conn *conn;
1868 struct mgmt_cp_pin_code_reply *cp = data;
1869 struct hci_cp_pin_code_reply reply;
1870 struct pending_cmd *cmd;
1877 if (!hdev_is_powered(hdev)) {
1878 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1879 MGMT_STATUS_NOT_POWERED);
/* PIN pairing is BR/EDR (ACL) only. */
1883 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1885 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1886 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; anything shorter is
 * converted into a negative reply toward the remote. */
1890 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
1891 struct mgmt_cp_pin_code_neg_reply ncp;
1893 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
1895 BT_ERR("PIN code is not 16 bytes long");
1897 err = send_pin_code_neg_reply(sk, hdev, &ncp);
1899 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1900 MGMT_STATUS_INVALID_PARAMS);
1905 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
1911 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
1912 reply.pin_len = cp->pin_len;
1913 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
1915 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
1917 mgmt_pending_remove(cmd);
1920 hci_dev_unlock(hdev);
/* MGMT Set IO Capability: record the IO capability used for future
 * pairing attempts. Purely local state, so it completes immediately.
 */
1924 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1927 struct mgmt_cp_set_io_capability *cp = data;
1933 hdev->io_capability = cp->io_capability;
1935 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1936 hdev->io_capability);
1938 hci_dev_unlock(hdev);
1940 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending PAIR_DEVICE command whose user_data is this
 * connection, or NULL if none is outstanding.
 */
1944 static struct pending_cmd *find_pairing(struct hci_conn *conn)
1946 struct hci_dev *hdev = conn->hdev;
1947 struct pending_cmd *cmd;
1949 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1950 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
1953 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: send the reply with the given mgmt
 * status, detach all pairing callbacks from the connection so no
 * further events fire, and drop the pending command.
 */
1962 static void pairing_complete(struct pending_cmd *cmd, u8 status)
1964 struct mgmt_rp_pair_device rp;
1965 struct hci_conn *conn = cmd->user_data;
1967 bacpy(&rp.addr.bdaddr, &conn->dst);
1968 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
1970 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
1973 /* So we don't get further callbacks for this connection */
1974 conn->connect_cfm_cb = NULL;
1975 conn->security_cfm_cb = NULL;
1976 conn->disconn_cfm_cb = NULL;
1980 mgmt_pending_remove(cmd);
/* hci_conn callback: translate an HCI status into a mgmt status and
 * complete the matching pending pairing, if one exists.
 */
1983 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
1985 struct pending_cmd *cmd;
1987 BT_DBG("status %u", status);
1989 cmd = find_pairing(conn);
1991 BT_DBG("Unable to find a pending command");
1993 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback: for LE a successful connection does not
 * mean pairing is done, so (per the elided guard) only failures reach
 * pairing_complete() here — assumes a status!=0 check on a missing line.
 */
1996 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
1998 struct pending_cmd *cmd;
2000 BT_DBG("status %u", status);
2005 cmd = find_pairing(conn);
2007 BT_DBG("Unable to find a pending command");
2009 pairing_complete(cmd, mgmt_status(status));
/* MGMT Pair Device: initiate dedicated bonding with a remote device.
 * Creates (or reuses) an ACL/LE connection, wires pairing callbacks
 * onto it and defers the reply until a callback completes the pairing.
 * NOTE(review): elided excerpt — IS_ERR(conn) handling and several
 * unlock/return lines are not visible here.
 */
2012 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2015 struct mgmt_cp_pair_device *cp = data;
2016 struct mgmt_rp_pair_device rp;
2017 struct pending_cmd *cmd;
2018 u8 sec_level, auth_type;
2019 struct hci_conn *conn;
2024 memset(&rp, 0, sizeof(rp));
2025 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2026 rp.addr.type = cp->addr.type;
2028 if (!bdaddr_type_is_valid(cp->addr.type))
2029 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2030 MGMT_STATUS_INVALID_PARAMS,
2035 if (!hdev_is_powered(hdev)) {
2036 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2037 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 (NoInputNoOutput) cannot do MITM protection. */
2041 sec_level = BT_SECURITY_MEDIUM;
2042 if (cp->io_cap == 0x03)
2043 auth_type = HCI_AT_DEDICATED_BONDING;
2045 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2047 if (cp->addr.type == BDADDR_BREDR)
2048 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2049 cp->addr.type, sec_level, auth_type);
2051 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2052 cp->addr.type, sec_level, auth_type);
2057 if (PTR_ERR(conn) == -EBUSY)
2058 status = MGMT_STATUS_BUSY;
2060 status = MGMT_STATUS_CONNECT_FAILED;
2062 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect_cfm_cb already set means another pairing owns this
 * connection. */
2068 if (conn->connect_cfm_cb) {
2070 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2071 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2075 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2082 /* For LE, just connecting isn't a proof that the pairing finished */
2083 if (cp->addr.type == BDADDR_BREDR)
2084 conn->connect_cfm_cb = pairing_complete_cb;
2086 conn->connect_cfm_cb = le_connect_complete_cb;
2088 conn->security_cfm_cb = pairing_complete_cb;
2089 conn->disconn_cfm_cb = pairing_complete_cb;
2090 conn->io_capability = cp->io_cap;
2091 cmd->user_data = conn;
/* Already connected and secure enough: complete right away. */
2093 if (conn->state == BT_CONNECTED &&
2094 hci_conn_security(conn, sec_level, auth_type))
2095 pairing_complete(cmd, 0);
2100 hci_dev_unlock(hdev);
/* MGMT Cancel Pair Device: abort the in-flight PAIR_DEVICE command for
 * the given address. The address must match the connection the pending
 * pairing is attached to; the pairing is completed with CANCELLED.
 */
2104 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2107 struct mgmt_addr_info *addr = data;
2108 struct pending_cmd *cmd;
2109 struct hci_conn *conn;
2116 if (!hdev_is_powered(hdev)) {
2117 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2118 MGMT_STATUS_NOT_POWERED);
2122 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2124 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2125 MGMT_STATUS_INVALID_PARAMS);
2129 conn = cmd->user_data;
/* The cancel must target the same device the pairing targets. */
2131 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2132 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2133 MGMT_STATUS_INVALID_PARAMS);
2137 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2139 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2140 addr, sizeof(*addr));
2142 hci_dev_unlock(hdev);
/* Common worker for all user pairing responses (PIN neg, confirm,
 * passkey and their negatives). LE responses are routed through SMP;
 * BR/EDR responses are forwarded as the given HCI opcode, with the
 * passkey appended only for HCI_OP_USER_PASSKEY_REPLY.
 * NOTE(review): elided excerpt — unlock/return lines not all visible.
 */
2146 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2147 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
2148 u16 hci_op, __le32 passkey)
2150 struct pending_cmd *cmd;
2151 struct hci_conn *conn;
2156 if (!hdev_is_powered(hdev)) {
2157 err = cmd_status(sk, hdev->id, mgmt_op,
2158 MGMT_STATUS_NOT_POWERED);
2162 if (type == BDADDR_BREDR)
2163 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
2165 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
2168 err = cmd_status(sk, hdev->id, mgmt_op,
2169 MGMT_STATUS_NOT_CONNECTED);
/* LE pairing is handled entirely by SMP, not raw HCI commands. */
2173 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
2174 /* Continue with pairing via SMP */
2175 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2178 err = cmd_status(sk, hdev->id, mgmt_op,
2179 MGMT_STATUS_SUCCESS);
2181 err = cmd_status(sk, hdev->id, mgmt_op,
2182 MGMT_STATUS_FAILED);
2187 cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
2193 /* Continue with pairing via HCI */
2194 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2195 struct hci_cp_user_passkey_reply cp;
2197 bacpy(&cp.bdaddr, bdaddr);
2198 cp.passkey = passkey;
2199 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2201 err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
2204 mgmt_pending_remove(cmd);
2207 hci_dev_unlock(hdev);
/* MGMT PIN Code Neg Reply: thin wrapper over user_pairing_resp() with
 * the PIN-code-negative opcodes and no passkey.
 */
2211 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2212 void *data, u16 len)
2214 struct mgmt_cp_pin_code_neg_reply *cp = data;
2218 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2219 MGMT_OP_PIN_CODE_NEG_REPLY,
2220 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT User Confirm Reply: validate exact payload size, then delegate
 * to user_pairing_resp() with the confirm opcodes.
 */
2223 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2226 struct mgmt_cp_user_confirm_reply *cp = data;
2230 if (len != sizeof(*cp))
2231 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2232 MGMT_STATUS_INVALID_PARAMS);
2234 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2235 MGMT_OP_USER_CONFIRM_REPLY,
2236 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT User Confirm Neg Reply: wrapper over user_pairing_resp() with
 * the negative-confirm opcodes.
 */
2239 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2240 void *data, u16 len)
2242 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2246 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2247 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2248 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT User Passkey Reply: wrapper over user_pairing_resp(); this is
 * the only variant that forwards the 32-bit passkey.
 */
2251 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2254 struct mgmt_cp_user_passkey_reply *cp = data;
2258 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2259 MGMT_OP_USER_PASSKEY_REPLY,
2260 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT User Passkey Neg Reply: wrapper over user_pairing_resp() with
 * the negative-passkey opcodes and no passkey value.
 */
2263 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2264 void *data, u16 len)
2266 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2270 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2271 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2272 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Send HCI Write Local Name with the given name. The name buffer is
 * copied for sizeof(cp.name) bytes; assumes the caller provides a
 * buffer at least that large (hdev->dev_name) — TODO confirm.
 */
2275 static int update_name(struct hci_dev *hdev, const char *name)
2277 struct hci_cp_write_local_name cp;
2279 memcpy(cp.name, name, sizeof(cp.name));
2281 return hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* MGMT Set Local Name: store the short name immediately; when powered
 * off just store the full name too and broadcast LOCAL_NAME_CHANGED,
 * otherwise defer the reply behind the HCI Write Local Name command.
 * NOTE(review): elided excerpt — some return/unlock lines not visible.
 */
2284 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2287 struct mgmt_cp_set_local_name *cp = data;
2288 struct pending_cmd *cmd;
2295 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2297 if (!hdev_is_powered(hdev)) {
2298 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2300 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2305 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2311 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2317 err = update_name(hdev, cp->name);
2319 mgmt_pending_remove(cmd);
2322 hci_dev_unlock(hdev);
/* MGMT Read Local OOB Data: request the controller's Simple Pairing
 * hash/randomizer. Requires power and SSP support, allows only one
 * outstanding request, and replies from the HCI command's completion.
 */
2326 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2327 void *data, u16 data_len)
2329 struct pending_cmd *cmd;
2332 BT_DBG("%s", hdev->name);
2336 if (!hdev_is_powered(hdev)) {
2337 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2338 MGMT_STATUS_NOT_POWERED);
2342 if (!lmp_ssp_capable(hdev)) {
2343 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2344 MGMT_STATUS_NOT_SUPPORTED);
2348 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2349 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2354 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2360 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2362 mgmt_pending_remove(cmd);
2365 hci_dev_unlock(hdev);
/* MGMT Add Remote OOB Data: store a remote device's OOB hash and
 * randomizer; replies immediately with SUCCESS or FAILED echoing the
 * target address.
 */
2369 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2370 void *data, u16 len)
2372 struct mgmt_cp_add_remote_oob_data *cp = data;
2376 BT_DBG("%s ", hdev->name);
2380 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2383 status = MGMT_STATUS_FAILED;
2385 status = MGMT_STATUS_SUCCESS;
2387 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2388 &cp->addr, sizeof(cp->addr));
2390 hci_dev_unlock(hdev);
/* MGMT Remove Remote OOB Data: delete stored OOB data for the address;
 * a failed removal (no such entry, per the elided err check) maps to
 * INVALID_PARAMS.
 */
2394 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2395 void *data, u16 len)
2397 struct mgmt_cp_remove_remote_oob_data *cp = data;
2401 BT_DBG("%s", hdev->name);
2405 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2407 status = MGMT_STATUS_INVALID_PARAMS;
2409 status = MGMT_STATUS_SUCCESS;
2411 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2412 status, &cp->addr, sizeof(cp->addr));
2414 hci_dev_unlock(hdev);
/* Switch an interleaved (BR/EDR + LE) discovery to its inquiry phase;
 * on failure to start the inquiry the discovery state is reset to
 * STOPPED.
 */
2418 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2422 BT_DBG("%s", hdev->name);
2426 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2428 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2430 hci_dev_unlock(hdev);
/* MGMT Start Discovery: kick off BR/EDR inquiry, LE scan, or the
 * interleaved combination depending on cp->type, after checking power,
 * periodic-inquiry and current discovery state. Capability of the
 * controller is verified per discovery type; on success the state moves
 * to DISCOVERY_STARTING and the reply is deferred.
 * NOTE(review): elided excerpt — break/goto lines between cases missing.
 */
2435 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2436 void *data, u16 len)
2438 struct mgmt_cp_start_discovery *cp = data;
2439 struct pending_cmd *cmd;
2442 BT_DBG("%s", hdev->name);
2446 if (!hdev_is_powered(hdev)) {
2447 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2448 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and mgmt discovery are mutually exclusive. */
2452 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2453 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2458 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2459 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2464 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2470 hdev->discovery.type = cp->type;
2472 switch (hdev->discovery.type) {
2473 case DISCOV_TYPE_BREDR:
2474 if (!lmp_bredr_capable(hdev)) {
2475 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2476 MGMT_STATUS_NOT_SUPPORTED);
2477 mgmt_pending_remove(cmd);
2481 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2484 case DISCOV_TYPE_LE:
2485 if (!lmp_host_le_capable(hdev)) {
2486 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2487 MGMT_STATUS_NOT_SUPPORTED);
2488 mgmt_pending_remove(cmd);
2492 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2493 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
2496 case DISCOV_TYPE_INTERLEAVED:
/* Interleaved needs both transports on the controller. */
2497 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2498 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2499 MGMT_STATUS_NOT_SUPPORTED);
2500 mgmt_pending_remove(cmd);
2504 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2505 LE_SCAN_TIMEOUT_BREDR_LE);
2509 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2510 MGMT_STATUS_INVALID_PARAMS);
2511 mgmt_pending_remove(cmd);
2516 mgmt_pending_remove(cmd);
2518 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2521 hci_dev_unlock(hdev);
/* MGMT Stop Discovery: cancel whatever discovery phase is running —
 * inquiry or LE scan while FINDING, or a remote name request while
 * RESOLVING. Rejects if no discovery is active or the type does not
 * match the one that was started.
 * NOTE(review): elided excerpt — break/goto and default lines missing.
 */
2525 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2528 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2529 struct pending_cmd *cmd;
2530 struct hci_cp_remote_name_req_cancel cp;
2531 struct inquiry_entry *e;
2534 BT_DBG("%s", hdev->name);
2538 if (!hci_discovery_active(hdev)) {
2539 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2540 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2541 sizeof(mgmt_cp->type));
2545 if (hdev->discovery.type != mgmt_cp->type) {
2546 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2547 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2548 sizeof(mgmt_cp->type));
2552 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2558 switch (hdev->discovery.state) {
2559 case DISCOVERY_FINDING:
2560 if (test_bit(HCI_INQUIRY, &hdev->flags))
2561 err = hci_cancel_inquiry(hdev);
2563 err = hci_cancel_le_scan(hdev);
2567 case DISCOVERY_RESOLVING:
2568 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* No name resolution in flight: nothing to cancel, done now. */
2571 mgmt_pending_remove(cmd);
2572 err = cmd_complete(sk, hdev->id,
2573 MGMT_OP_STOP_DISCOVERY, 0,
2575 sizeof(mgmt_cp->type));
2576 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2580 bacpy(&cp.bdaddr, &e->data.bdaddr);
2581 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2587 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2592 mgmt_pending_remove(cmd);
2594 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2597 hci_dev_unlock(hdev);
/* MGMT Confirm Name: user-space answers whether the name of a
 * discovered device is already known. Known -> mark the inquiry cache
 * entry NAME_KNOWN; unknown -> NAME_NEEDED and schedule it for name
 * resolution.
 */
2601 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2604 struct mgmt_cp_confirm_name *cp = data;
2605 struct inquiry_entry *e;
2608 BT_DBG("%s", hdev->name);
2612 if (!hci_discovery_active(hdev)) {
2613 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2614 MGMT_STATUS_FAILED);
2618 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2620 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2621 MGMT_STATUS_INVALID_PARAMS);
2625 if (cp->name_known) {
2626 e->name_state = NAME_KNOWN;
2629 e->name_state = NAME_NEEDED;
2630 hci_inquiry_cache_update_resolve(hdev, e);
2633 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2637 hci_dev_unlock(hdev);
/* MGMT Block Device: add the address to the controller's blacklist.
 * A failed add (per the elided err check) maps to FAILED; the reply
 * echoes the address in both success and error cases.
 */
2641 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2644 struct mgmt_cp_block_device *cp = data;
2648 BT_DBG("%s", hdev->name);
2650 if (!bdaddr_type_is_valid(cp->addr.type))
2651 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2652 MGMT_STATUS_INVALID_PARAMS,
2653 &cp->addr, sizeof(cp->addr));
2657 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2659 status = MGMT_STATUS_FAILED;
2661 status = MGMT_STATUS_SUCCESS;
2663 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2664 &cp->addr, sizeof(cp->addr));
2666 hci_dev_unlock(hdev);
/* MGMT Unblock Device: remove the address from the blacklist. A failed
 * delete (entry not present, per the elided err check) maps to
 * INVALID_PARAMS; the reply echoes the address.
 */
2671 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2674 struct mgmt_cp_unblock_device *cp = data;
2678 BT_DBG("%s", hdev->name);
2680 if (!bdaddr_type_is_valid(cp->addr.type))
2681 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2682 MGMT_STATUS_INVALID_PARAMS,
2683 &cp->addr, sizeof(cp->addr));
2687 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2689 status = MGMT_STATUS_INVALID_PARAMS;
2691 status = MGMT_STATUS_SUCCESS;
2693 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2694 &cp->addr, sizeof(cp->addr));
2696 hci_dev_unlock(hdev);
/* MGMT Set Device ID: store the DI (Device ID) source/vendor/product/
 * version for EIR/SDP use. Source must be 0x0000-0x0002 (disabled,
 * Bluetooth SIG, USB IF).
 */
2701 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2704 struct mgmt_cp_set_device_id *cp = data;
2708 BT_DBG("%s", hdev->name);
2710 source = __le16_to_cpu(cp->source);
2712 if (source > 0x0002)
2713 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2714 MGMT_STATUS_INVALID_PARAMS);
2718 hdev->devid_source = source;
2719 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2720 hdev->devid_product = __le16_to_cpu(cp->product);
2721 hdev->devid_version = __le16_to_cpu(cp->version);
2723 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2727 hci_dev_unlock(hdev);
/* MGMT Set Fast Connectable: toggle between interlaced page scan with
 * a 160 ms interval (fast) and standard page scan with the default
 * 1.28 s interval, window fixed at 11.25 ms. Requires BR/EDR, power,
 * and the CONNECTABLE setting to be on.
 * NOTE(review): elided excerpt — the cp->val branch line is missing.
 */
2732 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2733 void *data, u16 len)
2735 struct mgmt_mode *cp = data;
2736 struct hci_cp_write_page_scan_activity acp;
2740 BT_DBG("%s", hdev->name);
2742 if (!lmp_bredr_capable(hdev))
2743 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2744 MGMT_STATUS_NOT_SUPPORTED);
2746 if (cp->val != 0x00 && cp->val != 0x01)
2747 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2748 MGMT_STATUS_INVALID_PARAMS);
2750 if (!hdev_is_powered(hdev))
2751 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2752 MGMT_STATUS_NOT_POWERED);
2754 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2755 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2756 MGMT_STATUS_REJECTED);
2761 type = PAGE_SCAN_TYPE_INTERLACED;
2763 /* 160 msec page scan interval */
2764 acp.interval = __constant_cpu_to_le16(0x0100);
2766 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2768 /* default 1.28 sec page scan */
2769 acp.interval = __constant_cpu_to_le16(0x0800);
2772 /* default 11.25 msec page scan window */
2773 acp.window = __constant_cpu_to_le16(0x0012);
/* Two HCI writes: activity (interval/window) then scan type. */
2775 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
2778 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2779 MGMT_STATUS_FAILED);
2783 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
2785 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2786 MGMT_STATUS_FAILED);
2790 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0,
2793 hci_dev_unlock(hdev);
/* Validate one LTK record: authenticated and master flags must be 0/1
 * and the address type must be an LE type.
 */
2797 static bool ltk_is_valid(struct mgmt_ltk_info *key)
2799 if (key->authenticated != 0x00 && key->authenticated != 0x01)
2801 if (key->master != 0x00 && key->master != 0x01)
2803 if (!bdaddr_type_is_le(key->addr.type))
/* MGMT Load Long Term Keys: replace all stored SMP LTKs with the
 * supplied set. Payload size and every key are validated before the
 * existing store is cleared and repopulated.
 * NOTE(review): elided excerpt — the master/slave type selection branch
 * is only partially visible.
 */
2808 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2809 void *cp_data, u16 len)
2811 struct mgmt_cp_load_long_term_keys *cp = cp_data;
2812 u16 key_count, expected_len;
2815 key_count = __le16_to_cpu(cp->key_count);
/* Exact-size check: header plus key_count fixed-size LTK records. */
2817 expected_len = sizeof(*cp) + key_count *
2818 sizeof(struct mgmt_ltk_info);
2819 if (expected_len != len) {
2820 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2822 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2823 MGMT_STATUS_INVALID_PARAMS);
2826 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate-all-before-apply, mirroring load_link_keys(). */
2828 for (i = 0; i < key_count; i++) {
2829 struct mgmt_ltk_info *key = &cp->keys[i];
2831 if (!ltk_is_valid(key))
2832 return cmd_status(sk, hdev->id,
2833 MGMT_OP_LOAD_LONG_TERM_KEYS,
2834 MGMT_STATUS_INVALID_PARAMS);
2839 hci_smp_ltks_clear(hdev);
2841 for (i = 0; i < key_count; i++) {
2842 struct mgmt_ltk_info *key = &cp->keys[i];
2848 type = HCI_SMP_LTK_SLAVE;
2850 hci_add_ltk(hdev, &key->addr.bdaddr,
2851 bdaddr_to_le(key->addr.type),
2852 type, 0, key->authenticated, key->val,
2853 key->enc_size, key->ediv, key->rand);
2856 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
2859 hci_dev_unlock(hdev);
/* Dispatch table indexed by MGMT opcode. Each entry names the handler,
 * whether the command payload is variable-length (var_len: data_len is
 * then a minimum, otherwise an exact size), and the expected size.
 * Order must match the MGMT_OP_* opcode numbering exactly.
 */
2864 static const struct mgmt_handler {
2865 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
2869 } mgmt_handlers[] = {
2870 { NULL }, /* 0x0000 (no command) */
2871 { read_version, false, MGMT_READ_VERSION_SIZE },
2872 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
2873 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
2874 { read_controller_info, false, MGMT_READ_INFO_SIZE },
2875 { set_powered, false, MGMT_SETTING_SIZE },
2876 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
2877 { set_connectable, false, MGMT_SETTING_SIZE },
2878 { set_fast_connectable, false, MGMT_SETTING_SIZE },
2879 { set_pairable, false, MGMT_SETTING_SIZE },
2880 { set_link_security, false, MGMT_SETTING_SIZE },
2881 { set_ssp, false, MGMT_SETTING_SIZE },
2882 { set_hs, false, MGMT_SETTING_SIZE },
2883 { set_le, false, MGMT_SETTING_SIZE },
2884 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
2885 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
2886 { add_uuid, false, MGMT_ADD_UUID_SIZE },
2887 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
/* Key-load commands carry a variable-length key array. */
2888 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
2889 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
2890 { disconnect, false, MGMT_DISCONNECT_SIZE },
2891 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
2892 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
2893 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
2894 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
2895 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
2896 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
2897 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
2898 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
2899 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
2900 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
2901 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
2902 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
2903 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
2904 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
2905 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
2906 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
2907 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
2908 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
2909 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
2910 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for MGMT messages from user space: copy the datagram,
 * parse the header (opcode/index/len), resolve the hci_dev for indexed
 * commands, validate opcode range, index-vs-opcode pairing and payload
 * size against mgmt_handlers[], then dispatch.
 * NOTE(review): elided excerpt — done/cleanup (kfree, hci_dev_put)
 * lines are not visible here.
 */
2914 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2918 struct mgmt_hdr *hdr;
2919 u16 opcode, index, len;
2920 struct hci_dev *hdev = NULL;
2921 const struct mgmt_handler *handler;
2924 BT_DBG("got %zu bytes", msglen);
2926 if (msglen < sizeof(*hdr))
2929 buf = kmalloc(msglen, GFP_KERNEL);
2933 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
2939 opcode = __le16_to_cpu(hdr->opcode);
2940 index = __le16_to_cpu(hdr->index);
2941 len = __le16_to_cpu(hdr->len);
/* Header length field must match the actual payload size. */
2943 if (len != msglen - sizeof(*hdr)) {
2948 if (index != MGMT_INDEX_NONE) {
2949 hdev = hci_dev_get(index);
2951 err = cmd_status(sk, index, opcode,
2952 MGMT_STATUS_INVALID_INDEX);
2957 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
2958 mgmt_handlers[opcode].func == NULL) {
2959 BT_DBG("Unknown op %u", opcode);
2960 err = cmd_status(sk, index, opcode,
2961 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below READ_INFO are global-only; the rest need an index. */
2965 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
2966 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
2967 err = cmd_status(sk, index, opcode,
2968 MGMT_STATUS_INVALID_INDEX);
2972 handler = &mgmt_handlers[opcode];
/* var_len handlers take a minimum size, fixed ones an exact size. */
2974 if ((handler->var_len && len < handler->data_len) ||
2975 (!handler->var_len && len != handler->data_len)) {
2976 err = cmd_status(sk, index, opcode,
2977 MGMT_STATUS_INVALID_PARAMS);
2982 mgmt_init_hdev(sk, hdev);
2984 cp = buf + sizeof(*hdr);
2986 err = handler->func(sk, hdev, cp, len);
/* mgmt_pending_foreach callback: answer a pending command with the
 * status pointed to by data, then drop the pending entry.
 */
3000 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3004 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3005 mgmt_pending_remove(cmd);
/* Broadcast MGMT_EV_INDEX_ADDED for a newly registered, mgmt-visible
 * controller.
 */
3008 int mgmt_index_added(struct hci_dev *hdev)
3010 if (!mgmt_valid_hdev(hdev))
3013 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Controller going away: fail every pending mgmt command with
 * INVALID_INDEX (opcode 0 = all opcodes), then broadcast INDEX_REMOVED.
 */
3016 int mgmt_index_removed(struct hci_dev *hdev)
3018 u8 status = MGMT_STATUS_INVALID_INDEX;
3020 if (!mgmt_valid_hdev(hdev))
3023 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3025 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3030 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: reply to a pending settings command
 * with the current settings, remember the first requester's socket in
 * the cmd_lookup match (held so it can be notified later), and free
 * the pending entry.
 */
3034 static void settings_rsp(struct pending_cmd *cmd, void *data)
3036 struct cmd_lookup *match = data;
3038 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3040 list_del(&cmd->list);
3042 if (match->sk == NULL) {
3043 match->sk = cmd->sk;
3044 sock_hold(match->sk);
3047 mgmt_pending_free(cmd);
/* Program the BR/EDR scan mode from the current dev_flags: page scan
 * when connectable, plus inquiry scan when discoverable.
 */
3050 static int set_bredr_scan(struct hci_dev *hdev)
3054 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3056 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3057 scan |= SCAN_INQUIRY;
3062 return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Power-state change notification. On power-up: re-sync controller
 * state with the mgmt settings (SSP mode, LE host support, auth
 * enable, scan mode, local name). On power-down: fail all pending
 * commands with NOT_POWERED and, if the class of device was non-zero,
 * announce it reset. Finally emits a New Settings event.
 * NOTE(review): elided excerpt — the powered/else branch structure is
 * only partially visible.
 */
3065 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3067 struct cmd_lookup match = { NULL, hdev };
3070 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3073 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
/* mgmt wants SSP on but the host feature bit is not set yet. */
3078 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3079 !lmp_host_ssp_capable(hdev)) {
3082 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3085 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3086 struct hci_cp_write_le_host_supported cp;
3089 cp.simul = lmp_le_br_capable(hdev);
3091 /* Check first if we already have the right
3092 * host state (host features set)
3094 if (cp.le != lmp_host_le_capable(hdev) ||
3095 cp.simul != lmp_host_le_br_capable(hdev))
3097 HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Sync the auth-enable setting with the LINK_SECURITY flag. */
3101 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3102 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3103 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE,
3104 sizeof(link_sec), &link_sec);
3106 if (lmp_bredr_capable(hdev)) {
3107 set_bredr_scan(hdev);
3109 update_name(hdev, hdev->dev_name);
/* Power-down path: flush pending commands, reset advertised CoD. */
3113 u8 status = MGMT_STATUS_NOT_POWERED;
3114 u8 zero_cod[] = { 0, 0, 0 };
3116 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3118 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3119 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3120 zero_cod, sizeof(zero_cod), NULL);
3123 err = new_settings(hdev, match.sk);
/*
 * Update the HCI_DISCOVERABLE flag from the controller state, answer
 * any pending SET_DISCOVERABLE commands, and broadcast new_settings if
 * the flag actually changed.
 * NOTE(review): the 'discoverable' branch bodies ('changed = true'),
 * braces and the trailing sock_put/return were lost in extraction.
 */
3131 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3133 struct cmd_lookup match = { NULL, hdev };
3134 bool changed = false;
3138 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3141 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3145 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3149 err = new_settings(hdev, match.sk);
/*
 * Mirror of mgmt_discoverable() for the HCI_CONNECTABLE flag: sync the
 * flag, answer pending SET_CONNECTABLE commands, and emit new_settings
 * when the flag changed.
 * NOTE(review): same extraction damage as mgmt_discoverable() above.
 */
3157 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3159 struct cmd_lookup match = { NULL, hdev };
3160 bool changed = false;
3164 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3167 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3171 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
3175 err = new_settings(hdev, match.sk);
/*
 * A WRITE_SCAN_ENABLE command failed: fail the pending mgmt command(s)
 * that requested it -- SET_CONNECTABLE for the page-scan bit,
 * SET_DISCOVERABLE for the inquiry-scan bit -- with the mapped status.
 */
3183 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3185 u8 mgmt_err = mgmt_status(status);
3187 if (scan & SCAN_PAGE)
3188 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3189 cmd_status_rsp, &mgmt_err);
3191 if (scan & SCAN_INQUIRY)
3192 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3193 cmd_status_rsp, &mgmt_err);
/*
 * Announce a newly created BR/EDR link key to userspace via
 * MGMT_EV_NEW_LINK_KEY; store_hint tells userspace whether the key
 * should be persisted.
 */
3198 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3201 struct mgmt_ev_new_link_key ev;
3203 memset(&ev, 0, sizeof(ev));
3205 ev.store_hint = persistent;
3206 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3207 ev.key.addr.type = BDADDR_BREDR;
3208 ev.key.type = key->type;
3209 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3210 ev.key.pin_len = key->pin_len;
3212 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/*
 * Announce a new SMP Long Term Key to userspace via
 * MGMT_EV_NEW_LONG_TERM_KEY. The address type is translated from the
 * LE link/bdaddr types into the mgmt address-type encoding.
 * NOTE(review): the body of the HCI_SMP_LTK branch (which sets the
 * master flag in ev.key) was lost in extraction.
 */
3215 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3217 struct mgmt_ev_new_long_term_key ev;
3219 memset(&ev, 0, sizeof(ev));
3221 ev.store_hint = persistent;
3222 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3223 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3224 ev.key.authenticated = key->authenticated;
3225 ev.key.enc_size = key->enc_size;
3226 ev.key.ediv = key->ediv;
3228 if (key->type == HCI_SMP_LTK)
3231 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3232 memcpy(ev.key.val, key->val, sizeof(key->val));
3234 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/*
 * Emit MGMT_EV_DEVICE_CONNECTED with a variable-length EIR payload:
 * the remote name (if any) as EIR_NAME_COMPLETE, plus the class of
 * device as EIR_CLASS_OF_DEV when it is non-zero.
 * NOTE(review): the 'buf' declaration, 'eir_len' initialisation and
 * the name_len guard around the first eir_append_data() were lost in
 * extraction.
 */
3238 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3239 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3243 struct mgmt_ev_device_connected *ev = (void *) buf;
3246 bacpy(&ev->addr.bdaddr, bdaddr);
3247 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3249 ev->flags = __cpu_to_le32(flags);
3252 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append the CoD field when it is not all-zero. */
3255 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3256 eir_len = eir_append_data(ev->eir, eir_len,
3257 EIR_CLASS_OF_DEV, dev_class, 3);
3259 ev->eir_len = cpu_to_le16(eir_len);
3261 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3262 sizeof(*ev) + eir_len, NULL);
/*
 * mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command with success for the address it targeted, record its socket
 * in *sk (so the disconnected event skips it), and drop the command.
 * NOTE(review): lines between cmd_complete() and mgmt_pending_remove()
 * (the sizeof(rp) argument and the *sk assignment/sock_hold) were lost
 * in extraction.
 */
3265 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3267 struct mgmt_cp_disconnect *cp = cmd->param;
3268 struct sock **sk = data;
3269 struct mgmt_rp_disconnect rp;
3271 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3272 rp.addr.type = cp->addr.type;
3274 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3280 mgmt_pending_remove(cmd);
/*
 * mgmt_pending_foreach() callback: finish a pending UNPAIR_DEVICE
 * command -- emit the device_unpaired event, complete the command with
 * success, and remove it from the pending list.
 */
3283 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3285 struct hci_dev *hdev = data;
3286 struct mgmt_cp_unpair_device *cp = cmd->param;
3287 struct mgmt_rp_unpair_device rp;
3289 memset(&rp, 0, sizeof(rp));
3290 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3291 rp.addr.type = cp->addr.type;
3293 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3295 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3297 mgmt_pending_remove(cmd);
/*
 * A connection went down: complete any pending DISCONNECT commands
 * (collecting the requester's socket into sk so it is excluded from
 * the broadcast), emit MGMT_EV_DEVICE_DISCONNECTED, then flush any
 * pending UNPAIR_DEVICE commands.
 * NOTE(review): the ev.reason assignment, sock_put(sk) and return were
 * lost in extraction.
 */
3300 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3301 u8 link_type, u8 addr_type, u8 reason)
3303 struct mgmt_ev_device_disconnected ev;
3304 struct sock *sk = NULL;
3307 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3309 bacpy(&ev.addr.bdaddr, bdaddr);
3310 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3313 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3319 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/*
 * A disconnect attempt failed: still flush pending UNPAIR_DEVICE
 * commands, then complete the pending DISCONNECT command (if any)
 * with the mapped error status for the given address.
 * NOTE(review): the 'err' declaration, the early return when no
 * pending DISCONNECT exists, and the final return were lost in
 * extraction.
 */
3325 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3326 u8 link_type, u8 addr_type, u8 status)
3328 struct mgmt_rp_disconnect rp;
3329 struct pending_cmd *cmd;
3332 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3335 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3339 bacpy(&rp.addr.bdaddr, bdaddr);
3340 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3342 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3343 mgmt_status(status), &rp, sizeof(rp));
3345 mgmt_pending_remove(cmd);
/*
 * Broadcast MGMT_EV_CONNECT_FAILED for an outgoing connection attempt
 * that did not succeed, carrying the address and mapped HCI status.
 */
3350 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3351 u8 addr_type, u8 status)
3353 struct mgmt_ev_connect_failed ev;
3355 bacpy(&ev.addr.bdaddr, bdaddr);
3356 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3357 ev.status = mgmt_status(status);
3359 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/*
 * Ask userspace for a PIN code via MGMT_EV_PIN_CODE_REQUEST (BR/EDR
 * only).
 * NOTE(review): the ev.secure assignment (from the 'secure' parameter)
 * was lost in extraction.
 */
3362 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3364 struct mgmt_ev_pin_code_request ev;
3366 bacpy(&ev.addr.bdaddr, bdaddr);
3367 ev.addr.type = BDADDR_BREDR;
3370 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/*
 * Controller acknowledged our PIN code reply: complete the pending
 * PIN_CODE_REPLY command with the mapped status for the BR/EDR address.
 * NOTE(review): the early return when no pending command is found and
 * the final return were lost in extraction.
 */
3374 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3377 struct pending_cmd *cmd;
3378 struct mgmt_rp_pin_code_reply rp;
3381 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3385 bacpy(&rp.addr.bdaddr, bdaddr);
3386 rp.addr.type = BDADDR_BREDR;
3388 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3389 mgmt_status(status), &rp, sizeof(rp));
3391 mgmt_pending_remove(cmd);
/*
 * Same as mgmt_pin_code_reply_complete() but for the negative reply:
 * completes the pending PIN_CODE_NEG_REPLY command.
 */
3396 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3399 struct pending_cmd *cmd;
3400 struct mgmt_rp_pin_code_reply rp;
3403 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3407 bacpy(&rp.addr.bdaddr, bdaddr);
3408 rp.addr.type = BDADDR_BREDR;
3410 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3411 mgmt_status(status), &rp, sizeof(rp));
3413 mgmt_pending_remove(cmd);
/*
 * Ask userspace to confirm a numeric-comparison pairing value via
 * MGMT_EV_USER_CONFIRM_REQUEST.
 * NOTE(review): the ev.value assignment (from the __le32 'value'
 * parameter) was lost in extraction.
 */
3418 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3419 u8 link_type, u8 addr_type, __le32 value,
3422 struct mgmt_ev_user_confirm_request ev;
3424 BT_DBG("%s", hdev->name);
3426 bacpy(&ev.addr.bdaddr, bdaddr);
3427 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3428 ev.confirm_hint = confirm_hint;
3431 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/*
 * Ask userspace to supply a passkey via MGMT_EV_USER_PASSKEY_REQUEST.
 */
3435 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3436 u8 link_type, u8 addr_type)
3438 struct mgmt_ev_user_passkey_request ev;
3440 BT_DBG("%s", hdev->name);
3442 bacpy(&ev.addr.bdaddr, bdaddr);
3443 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3445 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/*
 * Common completion helper for the four user confirm/passkey
 * (neg-)reply opcodes: find the pending command for 'opcode', complete
 * it with the mapped status and the address, and remove it.
 * NOTE(review): the early return when no pending command exists, the
 * &rp/sizeof(rp) arguments to cmd_complete(), and the final return
 * were lost in extraction.
 */
3449 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3450 u8 link_type, u8 addr_type, u8 status,
3453 struct pending_cmd *cmd;
3454 struct mgmt_rp_user_confirm_reply rp;
3457 cmd = mgmt_pending_find(opcode, hdev);
3461 bacpy(&rp.addr.bdaddr, bdaddr);
3462 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3463 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3466 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY command. */
3471 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3472 u8 link_type, u8 addr_type, u8 status)
3474 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3475 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY command.
 * NOTE(review): the 'status,' argument line was lost in extraction. */
3478 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3479 u8 link_type, u8 addr_type, u8 status)
3481 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3483 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY command. */
3486 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3487 u8 link_type, u8 addr_type, u8 status)
3489 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3490 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY command.
 * NOTE(review): the 'status,' argument line was lost in extraction. */
3493 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3494 u8 link_type, u8 addr_type, u8 status)
3496 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3498 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/*
 * Display-side passkey notification: tell userspace which passkey the
 * remote must enter and how many digits have been entered so far.
 */
3501 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3502 u8 link_type, u8 addr_type, u32 passkey,
3505 struct mgmt_ev_passkey_notify ev;
3507 BT_DBG("%s", hdev->name);
3509 bacpy(&ev.addr.bdaddr, bdaddr);
3510 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3511 ev.passkey = __cpu_to_le32(passkey);
3512 ev.entered = entered;
3514 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/*
 * Broadcast MGMT_EV_AUTH_FAILED when authentication with a remote
 * device fails, carrying the address and mapped HCI status.
 */
3517 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3518 u8 addr_type, u8 status)
3520 struct mgmt_ev_auth_failed ev;
3522 bacpy(&ev.addr.bdaddr, bdaddr);
3523 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3524 ev.status = mgmt_status(status);
3526 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/*
 * WRITE_AUTH_ENABLE finished: on error, fail all pending
 * SET_LINK_SECURITY commands; on success, sync HCI_LINK_SECURITY with
 * the controller's HCI_AUTH flag, answer pending commands, and emit
 * new_settings if the flag changed.
 * NOTE(review): branch structure ('changed = true' bodies, the
 * error-path return), the 'err' declaration and the trailing
 * sock_put/return were lost in extraction.
 */
3529 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3531 struct cmd_lookup match = { NULL, hdev };
3532 bool changed = false;
3536 u8 mgmt_err = mgmt_status(status);
3537 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3538 cmd_status_rsp, &mgmt_err);
3542 if (test_bit(HCI_AUTH, &hdev->flags)) {
3543 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3546 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3550 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3554 err = new_settings(hdev, match.sk);
/*
 * Clear the Extended Inquiry Response: zero the cached copy in hdev
 * and send an all-zero WRITE_EIR to the controller. No-op (status 0 on
 * the missing return path) when the controller lacks extended inquiry.
 */
3562 static int clear_eir(struct hci_dev *hdev)
3564 struct hci_cp_write_eir cp;
3566 if (!lmp_ext_inq_capable(hdev))
3569 memset(hdev->eir, 0, sizeof(hdev->eir));
3571 memset(&cp, 0, sizeof(cp));
3573 return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/*
 * WRITE_SSP_MODE finished: on error, revert HCI_SSP_ENABLED (emitting
 * new_settings if it had been set) and fail pending SET_SSP commands;
 * on success, sync the flag, answer pending commands, emit
 * new_settings if changed, and refresh/clear the EIR accordingly.
 * NOTE(review): several lines lost in extraction -- the 'err'
 * declaration, error-path return, 'enable' branch structure, the
 * update_eir()/clear_eir() calls after the final test_bit, and the
 * trailing sock_put/return.
 */
3576 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3578 struct cmd_lookup match = { NULL, hdev };
3579 bool changed = false;
3583 u8 mgmt_err = mgmt_status(status);
3585 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3587 err = new_settings(hdev, NULL);
3589 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3596 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3599 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3603 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3606 err = new_settings(hdev, match.sk);
3611 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
/*
 * mgmt_pending_foreach() callback for class-of-device operations:
 * complete the pending command with the stored status and the current
 * 3-byte dev_class, unlink it, and keep the first socket in match->sk
 * (referenced) so the caller can skip it when broadcasting.
 */
3619 static void class_rsp(struct pending_cmd *cmd, void *data)
3621 struct cmd_lookup *match = data;
3623 cmd_complete(cmd->sk, cmd->index, cmd->opcode, match->mgmt_status,
3624 match->hdev->dev_class, 3);
3626 list_del(&cmd->list);
3628 if (match->sk == NULL) {
3629 match->sk = cmd->sk;
3630 sock_hold(match->sk);
3633 mgmt_pending_free(cmd);
/*
 * A class-of-device update finished: clear the pending-class flag,
 * answer all pending SET_DEV_CLASS / ADD_UUID / REMOVE_UUID commands
 * via class_rsp(), and on success broadcast CLASS_OF_DEV_CHANGED.
 * NOTE(review): the status==0 guard before the mgmt_event() call and
 * the trailing sock_put/return were lost in extraction.
 */
3636 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3639 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3642 clear_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
3644 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, class_rsp, &match);
3645 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, class_rsp, &match);
3646 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, class_rsp, &match);
3649 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/*
 * Local name update finished: cache the new name in hdev if it
 * differs, answer any pending SET_LOCAL_NAME command (status or
 * complete), broadcast LOCAL_NAME_CHANGED when the name changed, and
 * refresh the EIR unless we are in the power-on init path (which
 * handles EIR itself).
 * NOTE(review): branch structure (the status/!status split around the
 * two replies, 'changed = true', the !changed && !cmd early exit, the
 * update_eir() call) was lost in extraction.
 */
3658 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3660 struct pending_cmd *cmd;
3661 struct mgmt_cp_set_local_name ev;
3662 bool changed = false;
3665 if (memcmp(name, hdev->dev_name, sizeof(hdev->dev_name)) != 0) {
3666 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3670 memset(&ev, 0, sizeof(ev));
3671 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3672 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3674 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3678 /* Always assume that either the short or the complete name has
3679 * changed if there was a pending mgmt command */
3683 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3684 mgmt_status(status));
3688 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, &ev,
3695 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
3696 sizeof(ev), cmd ? cmd->sk : NULL);
3698 /* EIR is taken care of separately when powering on the
3699 * adapter so only update them here if this is a name change
3700 * unrelated to power on.
3702 if (!test_bit(HCI_INIT, &hdev->flags))
3707 mgmt_pending_remove(cmd);
/*
 * READ_LOCAL_OOB_DATA finished: complete the pending command either
 * with an error status or with the hash/randomizer pair on success.
 * NOTE(review): the early return when no command is pending, the
 * if/else around status, sizeof(rp) argument, and the final return
 * were lost in extraction.
 */
3711 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3712 u8 *randomizer, u8 status)
3714 struct pending_cmd *cmd;
3717 BT_DBG("%s status %u", hdev->name, status);
3719 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3724 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3725 mgmt_status(status));
3727 struct mgmt_rp_read_local_oob_data rp;
3729 memcpy(rp.hash, hash, sizeof(rp.hash));
3730 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3732 err = cmd_complete(cmd->sk, hdev->id,
3733 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3737 mgmt_pending_remove(cmd);
/*
 * WRITE_LE_HOST_SUPPORTED finished: on error, revert HCI_LE_ENABLED
 * (emitting new_settings if it had been set) and fail pending SET_LE
 * commands; on success, sync the flag, answer pending SET_LE commands,
 * and emit new_settings if the flag changed.
 * NOTE(review): same extraction damage pattern as
 * mgmt_ssp_enable_complete() -- branch bodies, 'err' declaration and
 * trailing sock_put/return are missing.
 */
3742 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3744 struct cmd_lookup match = { NULL, hdev };
3745 bool changed = false;
3749 u8 mgmt_err = mgmt_status(status);
3751 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3753 err = new_settings(hdev, NULL);
3755 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
3762 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3765 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3769 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
3772 err = new_settings(hdev, match.sk);
/*
 * Report a discovered remote device to userspace via
 * MGMT_EV_DEVICE_FOUND, forwarding its EIR data and appending the
 * class of device when the EIR does not already carry one. Bails out
 * (return path lost in extraction) when the event would not fit in
 * the stack buffer even with 5 spare bytes for the CoD field.
 * NOTE(review): the 'buf' declaration, ev->rssi assignment, the
 * cfm_name/ssp flag conditions, the eir_len guard around the memcpy,
 * and the dev_class argument to eir_append_data() were lost in
 * extraction.
 */
3780 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3781 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
3782 ssp, u8 *eir, u16 eir_len)
3785 struct mgmt_ev_device_found *ev = (void *) buf;
3788 /* Leave 5 bytes for a potential CoD field */
3789 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
3792 memset(buf, 0, sizeof(buf));
3794 bacpy(&ev->addr.bdaddr, bdaddr);
3795 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3798 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
3800 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
3803 memcpy(ev->eir, eir, eir_len);
3805 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
3806 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
3809 ev->eir_len = cpu_to_le16(eir_len);
3810 ev_size = sizeof(*ev) + eir_len;
3812 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/*
 * Report a resolved remote name as a DEVICE_FOUND event whose EIR
 * payload contains just the EIR_NAME_COMPLETE field.
 * NOTE(review): the 'eir_len' declaration and the ev->rssi assignment
 * were lost in extraction.
 */
3815 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3816 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
3818 struct mgmt_ev_device_found *ev;
3819 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
3822 ev = (struct mgmt_ev_device_found *) buf;
3824 memset(buf, 0, sizeof(buf));
3826 bacpy(&ev->addr.bdaddr, bdaddr);
3827 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3830 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
3833 ev->eir_len = cpu_to_le16(eir_len);
3835 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
3836 sizeof(*ev) + eir_len, NULL);
/*
 * Starting discovery failed: reset discovery state to STOPPED and
 * complete the pending START_DISCOVERY command with the mapped status
 * and the requested discovery type.
 * NOTE(review): the 'type'/'err' declarations, the early return when
 * no command is pending, and the final return were lost in extraction.
 */
3839 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3841 struct pending_cmd *cmd;
3845 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3847 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3851 type = hdev->discovery.type;
3853 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3854 &type, sizeof(type));
3855 mgmt_pending_remove(cmd);
/*
 * Stopping discovery failed: complete the pending STOP_DISCOVERY
 * command with the mapped status and the active discovery type.
 */
3860 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3862 struct pending_cmd *cmd;
3865 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3869 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3870 &hdev->discovery.type, sizeof(hdev->discovery.type));
3871 mgmt_pending_remove(cmd);
/*
 * Discovery state changed: complete whichever START_DISCOVERY or
 * STOP_DISCOVERY command is pending (replying with the discovery
 * type), then broadcast MGMT_EV_DISCOVERING with the new state.
 * NOTE(review): the branch choosing START vs STOP based on
 * 'discovering' and the sizeof(type) argument were lost in extraction.
 */
3876 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
3878 struct mgmt_ev_discovering ev;
3879 struct pending_cmd *cmd;
3881 BT_DBG("%s discovering %u", hdev->name, discovering);
3884 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3886 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3889 u8 type = hdev->discovery.type;
3891 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
3893 mgmt_pending_remove(cmd);
3896 memset(&ev, 0, sizeof(ev));
3897 ev.type = hdev->discovery.type;
3898 ev.discovering = discovering;
3900 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/*
 * Broadcast MGMT_EV_DEVICE_BLOCKED, excluding the socket of the
 * pending BLOCK_DEVICE command (if any) that triggered it.
 */
3903 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3905 struct pending_cmd *cmd;
3906 struct mgmt_ev_device_blocked ev;
3908 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
3910 bacpy(&ev.addr.bdaddr, bdaddr);
3911 ev.addr.type = type;
3913 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
3914 cmd ? cmd->sk : NULL);
/*
 * Broadcast MGMT_EV_DEVICE_UNBLOCKED, excluding the socket of the
 * pending UNBLOCK_DEVICE command (if any) that triggered it.
 */
3917 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3919 struct pending_cmd *cmd;
3920 struct mgmt_ev_device_unblocked ev;
3922 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
3924 bacpy(&ev.addr.bdaddr, bdaddr);
3925 ev.addr.type = type;
3927 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
3928 cmd ? cmd->sk : NULL);
/* Module parameter: expose the High Speed (AMP) toggle as a writable
 * (0644) bool under /sys/module. 'enable_hs' itself is defined
 * elsewhere in this file. */
3931 module_param(enable_hs, bool, 0644);
3932 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");