/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
58 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
61 hci_conn_check_pending(hdev);
64 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
66 __u8 status = *((__u8 *) skb->data);
68 BT_DBG("%s status 0x%2.2x", hdev->name, status);
73 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
78 __u8 status = *((__u8 *) skb->data);
80 BT_DBG("%s status 0x%2.2x", hdev->name, status);
85 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
87 hci_conn_check_pending(hdev);
90 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
93 BT_DBG("%s", hdev->name);
96 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
98 struct hci_rp_role_discovery *rp = (void *) skb->data;
99 struct hci_conn *conn;
101 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
108 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
110 conn->role = rp->role;
112 hci_dev_unlock(hdev);
115 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 struct hci_rp_read_link_policy *rp = (void *) skb->data;
118 struct hci_conn *conn;
120 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 conn->link_policy = __le16_to_cpu(rp->policy);
131 hci_dev_unlock(hdev);
134 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 struct hci_rp_write_link_policy *rp = (void *) skb->data;
137 struct hci_conn *conn;
140 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
145 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
151 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 conn->link_policy = get_unaligned_le16(sent + 2);
155 hci_dev_unlock(hdev);
158 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
168 hdev->link_policy = __le16_to_cpu(rp->policy);
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
174 __u8 status = *((__u8 *) skb->data);
177 BT_DBG("%s status 0x%2.2x", hdev->name, status);
182 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
186 hdev->link_policy = get_unaligned_le16(sent);
189 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
191 __u8 status = *((__u8 *) skb->data);
193 BT_DBG("%s status 0x%2.2x", hdev->name, status);
195 clear_bit(HCI_RESET, &hdev->flags);
200 /* Reset all non-persistent flags */
201 hci_dev_clear_volatile_flags(hdev);
203 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
205 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
206 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
208 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
209 hdev->adv_data_len = 0;
211 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
212 hdev->scan_rsp_data_len = 0;
214 hdev->le_scan_type = LE_SCAN_PASSIVE;
216 hdev->ssp_debug_mode = 0;
218 hci_bdaddr_list_clear(&hdev->le_white_list);
221 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
224 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
225 struct hci_cp_read_stored_link_key *sent;
227 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
229 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
233 if (!rp->status && sent->read_all == 0x01) {
234 hdev->stored_max_keys = rp->max_keys;
235 hdev->stored_num_keys = rp->num_keys;
239 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
242 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
244 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
249 if (rp->num_keys <= hdev->stored_num_keys)
250 hdev->stored_num_keys -= rp->num_keys;
252 hdev->stored_num_keys = 0;
255 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
257 __u8 status = *((__u8 *) skb->data);
260 BT_DBG("%s status 0x%2.2x", hdev->name, status);
262 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
268 if (hci_dev_test_flag(hdev, HCI_MGMT))
269 mgmt_set_local_name_complete(hdev, sent, status);
271 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
273 hci_dev_unlock(hdev);
276 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
278 struct hci_rp_read_local_name *rp = (void *) skb->data;
280 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
285 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
286 hci_dev_test_flag(hdev, HCI_CONFIG))
287 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
290 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
292 __u8 status = *((__u8 *) skb->data);
295 BT_DBG("%s status 0x%2.2x", hdev->name, status);
297 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
304 __u8 param = *((__u8 *) sent);
306 if (param == AUTH_ENABLED)
307 set_bit(HCI_AUTH, &hdev->flags);
309 clear_bit(HCI_AUTH, &hdev->flags);
312 if (hci_dev_test_flag(hdev, HCI_MGMT))
313 mgmt_auth_enable_complete(hdev, status);
315 hci_dev_unlock(hdev);
318 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
320 __u8 status = *((__u8 *) skb->data);
324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
329 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
333 param = *((__u8 *) sent);
336 set_bit(HCI_ENCRYPT, &hdev->flags);
338 clear_bit(HCI_ENCRYPT, &hdev->flags);
341 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
343 __u8 status = *((__u8 *) skb->data);
347 BT_DBG("%s status 0x%2.2x", hdev->name, status);
349 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
353 param = *((__u8 *) sent);
358 hdev->discov_timeout = 0;
362 if (param & SCAN_INQUIRY)
363 set_bit(HCI_ISCAN, &hdev->flags);
365 clear_bit(HCI_ISCAN, &hdev->flags);
367 if (param & SCAN_PAGE)
368 set_bit(HCI_PSCAN, &hdev->flags);
370 clear_bit(HCI_PSCAN, &hdev->flags);
373 hci_dev_unlock(hdev);
376 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
378 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
380 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
385 memcpy(hdev->dev_class, rp->dev_class, 3);
387 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
388 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
391 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
393 __u8 status = *((__u8 *) skb->data);
396 BT_DBG("%s status 0x%2.2x", hdev->name, status);
398 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
405 memcpy(hdev->dev_class, sent, 3);
407 if (hci_dev_test_flag(hdev, HCI_MGMT))
408 mgmt_set_class_of_dev_complete(hdev, sent, status);
410 hci_dev_unlock(hdev);
413 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
415 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
418 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
423 setting = __le16_to_cpu(rp->voice_setting);
425 if (hdev->voice_setting == setting)
428 hdev->voice_setting = setting;
430 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
433 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
436 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
439 __u8 status = *((__u8 *) skb->data);
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
448 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
452 setting = get_unaligned_le16(sent);
454 if (hdev->voice_setting == setting)
457 hdev->voice_setting = setting;
459 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
462 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
465 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
468 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
470 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
475 hdev->num_iac = rp->num_iac;
477 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
480 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
482 __u8 status = *((__u8 *) skb->data);
483 struct hci_cp_write_ssp_mode *sent;
485 BT_DBG("%s status 0x%2.2x", hdev->name, status);
487 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
495 hdev->features[1][0] |= LMP_HOST_SSP;
497 hdev->features[1][0] &= ~LMP_HOST_SSP;
500 if (hci_dev_test_flag(hdev, HCI_MGMT))
501 mgmt_ssp_enable_complete(hdev, sent->mode, status);
504 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
506 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
509 hci_dev_unlock(hdev);
512 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
514 u8 status = *((u8 *) skb->data);
515 struct hci_cp_write_sc_support *sent;
517 BT_DBG("%s status 0x%2.2x", hdev->name, status);
519 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
527 hdev->features[1][0] |= LMP_HOST_SC;
529 hdev->features[1][0] &= ~LMP_HOST_SC;
532 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
534 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
536 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
539 hci_dev_unlock(hdev);
542 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
544 struct hci_rp_read_local_version *rp = (void *) skb->data;
546 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
551 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
552 hci_dev_test_flag(hdev, HCI_CONFIG)) {
553 hdev->hci_ver = rp->hci_ver;
554 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
555 hdev->lmp_ver = rp->lmp_ver;
556 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
557 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
561 static void hci_cc_read_local_commands(struct hci_dev *hdev,
564 struct hci_rp_read_local_commands *rp = (void *) skb->data;
566 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
571 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
572 hci_dev_test_flag(hdev, HCI_CONFIG))
573 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
576 static void hci_cc_read_local_features(struct hci_dev *hdev,
579 struct hci_rp_read_local_features *rp = (void *) skb->data;
581 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
586 memcpy(hdev->features, rp->features, 8);
588 /* Adjust default settings according to features
589 * supported by device. */
591 if (hdev->features[0][0] & LMP_3SLOT)
592 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
594 if (hdev->features[0][0] & LMP_5SLOT)
595 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
597 if (hdev->features[0][1] & LMP_HV2) {
598 hdev->pkt_type |= (HCI_HV2);
599 hdev->esco_type |= (ESCO_HV2);
602 if (hdev->features[0][1] & LMP_HV3) {
603 hdev->pkt_type |= (HCI_HV3);
604 hdev->esco_type |= (ESCO_HV3);
607 if (lmp_esco_capable(hdev))
608 hdev->esco_type |= (ESCO_EV3);
610 if (hdev->features[0][4] & LMP_EV4)
611 hdev->esco_type |= (ESCO_EV4);
613 if (hdev->features[0][4] & LMP_EV5)
614 hdev->esco_type |= (ESCO_EV5);
616 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
617 hdev->esco_type |= (ESCO_2EV3);
619 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
620 hdev->esco_type |= (ESCO_3EV3);
622 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
623 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
626 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
629 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
631 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
636 if (hdev->max_page < rp->max_page)
637 hdev->max_page = rp->max_page;
639 if (rp->page < HCI_MAX_PAGES)
640 memcpy(hdev->features[rp->page], rp->features, 8);
643 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
646 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
653 hdev->flow_ctl_mode = rp->mode;
656 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
658 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
660 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
665 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
666 hdev->sco_mtu = rp->sco_mtu;
667 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
668 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
670 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
675 hdev->acl_cnt = hdev->acl_pkts;
676 hdev->sco_cnt = hdev->sco_pkts;
678 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
679 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
682 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
684 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
686 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
691 if (test_bit(HCI_INIT, &hdev->flags))
692 bacpy(&hdev->bdaddr, &rp->bdaddr);
694 if (hci_dev_test_flag(hdev, HCI_SETUP))
695 bacpy(&hdev->setup_addr, &rp->bdaddr);
698 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
701 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
703 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
708 if (test_bit(HCI_INIT, &hdev->flags)) {
709 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
710 hdev->page_scan_window = __le16_to_cpu(rp->window);
714 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
717 u8 status = *((u8 *) skb->data);
718 struct hci_cp_write_page_scan_activity *sent;
720 BT_DBG("%s status 0x%2.2x", hdev->name, status);
725 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
729 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
730 hdev->page_scan_window = __le16_to_cpu(sent->window);
733 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
736 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
743 if (test_bit(HCI_INIT, &hdev->flags))
744 hdev->page_scan_type = rp->type;
747 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
750 u8 status = *((u8 *) skb->data);
753 BT_DBG("%s status 0x%2.2x", hdev->name, status);
758 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
760 hdev->page_scan_type = *type;
763 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
766 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
768 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
773 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
774 hdev->block_len = __le16_to_cpu(rp->block_len);
775 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
777 hdev->block_cnt = hdev->num_blocks;
779 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
780 hdev->block_cnt, hdev->block_len);
783 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
785 struct hci_rp_read_clock *rp = (void *) skb->data;
786 struct hci_cp_read_clock *cp;
787 struct hci_conn *conn;
789 BT_DBG("%s", hdev->name);
791 if (skb->len < sizeof(*rp))
799 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
803 if (cp->which == 0x00) {
804 hdev->clock = le32_to_cpu(rp->clock);
808 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
810 conn->clock = le32_to_cpu(rp->clock);
811 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
815 hci_dev_unlock(hdev);
818 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
821 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
823 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828 hdev->amp_status = rp->amp_status;
829 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
830 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
831 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
832 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
833 hdev->amp_type = rp->amp_type;
834 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
835 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
836 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
837 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
840 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
843 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
850 hdev->inq_tx_power = rp->tx_power;
853 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
855 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
856 struct hci_cp_pin_code_reply *cp;
857 struct hci_conn *conn;
859 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
863 if (hci_dev_test_flag(hdev, HCI_MGMT))
864 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
869 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
873 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
875 conn->pin_length = cp->pin_len;
878 hci_dev_unlock(hdev);
881 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
883 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
885 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
889 if (hci_dev_test_flag(hdev, HCI_MGMT))
890 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
893 hci_dev_unlock(hdev);
896 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
899 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
901 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
906 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
907 hdev->le_pkts = rp->le_max_pkt;
909 hdev->le_cnt = hdev->le_pkts;
911 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
914 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
917 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
919 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
924 memcpy(hdev->le_features, rp->features, 8);
927 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
930 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
932 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
937 hdev->adv_tx_power = rp->tx_power;
940 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
942 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
944 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
948 if (hci_dev_test_flag(hdev, HCI_MGMT))
949 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
952 hci_dev_unlock(hdev);
955 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
958 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
960 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
964 if (hci_dev_test_flag(hdev, HCI_MGMT))
965 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
966 ACL_LINK, 0, rp->status);
968 hci_dev_unlock(hdev);
971 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
973 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
975 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
979 if (hci_dev_test_flag(hdev, HCI_MGMT))
980 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
983 hci_dev_unlock(hdev);
986 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
989 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
991 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
995 if (hci_dev_test_flag(hdev, HCI_MGMT))
996 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
997 ACL_LINK, 0, rp->status);
999 hci_dev_unlock(hdev);
1002 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1003 struct sk_buff *skb)
1005 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1007 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1010 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1011 struct sk_buff *skb)
1013 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1015 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1018 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1020 __u8 status = *((__u8 *) skb->data);
1023 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1028 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1034 bacpy(&hdev->random_addr, sent);
1036 hci_dev_unlock(hdev);
1039 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1041 __u8 *sent, status = *((__u8 *) skb->data);
1043 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1048 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1054 /* If we're doing connection initiation as peripheral. Set a
1055 * timeout in case something goes wrong.
1058 struct hci_conn *conn;
1060 hci_dev_set_flag(hdev, HCI_LE_ADV);
1062 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1064 queue_delayed_work(hdev->workqueue,
1065 &conn->le_conn_timeout,
1066 conn->conn_timeout);
1068 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1071 hci_dev_unlock(hdev);
1074 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1076 struct hci_cp_le_set_scan_param *cp;
1077 __u8 status = *((__u8 *) skb->data);
1079 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1084 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1090 hdev->le_scan_type = cp->type;
1092 hci_dev_unlock(hdev);
1095 static bool has_pending_adv_report(struct hci_dev *hdev)
1097 struct discovery_state *d = &hdev->discovery;
1099 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1102 static void clear_pending_adv_report(struct hci_dev *hdev)
1104 struct discovery_state *d = &hdev->discovery;
1106 bacpy(&d->last_adv_addr, BDADDR_ANY);
1107 d->last_adv_data_len = 0;
1110 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1111 u8 bdaddr_type, s8 rssi, u32 flags,
1114 struct discovery_state *d = &hdev->discovery;
1116 bacpy(&d->last_adv_addr, bdaddr);
1117 d->last_adv_addr_type = bdaddr_type;
1118 d->last_adv_rssi = rssi;
1119 d->last_adv_flags = flags;
1120 memcpy(d->last_adv_data, data, len);
1121 d->last_adv_data_len = len;
1124 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1125 struct sk_buff *skb)
1127 struct hci_cp_le_set_scan_enable *cp;
1128 __u8 status = *((__u8 *) skb->data);
1130 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1135 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1141 switch (cp->enable) {
1142 case LE_SCAN_ENABLE:
1143 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1144 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1145 clear_pending_adv_report(hdev);
1148 case LE_SCAN_DISABLE:
1149 /* We do this here instead of when setting DISCOVERY_STOPPED
1150 * since the latter would potentially require waiting for
1151 * inquiry to stop too.
1153 if (has_pending_adv_report(hdev)) {
1154 struct discovery_state *d = &hdev->discovery;
1156 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1157 d->last_adv_addr_type, NULL,
1158 d->last_adv_rssi, d->last_adv_flags,
1160 d->last_adv_data_len, NULL, 0);
1163 /* Cancel this timer so that we don't try to disable scanning
1164 * when it's already disabled.
1166 cancel_delayed_work(&hdev->le_scan_disable);
1168 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1170 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1171 * interrupted scanning due to a connect request. Mark
1172 * therefore discovery as stopped. If this was not
1173 * because of a connect request advertising might have
1174 * been disabled because of active scanning, so
1175 * re-enable it again if necessary.
1177 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1178 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1179 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1180 hdev->discovery.state == DISCOVERY_FINDING)
1181 mgmt_reenable_advertising(hdev);
1186 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1190 hci_dev_unlock(hdev);
1193 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1194 struct sk_buff *skb)
1196 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1198 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1203 hdev->le_white_list_size = rp->size;
1206 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1207 struct sk_buff *skb)
1209 __u8 status = *((__u8 *) skb->data);
1211 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1216 hci_bdaddr_list_clear(&hdev->le_white_list);
1219 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1220 struct sk_buff *skb)
1222 struct hci_cp_le_add_to_white_list *sent;
1223 __u8 status = *((__u8 *) skb->data);
1225 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1230 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1234 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1238 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1239 struct sk_buff *skb)
1241 struct hci_cp_le_del_from_white_list *sent;
1242 __u8 status = *((__u8 *) skb->data);
1244 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1249 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1253 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1257 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1258 struct sk_buff *skb)
1260 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1262 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1267 memcpy(hdev->le_states, rp->le_states, 8);
1270 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1271 struct sk_buff *skb)
1273 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1275 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1280 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1281 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1284 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1285 struct sk_buff *skb)
1287 struct hci_cp_le_write_def_data_len *sent;
1288 __u8 status = *((__u8 *) skb->data);
1290 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1295 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1299 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1300 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1303 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1304 struct sk_buff *skb)
1306 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1308 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1313 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1314 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1315 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1316 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1319 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1320 struct sk_buff *skb)
1322 struct hci_cp_write_le_host_supported *sent;
1323 __u8 status = *((__u8 *) skb->data);
1325 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1330 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1337 hdev->features[1][0] |= LMP_HOST_LE;
1338 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1340 hdev->features[1][0] &= ~LMP_HOST_LE;
1341 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1342 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1346 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1348 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1350 hci_dev_unlock(hdev);
1353 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1355 struct hci_cp_le_set_adv_param *cp;
1356 u8 status = *((u8 *) skb->data);
1358 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1363 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1368 hdev->adv_addr_type = cp->own_address_type;
1369 hci_dev_unlock(hdev);
1372 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1374 struct hci_rp_read_rssi *rp = (void *) skb->data;
1375 struct hci_conn *conn;
1377 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1384 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1386 conn->rssi = rp->rssi;
1388 hci_dev_unlock(hdev);
1391 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1393 struct hci_cp_read_tx_power *sent;
1394 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1395 struct hci_conn *conn;
1397 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1402 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1408 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1412 switch (sent->type) {
1414 conn->tx_power = rp->tx_power;
1417 conn->max_tx_power = rp->tx_power;
1422 hci_dev_unlock(hdev);
1425 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1427 u8 status = *((u8 *) skb->data);
1430 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1435 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1437 hdev->ssp_debug_mode = *mode;
1440 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1442 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1445 hci_conn_check_pending(hdev);
1449 set_bit(HCI_INQUIRY, &hdev->flags);
1452 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1454 struct hci_cp_create_conn *cp;
1455 struct hci_conn *conn;
1457 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1459 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1465 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1467 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1470 if (conn && conn->state == BT_CONNECT) {
1471 if (status != 0x0c || conn->attempt > 2) {
1472 conn->state = BT_CLOSED;
1473 hci_connect_cfm(conn, status);
1476 conn->state = BT_CONNECT2;
1480 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1483 BT_ERR("No memory for new connection");
1487 hci_dev_unlock(hdev);
1490 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1492 struct hci_cp_add_sco *cp;
1493 struct hci_conn *acl, *sco;
1496 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1501 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1505 handle = __le16_to_cpu(cp->handle);
1507 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1511 acl = hci_conn_hash_lookup_handle(hdev, handle);
1515 sco->state = BT_CLOSED;
1517 hci_connect_cfm(sco, status);
1522 hci_dev_unlock(hdev);
1525 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1527 struct hci_cp_auth_requested *cp;
1528 struct hci_conn *conn;
1530 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1535 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1541 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1543 if (conn->state == BT_CONFIG) {
1544 hci_connect_cfm(conn, status);
1545 hci_conn_drop(conn);
1549 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Set_Connection_Encryption.
 * Mirrors hci_cs_auth_requested(): on failure while still in
 * BT_CONFIG, fails the connection setup and drops the reference.
 * NOTE(review): status guard / lock lines are elided in this excerpt.
 */
1552 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1554 struct hci_cp_set_conn_encrypt *cp;
1555 struct hci_conn *conn;
1557 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1562 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1568 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1570 if (conn->state == BT_CONFIG) {
1571 hci_connect_cfm(conn, status);
1572 hci_conn_drop(conn);
1576 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection in BT_CONFIG still needs an
 * authentication request before it can be reported connected.
 * Returns non-zero ("auth needed") / zero per the elided return lines.
 * NOTE(review): the return statements themselves are elided in this
 * excerpt; only the deciding conditions are visible.
 */
1579 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1580 struct hci_conn *conn)
/* Only outgoing links still in configuration are candidates. */
1582 if (conn->state != BT_CONFIG || !conn->out)
/* SDP-only security never requires authentication. */
1585 if (conn->pending_sec_level == BT_SECURITY_SDP)
1588 /* Only request authentication for SSP connections or non-SSP
1589 * devices with sec_level MEDIUM or HIGH or if MITM protection
1592 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1593 conn->pending_sec_level != BT_SECURITY_FIPS &&
1594 conn->pending_sec_level != BT_SECURITY_HIGH &&
1595 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue an HCI_Remote_Name_Request for the given inquiry-cache entry,
 * reusing the page-scan parameters learned during inquiry.
 * Returns the hci_send_cmd() result (0 on queued success).
 */
1601 static int hci_resolve_name(struct hci_dev *hdev,
1602 struct inquiry_entry *e)
1604 struct hci_cp_remote_name_req cp;
1606 memset(&cp, 0, sizeof(cp));
1608 bacpy(&cp.bdaddr, &e->data.bdaddr);
1609 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1610 cp.pscan_mode = e->data.pscan_mode;
1611 cp.clock_offset = e->data.clock_offset;
1613 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next discovered device whose name
 * is still needed. Returns true when a request was started (the entry
 * then moves to NAME_PENDING), false otherwise.
 * NOTE(review): the false-return paths are elided in this excerpt.
 */
1616 static bool hci_resolve_next_name(struct hci_dev *hdev)
1618 struct discovery_state *discov = &hdev->discovery;
1619 struct inquiry_entry *e;
1621 if (list_empty(&discov->resolve))
1624 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1628 if (hci_resolve_name(hdev, e) == 0) {
1629 e->name_state = NAME_PENDING;
/* Handle completion (or abort) of a remote-name request during
 * discovery: report the device connected to mgmt if applicable,
 * record the resolved name, and either continue with the next pending
 * name or mark discovery stopped.
 * @name/@name_len: resolved name, or NULL/0 when resolution failed.
 * NOTE(review): several guard lines and the discov_complete label are
 * elided in this excerpt.
 */
1636 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1637 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1639 struct discovery_state *discov = &hdev->discovery;
1640 struct inquiry_entry *e;
1642 /* Update the mgmt connected state if necessary. Be careful with
1643 * conn objects that exist but are not (yet) connected however.
1644 * Only those in BT_CONFIG or BT_CONNECTED states can be
1645 * considered connected.
1648 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1649 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1650 mgmt_device_connected(hdev, conn, 0, name, name_len);
1652 if (discov->state == DISCOVERY_STOPPED)
1655 if (discov->state == DISCOVERY_STOPPING)
1656 goto discov_complete;
1658 if (discov->state != DISCOVERY_RESOLVING)
1661 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING)
1662 /* If the device was not found in a list of found devices names of which
1663 * are pending. there is no need to continue resolving a next name as it
1664 * will be done upon receiving another Remote Name Request Complete
/* Name successfully resolved: record it and notify userspace. */
1671 e->name_state = NAME_KNOWN;
1672 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1673 e->data.rssi, name, name_len);
1675 e->name_state = NAME_NOT_KNOWN;
/* More names to resolve — stay in DISCOVERY_RESOLVING. */
1678 if (hci_resolve_next_name(hdev))
1682 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_Remote_Name_Request.
 * On failure, finishes any mgmt-driven pending name resolution for the
 * target and, if needed, initiates authentication on the related ACL
 * connection instead of waiting for a name-complete event that will
 * never arrive.
 * NOTE(review): status guard, lock, and goto lines are elided in this
 * excerpt.
 */
1685 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1687 struct hci_cp_remote_name_req *cp;
1688 struct hci_conn *conn;
1690 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1692 /* If successful wait for the name req complete event before
1693 * checking for the need to do authentication */
1697 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1703 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1705 if (hci_dev_test_flag(hdev, HCI_MGMT))
/* NULL name/0 length signals "resolution failed" downstream. */
1706 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1711 if (!hci_outgoing_auth_needed(hdev, conn))
1714 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1715 struct hci_cp_auth_requested auth_cp;
1717 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1719 auth_cp.handle = __cpu_to_le16(conn->handle);
1720 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1721 sizeof(auth_cp), &auth_cp);
1725 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Read_Remote_Supported_Features.
 * On failure while the link is still in BT_CONFIG, fails the
 * connection setup and drops the reference.
 * NOTE(review): status guard / lock lines are elided in this excerpt.
 */
1728 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1730 struct hci_cp_read_remote_features *cp;
1731 struct hci_conn *conn;
1733 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1738 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1744 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1746 if (conn->state == BT_CONFIG) {
1747 hci_connect_cfm(conn, status);
1748 hci_conn_drop(conn);
1752 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Read_Remote_Extended_Features.
 * Same failure handling as hci_cs_read_remote_features().
 * NOTE(review): status guard / lock lines are elided in this excerpt.
 */
1755 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1757 struct hci_cp_read_remote_ext_features *cp;
1758 struct hci_conn *conn;
1760 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1765 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1771 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1773 if (conn->state == BT_CONFIG) {
1774 hci_connect_cfm(conn, status);
1775 hci_conn_drop(conn);
1779 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Setup_Synchronous_Connection.
 * On failure, finds the parent ACL by handle and closes the pending
 * (e)SCO link attached to it, notifying connect callbacks.
 * NOTE(review): failure guard and acl->link lines are elided in this
 * excerpt; structure parallels hci_cs_add_sco().
 */
1782 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1784 struct hci_cp_setup_sync_conn *cp;
1785 struct hci_conn *acl, *sco;
1788 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1793 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1797 handle = __le16_to_cpu(cp->handle);
1799 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1803 acl = hci_conn_hash_lookup_handle(hdev, handle);
1807 sco->state = BT_CLOSED;
1809 hci_connect_cfm(sco, status);
1814 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Sniff_Mode.
 * On failure, clears the pending mode-change flag and completes any
 * deferred SCO setup that was waiting on the mode change.
 * NOTE(review): status guard / lock lines are elided in this excerpt.
 */
1817 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1819 struct hci_cp_sniff_mode *cp;
1820 struct hci_conn *conn;
1822 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1827 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1833 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1835 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1837 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1838 hci_sco_setup(conn, status);
1841 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Exit_Sniff_Mode.
 * Identical failure handling to hci_cs_sniff_mode().
 * NOTE(review): status guard / lock lines are elided in this excerpt.
 */
1844 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1846 struct hci_cp_exit_sniff_mode *cp;
1847 struct hci_conn *conn;
1849 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1854 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1860 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1862 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1864 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1865 hci_sco_setup(conn, status);
1868 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Disconnect.
 * On failure, informs mgmt that the requested disconnect could not be
 * performed for the affected connection.
 * NOTE(review): status guard / lock lines are elided in this excerpt.
 */
1871 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1873 struct hci_cp_disconnect *cp;
1874 struct hci_conn *conn;
1879 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1885 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1887 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1888 conn->dst_type, status);
1890 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Create_Physical_Link (AMP).
 * On failure, cleans up the AMP hcon looked up by its physical-link
 * handle; on success, continues the AMP setup by writing the remote
 * assoc data.
 * NOTE(review): the status branch structure and hcon cleanup lines are
 * elided in this excerpt — confirm against the full file.
 */
1893 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1895 struct hci_cp_create_phy_link *cp;
1897 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1899 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1906 struct hci_conn *hcon;
/* phy_handle is a single byte, so no endian conversion needed here. */
1908 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1912 amp_write_remote_assoc(hdev, cp->phy_handle);
1915 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Accept_Physical_Link (AMP).
 * On the success path, continues AMP setup by writing the remote
 * assoc data for the accepted physical link.
 * NOTE(review): the status guard is elided in this excerpt.
 */
1918 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1920 struct hci_cp_accept_phy_link *cp;
1922 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1927 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1931 amp_write_remote_assoc(hdev, cp->phy_handle);
/* Command Status handler for HCI_LE_Create_Connection.
 * On the success path, records the initiator/responder address
 * information needed later by SMP and arms a connection-attempt
 * timeout for directed (non-whitelist) connects.
 * NOTE(review): status guard and lock lines are elided in this
 * excerpt; failures are handled by hci_le_conn_failed() via the
 * request completion callbacks (see comment below).
 */
1934 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1936 struct hci_cp_le_create_conn *cp;
1937 struct hci_conn *conn;
1939 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1941 /* All connection failure handling is taken care of by the
1942 * hci_le_conn_failed function which is triggered by the HCI
1943 * request completion callbacks used for connecting.
1948 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1954 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1958 /* Store the initiator and responder address information which
1959 * is needed for SMP. These values will not change during the
1960 * lifetime of the connection.
1962 conn->init_addr_type = cp->own_address_type;
1963 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1964 bacpy(&conn->init_addr, &hdev->random_addr);
1966 bacpy(&conn->init_addr, &hdev->bdaddr);
1968 conn->resp_addr_type = cp->peer_addr_type;
1969 bacpy(&conn->resp_addr, &cp->peer_addr);
1971 /* We don't want the connection attempt to stick around
1972 * indefinitely since LE doesn't have a page timeout concept
1973 * like BR/EDR. Set a timer for any connection that doesn't use
1974 * the white list for connecting.
1976 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1977 queue_delayed_work(conn->hdev->workqueue,
1978 &conn->le_conn_timeout,
1979 conn->conn_timeout);
1982 hci_dev_unlock(hdev);
/* Command Status handler for HCI_LE_Read_Remote_Features.
 * On failure while the LE link is still in BT_CONFIG, fails the
 * connection setup and drops the reference.
 * NOTE(review): status guard / lock lines are elided in this excerpt.
 */
1985 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
1987 struct hci_cp_le_read_remote_features *cp;
1988 struct hci_conn *conn;
1990 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1995 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2001 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2003 if (conn->state == BT_CONFIG) {
2004 hci_connect_cfm(conn, status);
2005 hci_conn_drop(conn);
2009 hci_dev_unlock(hdev);
/* Command Status handler for HCI_LE_Start_Encryption.
 * On failure for a still-connected link, tears the link down with an
 * authentication-failure reason, since encryption could not be
 * started.
 * NOTE(review): status guard / lock lines are elided in this excerpt.
 */
2012 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2014 struct hci_cp_le_start_enc *cp;
2015 struct hci_conn *conn;
2017 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2024 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2028 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
/* Nothing to tear down if the link already left BT_CONNECTED. */
2032 if (conn->state != BT_CONNECTED)
2035 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2036 hci_conn_drop(conn);
2039 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Switch_Role.
 * On failure, clears the pending role-switch flag for the connection
 * so later requests are not blocked.
 * NOTE(review): status guard / lock lines are elided in this excerpt.
 */
2042 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2044 struct hci_cp_switch_role *cp;
2045 struct hci_conn *conn;
2047 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2052 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2058 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2060 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2062 hci_dev_unlock(hdev);
/* Handler for the HCI Inquiry Complete event.
 * Clears HCI_INQUIRY, wakes waiters, and — when mgmt-driven discovery
 * is active — either starts resolving discovered names or transitions
 * discovery to DISCOVERY_STOPPED, taking simultaneous LE scanning into
 * account.
 * NOTE(review): lock and goto/unlock label lines are elided in this
 * excerpt.
 */
2065 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2067 __u8 status = *((__u8 *) skb->data);
2068 struct discovery_state *discov = &hdev->discovery;
2069 struct inquiry_entry *e;
2071 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2073 hci_conn_check_pending(hdev);
2075 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
/* Pairs with the barrier comment in hci_cc_inquiry_cancel(). */
2078 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2079 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2081 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2086 if (discov->state != DISCOVERY_FINDING)
2089 if (list_empty(&discov->resolve)) {
2090 /* When BR/EDR inquiry is active and no LE scanning is in
2091 * progress, then change discovery state to indicate completion.
2093 * When running LE scanning and BR/EDR inquiry simultaneously
2094 * and the LE scan already finished, then change the discovery
2095 * state to indicate completion.
2097 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2098 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2099 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Names still to resolve: start with the first NAME_NEEDED entry. */
2103 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2104 if (e && hci_resolve_name(hdev, e) == 0) {
2105 e->name_state = NAME_PENDING;
2106 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2108 /* When BR/EDR inquiry is active and no LE scanning is in
2109 * progress, then change discovery state to indicate completion.
2111 * When running LE scanning and BR/EDR inquiry simultaneously
2112 * and the LE scan already finished, then change the discovery
2113 * state to indicate completion.
2115 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2116 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2117 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2121 hci_dev_unlock(hdev);
/* Handler for the HCI Inquiry Result event.
 * Parses each response, updates the inquiry cache, and reports the
 * device to mgmt with an invalid RSSI (this event variant carries
 * none).
 * NOTE(review): num_rsp sanity check, lock, and flags declaration
 * lines are elided in this excerpt.
 */
2124 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2126 struct inquiry_data data;
/* Event payload: one byte num_rsp followed by inquiry_info entries. */
2127 struct inquiry_info *info = (void *) (skb->data + 1);
2128 int num_rsp = *((__u8 *) skb->data);
2130 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Periodic inquiry results are not forwarded to mgmt discovery. */
2135 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2140 for (; num_rsp; num_rsp--, info++) {
2143 bacpy(&data.bdaddr, &info->bdaddr);
2144 data.pscan_rep_mode = info->pscan_rep_mode;
2145 data.pscan_period_mode = info->pscan_period_mode;
2146 data.pscan_mode = info->pscan_mode;
2147 memcpy(data.dev_class, info->dev_class, 3);
2148 data.clock_offset = info->clock_offset;
2149 data.rssi = HCI_RSSI_INVALID;
2150 data.ssp_mode = 0x00;
2152 flags = hci_inquiry_cache_update(hdev, &data, false);
2154 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2155 info->dev_class, HCI_RSSI_INVALID,
2156 flags, NULL, 0, NULL, 0);
2159 hci_dev_unlock(hdev);
/* Handler for the HCI Connection Complete event.
 * Finalizes setup of an ACL or (e)SCO link: records the handle, sets
 * the connection state, creates debugfs/sysfs entries, propagates
 * adapter auth/encrypt policy, kicks off remote-feature discovery for
 * ACL links, and notifies connect callbacks. On error, marks the
 * connection closed and reports the failure to mgmt.
 * NOTE(review): the ev->status branch structure, lock, and several
 * closing braces are elided in this excerpt.
 */
2162 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2164 struct hci_ev_conn_complete *ev = (void *) skb->data;
2165 struct hci_conn *conn;
2167 BT_DBG("%s", hdev->name);
2171 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Some controllers report SCO where an eSCO conn object was created;
 * fall back to the ESCO lookup and retype it. */
2173 if (ev->link_type != SCO_LINK)
2176 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2180 conn->type = SCO_LINK;
2184 conn->handle = __le16_to_cpu(ev->handle);
2186 if (conn->type == ACL_LINK) {
2187 conn->state = BT_CONFIG;
2188 hci_conn_hold(conn);
/* Incoming legacy (non-SSP) link with no stored key: give the peer
 * extra time to pair before the disconnect timer fires. */
2190 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2191 !hci_find_link_key(hdev, &ev->bdaddr))
2192 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2194 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2196 conn->state = BT_CONNECTED;
2198 hci_debugfs_create_conn(conn);
2199 hci_conn_add_sysfs(conn);
/* Inherit adapter-wide authentication/encryption policy. */
2201 if (test_bit(HCI_AUTH, &hdev->flags))
2202 set_bit(HCI_CONN_AUTH, &conn->flags);
2204 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2205 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2207 /* Get remote features */
2208 if (conn->type == ACL_LINK) {
2209 struct hci_cp_read_remote_features cp;
2210 cp.handle = ev->handle;
2211 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2214 hci_update_page_scan(hdev);
2217 /* Set packet type for incoming connection */
2218 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2219 struct hci_cp_change_conn_ptype cp;
2220 cp.handle = ev->handle;
2221 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2222 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Error path: close the conn object and tell mgmt for ACL links. */
2226 conn->state = BT_CLOSED;
2227 if (conn->type == ACL_LINK)
2228 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2229 conn->dst_type, ev->status);
2232 if (conn->type == ACL_LINK)
2233 hci_sco_setup(conn, ev->status);
2236 hci_connect_cfm(conn, ev->status);
2238 } else if (ev->link_type != ACL_LINK)
2239 hci_connect_cfm(conn, ev->status);
2242 hci_dev_unlock(hdev);
2244 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from @bdaddr by sending
 * HCI_Reject_Connection_Request with reason "bad address"
 * (HCI_ERROR_REJ_BAD_ADDR).
 */
2247 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2249 struct hci_cp_reject_conn_req cp;
2251 bacpy(&cp.bdaddr, bdaddr);
2252 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2253 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Handler for the HCI Connection Request event.
 * Applies acceptance policy (protocol mask, blacklist, connectable/
 * whitelist state under mgmt), updates the inquiry cache with the
 * peer's class of device, creates/reuses a conn object, and either
 * accepts immediately (ACL / plain SCO), accepts with eSCO sync
 * parameters, or defers the decision to the upper protocol layer.
 * NOTE(review): lock, flags declaration, and several closing braces
 * are elided in this excerpt.
 */
2256 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2258 struct hci_ev_conn_request *ev = (void *) skb->data;
2259 int mask = hdev->link_mode;
2260 struct inquiry_entry *ie;
2261 struct hci_conn *conn;
2264 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
/* Let registered protocols veto or defer the incoming connection. */
2267 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2270 if (!(mask & HCI_LM_ACCEPT)) {
2271 hci_reject_conn(hdev, &ev->bdaddr);
2275 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2277 hci_reject_conn(hdev, &ev->bdaddr);
2281 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2282 * connection. These features are only touched through mgmt so
2283 * only do the checks if HCI_MGMT is set.
2285 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2286 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2287 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2289 hci_reject_conn(hdev, &ev->bdaddr);
2293 /* Connection accepted */
2297 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2299 memcpy(ie->data.dev_class, ev->dev_class, 3);
2301 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2304 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2307 BT_ERR("No memory for new connection");
2308 hci_dev_unlock(hdev);
2313 memcpy(conn->dev_class, ev->dev_class, 3);
2315 hci_dev_unlock(hdev);
2317 if (ev->link_type == ACL_LINK ||
2318 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2319 struct hci_cp_accept_conn_req cp;
2320 conn->state = BT_CONNECT;
2322 bacpy(&cp.bdaddr, &ev->bdaddr);
2324 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2325 cp.role = 0x00; /* Become master */
2327 cp.role = 0x01; /* Remain slave */
2329 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2330 } else if (!(flags & HCI_PROTO_DEFER)) {
2331 struct hci_cp_accept_sync_conn_req cp;
2332 conn->state = BT_CONNECT;
2334 bacpy(&cp.bdaddr, &ev->bdaddr);
2335 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* 0x1f40 = 8000 bytes/s — standard CVSD voice bandwidth. */
2337 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2338 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2339 cp.max_latency = cpu_to_le16(0xffff);
2340 cp.content_format = cpu_to_le16(hdev->voice_setting);
2341 cp.retrans_effort = 0xff;
2343 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* Deferred: upper layer decides later; report "request" upward. */
2346 conn->state = BT_CONNECT2;
2347 hci_connect_cfm(conn, 0);
/* Map an HCI disconnect reason code to the corresponding mgmt
 * MGMT_DEV_DISCONN_* reason reported to userspace; unknown codes map
 * to MGMT_DEV_DISCONN_UNKNOWN.
 * NOTE(review): the switch(...) line itself is elided in this excerpt.
 */
2351 static u8 hci_to_mgmt_reason(u8 err)
2354 case HCI_ERROR_CONNECTION_TIMEOUT:
2355 return MGMT_DEV_DISCONN_TIMEOUT;
2356 case HCI_ERROR_REMOTE_USER_TERM:
2357 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2358 case HCI_ERROR_REMOTE_POWER_OFF:
2359 return MGMT_DEV_DISCONN_REMOTE;
2360 case HCI_ERROR_LOCAL_HOST_TERM:
2361 return MGMT_DEV_DISCONN_LOCAL_HOST;
2363 return MGMT_DEV_DISCONN_UNKNOWN;
/* Handler for the HCI Disconnection Complete event.
 * On success, closes the conn object, reports the disconnect to mgmt,
 * maintains link keys / page scan / auto-connect parameters for ACL
 * and LE links, notifies disconnect callbacks, and re-enables LE
 * advertising if the disconnect freed the advertiser.
 * NOTE(review): status-error branch, lock, `type` capture, and
 * conn_put/unlock tail lines are elided in this excerpt.
 */
2367 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2369 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2370 u8 reason = hci_to_mgmt_reason(ev->reason);
2371 struct hci_conn_params *params;
2372 struct hci_conn *conn;
2373 bool mgmt_connected;
2376 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2380 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Error status means the disconnect request itself failed. */
2385 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2386 conn->dst_type, ev->status);
2390 conn->state = BT_CLOSED;
2392 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2393 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2394 reason, mgmt_connected);
2396 if (conn->type == ACL_LINK) {
2397 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2398 hci_remove_link_key(hdev, &conn->dst);
2400 hci_update_page_scan(hdev);
2403 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2405 switch (params->auto_connect) {
2406 case HCI_AUTO_CONN_LINK_LOSS:
/* Only reconnect on genuine link loss, not clean terminations. */
2407 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2411 case HCI_AUTO_CONN_DIRECT:
2412 case HCI_AUTO_CONN_ALWAYS:
2413 list_del_init(&params->action);
2414 list_add(&params->action, &hdev->pend_le_conns);
2415 hci_update_background_scan(hdev);
2425 hci_disconn_cfm(conn, ev->reason);
2428 /* Re-enable advertising if necessary, since it might
2429 * have been disabled by the connection. From the
2430 * HCI_LE_Set_Advertise_Enable command description in
2431 * the core specification (v4.0):
2432 * "The Controller shall continue advertising until the Host
2433 * issues an LE_Set_Advertise_Enable command with
2434 * Advertising_Enable set to 0x00 (Advertising is disabled)
2435 * or until a connection is created or until the Advertising
2436 * is timed out due to Directed Advertising."
2438 if (type == LE_LINK)
2439 mgmt_reenable_advertising(hdev);
2442 hci_dev_unlock(hdev);
/* Handler for the HCI Authentication Complete event.
 * Updates the connection's auth state and security level, reports
 * failures to mgmt, then either continues connection setup by enabling
 * encryption (SSP links in BT_CONFIG) or completes auth callbacks, and
 * finally services any pending encryption request.
 * NOTE(review): status branches, lock lines, and several closing
 * braces are elided in this excerpt.
 */
2445 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2447 struct hci_ev_auth_complete *ev = (void *) skb->data;
2448 struct hci_conn *conn;
2450 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2454 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Legacy (pre-SSP) devices cannot re-authenticate an existing link;
 * treat the pending re-auth as satisfied. */
2459 if (!hci_conn_ssp_enabled(conn) &&
2460 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2461 BT_INFO("re-auth of legacy device is not possible.");
2463 set_bit(HCI_CONN_AUTH, &conn->flags);
2464 conn->sec_level = conn->pending_sec_level;
2467 mgmt_auth_failed(conn, ev->status);
2470 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2471 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2473 if (conn->state == BT_CONFIG) {
2474 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2475 struct hci_cp_set_conn_encrypt cp;
2476 cp.handle = ev->handle;
/* Continue setup: SSP requires encryption after authentication. */
2478 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2481 conn->state = BT_CONNECTED;
2482 hci_connect_cfm(conn, ev->status);
2483 hci_conn_drop(conn);
2486 hci_auth_cfm(conn, ev->status);
2488 hci_conn_hold(conn);
2489 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2490 hci_conn_drop(conn);
/* An encryption change was queued behind this authentication. */
2493 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2495 struct hci_cp_set_conn_encrypt cp;
2496 cp.handle = ev->handle;
2498 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2501 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2502 hci_encrypt_cfm(conn, ev->status, 0x00);
2507 hci_dev_unlock(hdev);
/* Handler for the HCI Remote Name Request Complete event.
 * Feeds the resolved (or failed) name into the mgmt discovery name
 * resolution machinery and, for outgoing connections still needing
 * it, initiates authentication.
 * NOTE(review): lock and goto lines are elided in this excerpt;
 * parallels the failure path in hci_cs_remote_name_req().
 */
2510 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2512 struct hci_ev_remote_name *ev = (void *) skb->data;
2513 struct hci_conn *conn;
2515 BT_DBG("%s", hdev->name);
2517 hci_conn_check_pending(hdev);
2521 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2523 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2526 if (ev->status == 0)
/* Name field is not guaranteed NUL-terminated — bound the length. */
2527 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2528 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2530 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2536 if (!hci_outgoing_auth_needed(hdev, conn))
2539 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2540 struct hci_cp_auth_requested cp;
2542 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2544 cp.handle = __cpu_to_le16(conn->handle);
2545 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2549 hci_dev_unlock(hdev);
/* Request-completion callback for HCI_Read_Encryption_Key_Size.
 * Records the negotiated key size on the connection (falling back to
 * the maximum HCI_LINK_KEY_SIZE on error), then finishes connection
 * setup or delivers the deferred encrypt-change notification.
 * NOTE(review): status branch, lock, and the `encrypt` value lines
 * are elided in this excerpt.
 */
2552 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2553 u16 opcode, struct sk_buff *skb)
2555 const struct hci_rp_read_enc_key_size *rp;
2556 struct hci_conn *conn;
2559 BT_DBG("%s status 0x%02x", hdev->name, status);
/* Defensive: the response skb may be missing or truncated. */
2561 if (!skb || skb->len < sizeof(*rp)) {
2562 BT_ERR("%s invalid HCI Read Encryption Key Size response",
2567 rp = (void *)skb->data;
2568 handle = le16_to_cpu(rp->handle);
2572 conn = hci_conn_hash_lookup_handle(hdev, handle);
2576 /* If we fail to read the encryption key size, assume maximum
2577 * (which is the same we do also when this HCI command isn't
2581 BT_ERR("%s failed to read key size for handle %u", hdev->name,
2583 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2585 conn->enc_key_size = rp->key_size;
2588 if (conn->state == BT_CONFIG) {
2589 conn->state = BT_CONNECTED;
2590 hci_connect_cfm(conn, 0);
2591 hci_conn_drop(conn);
/* Otherwise translate the connection flags into the encrypt value
 * reported to hci_encrypt_cfm() (0x00/0x01/0x02 per HCI spec). */
2595 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2597 else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
2602 hci_encrypt_cfm(conn, 0, encrypt);
2606 hci_dev_unlock(hdev);
/* Handler for the HCI Encryption Change event.
 * Updates the connection's auth/encrypt/FIPS/AES-CCM flags, enforces
 * Secure Connections Only policy, tears down links whose encryption
 * failed, queries the encryption key size for encrypted ACL links
 * (when the controller supports the command), and finally completes
 * connection setup or encrypt-change callbacks.
 * NOTE(review): status branches, lock, and goto/label lines are
 * elided in this excerpt.
 */
2609 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2611 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2612 struct hci_conn *conn;
2614 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2618 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2624 /* Encryption implies authentication */
2625 set_bit(HCI_CONN_AUTH, &conn->flags);
2626 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2627 conn->sec_level = conn->pending_sec_level;
2629 /* P-256 authentication key implies FIPS */
2630 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2631 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 means AES-CCM on BR/EDR; LE always uses AES-CCM. */
2633 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2634 conn->type == LE_LINK)
2635 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2637 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2638 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2642 /* We should disregard the current RPA and generate a new one
2643 * whenever the encryption procedure fails.
2645 if (ev->status && conn->type == LE_LINK)
2646 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2648 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2650 if (ev->status && conn->state == BT_CONNECTED) {
2651 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2652 hci_conn_drop(conn);
2656 /* In Secure Connections Only mode, do not allow any connections
2657 * that are not encrypted with AES-CCM using a P-256 authenticated
2660 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
2661 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2662 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2663 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2664 hci_conn_drop(conn);
2668 /* Try reading the encryption key size for encrypted ACL links */
2669 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2670 struct hci_cp_read_enc_key_size cp;
2671 struct hci_request req;
2673 /* Only send HCI_Read_Encryption_Key_Size if the
2674 * controller really supports it. If it doesn't, assume
2675 * the default size (16).
2677 if (!(hdev->commands[20] & 0x10)) {
2678 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2682 hci_req_init(&req, hdev);
2684 cp.handle = cpu_to_le16(conn->handle);
2685 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
2687 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
2688 BT_ERR("Sending HCI Read Encryption Key Size failed");
2689 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2697 if (conn->state == BT_CONFIG) {
2699 conn->state = BT_CONNECTED;
2701 hci_connect_cfm(conn, ev->status);
2702 hci_conn_drop(conn);
2704 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2707 hci_dev_unlock(hdev);
/* Handler for the HCI Change Connection Link Key Complete event.
 * On success, marks the link as using a secure (changed) key, clears
 * any pending auth flag, and notifies key-change callbacks.
 * NOTE(review): status guard / lock lines are elided in this excerpt.
 */
2710 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2711 struct sk_buff *skb)
2713 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2714 struct hci_conn *conn;
2716 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2720 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2723 set_bit(HCI_CONN_SECURE, &conn->flags);
2725 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2727 hci_key_change_cfm(conn, ev->status);
2730 hci_dev_unlock(hdev);
/* Handler for the HCI Read Remote Supported Features Complete event.
 * Stores page 0 of the remote feature mask, then continues connection
 * setup: fetches extended features when both sides support them,
 * otherwise requests the remote name / reports the device connected,
 * and completes setup if no further authentication is needed.
 * NOTE(review): lock and goto lines are elided in this excerpt.
 */
2733 static void hci_remote_features_evt(struct hci_dev *hdev,
2734 struct sk_buff *skb)
2736 struct hci_ev_remote_features *ev = (void *) skb->data;
2737 struct hci_conn *conn;
2739 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2743 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Page 0 of the remote LMP feature mask. */
2748 memcpy(conn->features[0], ev->features, 8);
2750 if (conn->state != BT_CONFIG)
2753 if (!ev->status && lmp_ext_feat_capable(hdev) &&
2754 lmp_ext_feat_capable(conn)) {
2755 struct hci_cp_read_remote_ext_features cp;
2756 cp.handle = ev->handle;
2758 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2763 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2764 struct hci_cp_remote_name_req cp;
2765 memset(&cp, 0, sizeof(cp));
2766 bacpy(&cp.bdaddr, &conn->dst);
2767 cp.pscan_rep_mode = 0x02;
2768 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2769 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2770 mgmt_device_connected(hdev, conn, 0, NULL, 0);
2772 if (!hci_outgoing_auth_needed(hdev, conn)) {
2773 conn->state = BT_CONNECTED;
2774 hci_connect_cfm(conn, ev->status);
2775 hci_conn_drop(conn);
2779 hci_dev_unlock(hdev);
/* Handler for the HCI Command Complete event.
 * Extracts the opcode and status, dispatches to the matching
 * hci_cc_* handler, cancels the command timeout, refills the command
 * credit from ev->ncmd, completes any waiting HCI request, and kicks
 * the command work queue if more commands are pending.
 * @opcode/@status: returned to the caller for request accounting.
 * NOTE(review): the switch(...) line, break statements, and closing
 * braces are elided in this excerpt.
 */
2782 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
2783 u16 *opcode, u8 *status,
2784 hci_req_complete_t *req_complete,
2785 hci_req_complete_skb_t *req_complete_skb)
2787 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2789 *opcode = __le16_to_cpu(ev->opcode);
/* Status is the first byte of the command's return parameters. */
2790 *status = skb->data[sizeof(*ev)];
2792 skb_pull(skb, sizeof(*ev));
2795 case HCI_OP_INQUIRY_CANCEL:
2796 hci_cc_inquiry_cancel(hdev, skb);
2799 case HCI_OP_PERIODIC_INQ:
2800 hci_cc_periodic_inq(hdev, skb);
2803 case HCI_OP_EXIT_PERIODIC_INQ:
2804 hci_cc_exit_periodic_inq(hdev, skb);
2807 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2808 hci_cc_remote_name_req_cancel(hdev, skb);
2811 case HCI_OP_ROLE_DISCOVERY:
2812 hci_cc_role_discovery(hdev, skb);
2815 case HCI_OP_READ_LINK_POLICY:
2816 hci_cc_read_link_policy(hdev, skb);
2819 case HCI_OP_WRITE_LINK_POLICY:
2820 hci_cc_write_link_policy(hdev, skb);
2823 case HCI_OP_READ_DEF_LINK_POLICY:
2824 hci_cc_read_def_link_policy(hdev, skb);
2827 case HCI_OP_WRITE_DEF_LINK_POLICY:
2828 hci_cc_write_def_link_policy(hdev, skb);
2832 hci_cc_reset(hdev, skb);
2835 case HCI_OP_READ_STORED_LINK_KEY:
2836 hci_cc_read_stored_link_key(hdev, skb);
2839 case HCI_OP_DELETE_STORED_LINK_KEY:
2840 hci_cc_delete_stored_link_key(hdev, skb);
2843 case HCI_OP_WRITE_LOCAL_NAME:
2844 hci_cc_write_local_name(hdev, skb);
2847 case HCI_OP_READ_LOCAL_NAME:
2848 hci_cc_read_local_name(hdev, skb);
2851 case HCI_OP_WRITE_AUTH_ENABLE:
2852 hci_cc_write_auth_enable(hdev, skb);
2855 case HCI_OP_WRITE_ENCRYPT_MODE:
2856 hci_cc_write_encrypt_mode(hdev, skb);
2859 case HCI_OP_WRITE_SCAN_ENABLE:
2860 hci_cc_write_scan_enable(hdev, skb);
2863 case HCI_OP_READ_CLASS_OF_DEV:
2864 hci_cc_read_class_of_dev(hdev, skb);
2867 case HCI_OP_WRITE_CLASS_OF_DEV:
2868 hci_cc_write_class_of_dev(hdev, skb);
2871 case HCI_OP_READ_VOICE_SETTING:
2872 hci_cc_read_voice_setting(hdev, skb);
2875 case HCI_OP_WRITE_VOICE_SETTING:
2876 hci_cc_write_voice_setting(hdev, skb);
2879 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2880 hci_cc_read_num_supported_iac(hdev, skb);
2883 case HCI_OP_WRITE_SSP_MODE:
2884 hci_cc_write_ssp_mode(hdev, skb);
2887 case HCI_OP_WRITE_SC_SUPPORT:
2888 hci_cc_write_sc_support(hdev, skb);
2891 case HCI_OP_READ_LOCAL_VERSION:
2892 hci_cc_read_local_version(hdev, skb);
2895 case HCI_OP_READ_LOCAL_COMMANDS:
2896 hci_cc_read_local_commands(hdev, skb);
2899 case HCI_OP_READ_LOCAL_FEATURES:
2900 hci_cc_read_local_features(hdev, skb);
2903 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2904 hci_cc_read_local_ext_features(hdev, skb);
2907 case HCI_OP_READ_BUFFER_SIZE:
2908 hci_cc_read_buffer_size(hdev, skb);
2911 case HCI_OP_READ_BD_ADDR:
2912 hci_cc_read_bd_addr(hdev, skb);
2915 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2916 hci_cc_read_page_scan_activity(hdev, skb);
2919 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2920 hci_cc_write_page_scan_activity(hdev, skb);
2923 case HCI_OP_READ_PAGE_SCAN_TYPE:
2924 hci_cc_read_page_scan_type(hdev, skb);
2927 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2928 hci_cc_write_page_scan_type(hdev, skb);
2931 case HCI_OP_READ_DATA_BLOCK_SIZE:
2932 hci_cc_read_data_block_size(hdev, skb);
2935 case HCI_OP_READ_FLOW_CONTROL_MODE:
2936 hci_cc_read_flow_control_mode(hdev, skb);
2939 case HCI_OP_READ_LOCAL_AMP_INFO:
2940 hci_cc_read_local_amp_info(hdev, skb);
2943 case HCI_OP_READ_CLOCK:
2944 hci_cc_read_clock(hdev, skb);
2947 case HCI_OP_READ_INQ_RSP_TX_POWER:
2948 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2951 case HCI_OP_PIN_CODE_REPLY:
2952 hci_cc_pin_code_reply(hdev, skb);
2955 case HCI_OP_PIN_CODE_NEG_REPLY:
2956 hci_cc_pin_code_neg_reply(hdev, skb);
2959 case HCI_OP_READ_LOCAL_OOB_DATA:
2960 hci_cc_read_local_oob_data(hdev, skb);
2963 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2964 hci_cc_read_local_oob_ext_data(hdev, skb);
2967 case HCI_OP_LE_READ_BUFFER_SIZE:
2968 hci_cc_le_read_buffer_size(hdev, skb);
2971 case HCI_OP_LE_READ_LOCAL_FEATURES:
2972 hci_cc_le_read_local_features(hdev, skb);
2975 case HCI_OP_LE_READ_ADV_TX_POWER:
2976 hci_cc_le_read_adv_tx_power(hdev, skb);
2979 case HCI_OP_USER_CONFIRM_REPLY:
2980 hci_cc_user_confirm_reply(hdev, skb);
2983 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2984 hci_cc_user_confirm_neg_reply(hdev, skb);
2987 case HCI_OP_USER_PASSKEY_REPLY:
2988 hci_cc_user_passkey_reply(hdev, skb);
2991 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2992 hci_cc_user_passkey_neg_reply(hdev, skb);
2995 case HCI_OP_LE_SET_RANDOM_ADDR:
2996 hci_cc_le_set_random_addr(hdev, skb);
2999 case HCI_OP_LE_SET_ADV_ENABLE:
3000 hci_cc_le_set_adv_enable(hdev, skb);
3003 case HCI_OP_LE_SET_SCAN_PARAM:
3004 hci_cc_le_set_scan_param(hdev, skb);
3007 case HCI_OP_LE_SET_SCAN_ENABLE:
3008 hci_cc_le_set_scan_enable(hdev, skb);
3011 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3012 hci_cc_le_read_white_list_size(hdev, skb);
3015 case HCI_OP_LE_CLEAR_WHITE_LIST:
3016 hci_cc_le_clear_white_list(hdev, skb);
3019 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3020 hci_cc_le_add_to_white_list(hdev, skb);
3023 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3024 hci_cc_le_del_from_white_list(hdev, skb);
3027 case HCI_OP_LE_READ_SUPPORTED_STATES:
3028 hci_cc_le_read_supported_states(hdev, skb);
3031 case HCI_OP_LE_READ_DEF_DATA_LEN:
3032 hci_cc_le_read_def_data_len(hdev, skb);
3035 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3036 hci_cc_le_write_def_data_len(hdev, skb);
3039 case HCI_OP_LE_READ_MAX_DATA_LEN:
3040 hci_cc_le_read_max_data_len(hdev, skb);
3043 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3044 hci_cc_write_le_host_supported(hdev, skb);
3047 case HCI_OP_LE_SET_ADV_PARAM:
3048 hci_cc_set_adv_param(hdev, skb);
3051 case HCI_OP_READ_RSSI:
3052 hci_cc_read_rssi(hdev, skb);
3055 case HCI_OP_READ_TX_POWER:
3056 hci_cc_read_tx_power(hdev, skb);
3059 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3060 hci_cc_write_ssp_debug_mode(hdev, skb);
/* Default case: unhandled opcode, just log it. */
3064 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
/* Any real command completing means the controller is alive. */
3068 if (*opcode != HCI_OP_NOP)
3069 cancel_delayed_work(&hdev->cmd_timer);
/* Refill the single outstanding-command credit unless resetting. */
3071 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3072 atomic_set(&hdev->cmd_cnt, 1);
3074 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3077 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3078 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Command Status event: report the status of a command
 * whose real result will arrive in a later event. Dispatches to the
 * per-opcode hci_cs_* handlers and restarts command-queue processing.
 * NOTE(review): this listing is truncated -- break statements, braces
 * and some lines between cases are not visible here.
 */
3081 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3082 u16 *opcode, u8 *status,
3083 hci_req_complete_t *req_complete,
3084 hci_req_complete_skb_t *req_complete_skb)
3086 struct hci_ev_cmd_status *ev = (void *) skb->data;
3088 skb_pull(skb, sizeof(*ev));
/* Report opcode/status back to the caller (the event demultiplexer) */
3090 *opcode = __le16_to_cpu(ev->opcode);
3091 *status = ev->status;
3094 case HCI_OP_INQUIRY:
3095 hci_cs_inquiry(hdev, ev->status);
3098 case HCI_OP_CREATE_CONN:
3099 hci_cs_create_conn(hdev, ev->status);
3102 case HCI_OP_DISCONNECT:
3103 hci_cs_disconnect(hdev, ev->status);
3106 case HCI_OP_ADD_SCO:
3107 hci_cs_add_sco(hdev, ev->status);
3110 case HCI_OP_AUTH_REQUESTED:
3111 hci_cs_auth_requested(hdev, ev->status);
3114 case HCI_OP_SET_CONN_ENCRYPT:
3115 hci_cs_set_conn_encrypt(hdev, ev->status);
3118 case HCI_OP_REMOTE_NAME_REQ:
3119 hci_cs_remote_name_req(hdev, ev->status);
3122 case HCI_OP_READ_REMOTE_FEATURES:
3123 hci_cs_read_remote_features(hdev, ev->status);
3126 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3127 hci_cs_read_remote_ext_features(hdev, ev->status);
3130 case HCI_OP_SETUP_SYNC_CONN:
3131 hci_cs_setup_sync_conn(hdev, ev->status);
3134 case HCI_OP_CREATE_PHY_LINK:
3135 hci_cs_create_phylink(hdev, ev->status);
3138 case HCI_OP_ACCEPT_PHY_LINK:
3139 hci_cs_accept_phylink(hdev, ev->status);
3142 case HCI_OP_SNIFF_MODE:
3143 hci_cs_sniff_mode(hdev, ev->status);
3146 case HCI_OP_EXIT_SNIFF_MODE:
3147 hci_cs_exit_sniff_mode(hdev, ev->status);
3150 case HCI_OP_SWITCH_ROLE:
3151 hci_cs_switch_role(hdev, ev->status);
3154 case HCI_OP_LE_CREATE_CONN:
3155 hci_cs_le_create_conn(hdev, ev->status);
3158 case HCI_OP_LE_READ_REMOTE_FEATURES:
3159 hci_cs_le_read_remote_features(hdev, ev->status);
3162 case HCI_OP_LE_START_ENC:
3163 hci_cs_le_start_enc(hdev, ev->status);
3167 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
/* A real (non-NOP) status means the controller answered: stop the
 * command timeout watchdog.
 */
3171 if (*opcode != HCI_OP_NOP)
3172 cancel_delayed_work(&hdev->cmd_timer);
/* ncmd > 0 means the controller can accept more commands; while a
 * reset is pending cmd_cnt is managed elsewhere, so skip it then.
 */
3174 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3175 atomic_set(&hdev->cmd_cnt, 1);
3177 /* Indicate request completion if the command failed. Also, if
3178 * we're not waiting for a special event and we get a success
3179 * command status we should try to flag the request as completed
3180 * (since for this kind of commands there will not be a command
3184 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
3185 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
/* Resume sending if commands are queued and credits are available */
3188 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3189 queue_work(hdev->workqueue, &hdev->cmd_work)
/* Handle the HCI Hardware Error event: remember the controller's error
 * code and schedule the error-reset work to recover the device.
 */
3192 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3194 struct hci_ev_hardware_error *ev = (void *) skb->data;
3196 hdev->hw_error_code = ev->code;
3198 queue_work(hdev->req_workqueue, &hdev->error_reset)
/* Handle the Role Change event: update the connection's master/slave
 * role (on success) and confirm any pending role-switch request.
 * NOTE(review): listing truncated -- hci_dev_lock() and status checks
 * from the original are not visible here.
 */
3201 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3203 struct hci_ev_role_change *ev = (void *) skb->data;
3204 struct hci_conn *conn;
3206 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3210 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3213 conn->role = ev->role;
3215 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3217 hci_role_switch_cfm(conn, ev->status, ev->role);
3220 hci_dev_unlock(hdev)
/* Handle the Number Of Completed Packets event (packet-based flow
 * control): return transmit credits to the per-type counters for each
 * reported connection handle and kick the TX work to send more data.
 */
3223 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3225 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only meaningful in packet-based flow control mode */
3228 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3229 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the claimed handle count against the actual skb length to
 * avoid reading past the end of a malformed event.
 */
3233 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3234 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3235 BT_DBG("%s bad parameters", hdev->name);
3239 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3241 for (i = 0; i < ev->num_hndl; i++) {
3242 struct hci_comp_pkts_info *info = &ev->handles[i];
3243 struct hci_conn *conn;
3244 __u16 handle, count;
3246 handle = __le16_to_cpu(info->handle);
3247 count = __le16_to_cpu(info->count);
3249 conn = hci_conn_hash_lookup_handle(hdev, handle);
3253 conn->sent -= count;
/* Credit the right pool; counters are clamped so a misbehaving
 * controller cannot inflate them past the advertised buffer counts.
 */
3255 switch (conn->type) {
3257 hdev->acl_cnt += count;
3258 if (hdev->acl_cnt > hdev->acl_pkts)
3259 hdev->acl_cnt = hdev->acl_pkts;
/* LE links use the dedicated LE pool if one exists, otherwise
 * they share the ACL buffer pool.
 */
3263 if (hdev->le_pkts) {
3264 hdev->le_cnt += count;
3265 if (hdev->le_cnt > hdev->le_pkts)
3266 hdev->le_cnt = hdev->le_pkts;
3268 hdev->acl_cnt += count;
3269 if (hdev->acl_cnt > hdev->acl_pkts)
3270 hdev->acl_cnt = hdev->acl_pkts;
3275 hdev->sco_cnt += count;
3276 if (hdev->sco_cnt > hdev->sco_pkts)
3277 hdev->sco_cnt = hdev->sco_pkts;
3281 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3286 queue_work(hdev->workqueue, &hdev->tx_work)
/* Resolve a handle to a connection in a device-type-aware way: for a
 * BR/EDR controller look up the connection directly; for an AMP
 * controller look up the channel and (presumably) return its parent
 * connection -- the chan-to-conn line is not visible in this listing.
 */
3289 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3292 struct hci_chan *chan;
3294 switch (hdev->dev_type) {
3296 return hci_conn_hash_lookup_handle(hdev, handle);
3298 chan = hci_chan_lookup_handle(hdev, handle);
3303 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type)
/* Handle the Number Of Completed Data Blocks event (block-based flow
 * control, used by AMP controllers): return block credits per handle
 * and restart TX work. Mirrors hci_num_comp_pkts_evt for block mode.
 */
3310 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3312 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3315 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3316 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Guard against a handle count larger than the received payload */
3320 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3321 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3322 BT_DBG("%s bad parameters", hdev->name);
3326 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3329 for (i = 0; i < ev->num_hndl; i++) {
3330 struct hci_comp_blocks_info *info = &ev->handles[i];
3331 struct hci_conn *conn = NULL;
3332 __u16 handle, block_count;
3334 handle = __le16_to_cpu(info->handle);
3335 block_count = __le16_to_cpu(info->blocks);
3337 conn = __hci_conn_lookup_handle(hdev, handle);
3341 conn->sent -= block_count;
/* Clamp so credits never exceed the advertised number of blocks */
3343 switch (conn->type) {
3346 hdev->block_cnt += block_count;
3347 if (hdev->block_cnt > hdev->num_blocks)
3348 hdev->block_cnt = hdev->num_blocks;
3352 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3357 queue_work(hdev->workqueue, &hdev->tx_work)
/* Handle the Mode Change event: track the connection's power mode
 * (active/sniff) and finish any SCO setup that was waiting for the
 * mode change to complete.
 */
3360 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3362 struct hci_ev_mode_change *ev = (void *) skb->data;
3363 struct hci_conn *conn;
3365 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3369 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3371 conn->mode = ev->mode;
/* Only adjust the power-save flag for mode changes we did not
 * initiate ourselves (no MODE_CHANGE_PEND outstanding).
 */
3373 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3375 if (conn->mode == HCI_CM_ACTIVE)
3376 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3378 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3381 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3382 hci_sco_setup(conn, ev->status);
3385 hci_dev_unlock(hdev)
/* Handle the PIN Code Request event: reject the request when we are
 * not bondable and did not initiate authentication, otherwise forward
 * it to user space via the management interface.
 */
3388 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3390 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3391 struct hci_conn *conn;
3393 BT_DBG("%s", hdev->name);
3397 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Extend the disconnect timeout while pairing is in progress; the
 * hold/drop pair only refreshes the timer without leaking a ref.
 */
3401 if (conn->state == BT_CONNECTED) {
3402 hci_conn_hold(conn);
3403 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3404 hci_conn_drop(conn);
3407 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3408 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3409 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3410 sizeof(ev->bdaddr), &ev->bdaddr);
3411 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
/* High security requires a 16-digit (secure) PIN from user space */
3414 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3419 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3423 hci_dev_unlock(hdev)
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level from the key type (unauthenticated keys
 * give medium security, authenticated P-192 high, P-256 FIPS).
 */
3426 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3428 if (key_type == HCI_LK_CHANGED_COMBINATION)
3431 conn->pin_length = pin_len;
3432 conn->key_type = key_type;
3435 case HCI_LK_LOCAL_UNIT:
3436 case HCI_LK_REMOTE_UNIT:
3437 case HCI_LK_DEBUG_COMBINATION:
/* Legacy combination key: security depends on PIN length --
 * presumably a long (16-digit) PIN yields high security; the
 * condition line itself is not visible in this listing.
 */
3439 case HCI_LK_COMBINATION:
3441 conn->pending_sec_level = BT_SECURITY_HIGH;
3443 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3445 case HCI_LK_UNAUTH_COMBINATION_P192:
3446 case HCI_LK_UNAUTH_COMBINATION_P256:
3447 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3449 case HCI_LK_AUTH_COMBINATION_P192:
3450 conn->pending_sec_level = BT_SECURITY_HIGH;
3452 case HCI_LK_AUTH_COMBINATION_P256:
3453 conn->pending_sec_level = BT_SECURITY_FIPS
/* Handle the Link Key Request event: look up a stored key for the
 * peer and reply with it, unless the key's strength is insufficient
 * for the pending security level, in which case reply negatively so a
 * fresh pairing takes place.
 */
3458 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3460 struct hci_ev_link_key_req *ev = (void *) skb->data;
3461 struct hci_cp_link_key_reply cp;
3462 struct hci_conn *conn;
3463 struct link_key *key;
3465 BT_DBG("%s", hdev->name);
/* Key storage is only maintained when the management interface is
 * in use.
 */
3467 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3472 key = hci_find_link_key(hdev, &ev->bdaddr);
3474 BT_DBG("%s link key not found for %pMR", hdev->name,
3479 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3482 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3484 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Refuse an unauthenticated key when MITM protection was requested */
3486 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3487 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3488 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3489 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* A combination key from a short PIN is too weak for high/FIPS
 * security levels.
 */
3493 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3494 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3495 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3496 BT_DBG("%s ignoring key unauthenticated for high security",
3501 conn_set_key(conn, key->type, key->pin_len);
3504 bacpy(&cp.bdaddr, &ev->bdaddr);
3505 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3507 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3509 hci_dev_unlock(hdev);
/* Negative reply path: no usable key for this peer */
3514 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3515 hci_dev_unlock(hdev)
/* Handle the Link Key Notification event: store the new key, notify
 * user space, and decide whether the key is kept in the kernel list
 * (debug keys are dropped unless HCI_KEEP_DEBUG_KEYS is set).
 */
3518 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3520 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3521 struct hci_conn *conn;
3522 struct link_key *key;
3526 BT_DBG("%s", hdev->name);
3530 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Refresh the disconnect timeout now that key exchange finished */
3534 hci_conn_hold(conn);
3535 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3536 hci_conn_drop(conn);
3538 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3539 conn_set_key(conn, ev->key_type, conn->pin_length);
3541 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3544 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3545 ev->key_type, pin_len, &persistent);
3549 /* Update connection information since adding the key will have
3550 * fixed up the type in the case of changed combination keys.
3552 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3553 conn_set_key(conn, key->type, key->pin_len);
3555 mgmt_new_link_key(hdev, key, persistent);
3557 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3558 * is set. If it's not set simply remove the key from the kernel
3559 * list (we've still notified user space about it but with
3560 * store_hint being 0).
3562 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3563 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3564 list_del_rcu(&key->list);
3565 kfree_rcu(key, rcu);
/* Mark whether this key should survive disconnection; the branch
 * condition on 'persistent' is not visible in this listing.
 */
3570 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3572 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3575 hci_dev_unlock(hdev)
/* Handle the Read Clock Offset Complete event: cache the peer's clock
 * offset in the inquiry cache to speed up future connections.
 */
3578 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3580 struct hci_ev_clock_offset *ev = (void *) skb->data;
3581 struct hci_conn *conn;
3583 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3587 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3588 if (conn && !ev->status) {
3589 struct inquiry_entry *ie;
3591 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3593 ie->data.clock_offset = ev->clock_offset;
3594 ie->timestamp = jiffies;
3598 hci_dev_unlock(hdev)
/* Handle the Connection Packet Type Changed event: record the newly
 * negotiated packet types on the connection.
 */
3601 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3603 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3604 struct hci_conn *conn;
3606 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3610 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3611 if (conn && !ev->status)
3612 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3614 hci_dev_unlock(hdev)
/* Handle the Page Scan Repetition Mode Change event: update the cached
 * page-scan repetition mode for the peer in the inquiry cache.
 */
3617 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3619 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3620 struct inquiry_entry *ie;
3622 BT_DBG("%s", hdev->name);
3626 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3628 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3629 ie->timestamp = jiffies;
3632 hci_dev_unlock(hdev)
/* Handle the Inquiry Result With RSSI event. Two wire formats exist:
 * one with and one without a pscan_mode field; the format is detected
 * by dividing the payload length by the per-response struct size.
 * Each response updates the inquiry cache and is reported to mgmt.
 */
3635 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3636 struct sk_buff *skb)
3638 struct inquiry_data data;
3639 int num_rsp = *((__u8 *) skb->data);
3641 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results from periodic inquiry are ignored here */
3646 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Variant including the pscan_mode byte */
3651 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3652 struct inquiry_info_with_rssi_and_pscan_mode *info;
3653 info = (void *) (skb->data + 1);
3655 for (; num_rsp; num_rsp--, info++) {
3658 bacpy(&data.bdaddr, &info->bdaddr);
3659 data.pscan_rep_mode = info->pscan_rep_mode;
3660 data.pscan_period_mode = info->pscan_period_mode;
3661 data.pscan_mode = info->pscan_mode;
3662 memcpy(data.dev_class, info->dev_class, 3);
3663 data.clock_offset = info->clock_offset;
3664 data.rssi = info->rssi;
3665 data.ssp_mode = 0x00;
3667 flags = hci_inquiry_cache_update(hdev, &data, false);
3669 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3670 info->dev_class, info->rssi,
3671 flags, NULL, 0, NULL, 0);
/* Standard variant without pscan_mode */
3674 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3676 for (; num_rsp; num_rsp--, info++) {
3679 bacpy(&data.bdaddr, &info->bdaddr);
3680 data.pscan_rep_mode = info->pscan_rep_mode;
3681 data.pscan_period_mode = info->pscan_period_mode;
3682 data.pscan_mode = 0x00;
3683 memcpy(data.dev_class, info->dev_class, 3);
3684 data.clock_offset = info->clock_offset;
3685 data.rssi = info->rssi;
3686 data.ssp_mode = 0x00;
3688 flags = hci_inquiry_cache_update(hdev, &data, false);
3690 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3691 info->dev_class, info->rssi,
3692 flags, NULL, 0, NULL, 0);
3696 hci_dev_unlock(hdev)
/* Handle the Read Remote Extended Features Complete event: cache the
 * feature page, derive the SSP/SC enabled flags from host feature page
 * 1, and continue connection setup (remote name request or mgmt
 * connected notification).
 */
3699 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3700 struct sk_buff *skb)
3702 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3703 struct hci_conn *conn;
3705 BT_DBG("%s", hdev->name);
3709 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3713 if (ev->page < HCI_MAX_PAGES)
3714 memcpy(conn->features[ev->page], ev->features, 8);
3716 if (!ev->status && ev->page == 0x01) {
3717 struct inquiry_entry *ie;
3719 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3721 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3723 if (ev->features[0] & LMP_HOST_SSP) {
3724 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3726 /* It is mandatory by the Bluetooth specification that
3727 * Extended Inquiry Results are only used when Secure
3728 * Simple Pairing is enabled, but some devices violate
3731 * To make these devices work, the internal SSP
3732 * enabled flag needs to be cleared if the remote host
3733 * features do not indicate SSP support */
3734 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3737 if (ev->features[0] & LMP_HOST_SC)
3738 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Past this point only the connection-setup path continues */
3741 if (conn->state != BT_CONFIG)
3744 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3745 struct hci_cp_remote_name_req cp;
3746 memset(&cp, 0, sizeof(cp));
3747 bacpy(&cp.bdaddr, &conn->dst);
3748 cp.pscan_rep_mode = 0x02;
3749 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3750 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3751 mgmt_device_connected(hdev, conn, 0, NULL, 0);
/* If no outgoing authentication is needed, setup is complete */
3753 if (!hci_outgoing_auth_needed(hdev, conn)) {
3754 conn->state = BT_CONNECTED;
3755 hci_connect_cfm(conn, ev->status);
3756 hci_conn_drop(conn);
3760 hci_dev_unlock(hdev)
/* Handle the Synchronous Connection Complete event (SCO/eSCO). On
 * certain failure codes the eSCO attempt is retried as plain SCO with
 * a restricted packet-type mask; on success the connection is
 * registered with debugfs/sysfs and confirmed.
 */
3763 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3764 struct sk_buff *skb)
3766 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3767 struct hci_conn *conn;
3769 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3773 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Some controllers report SCO where we attempted eSCO; fall back to
 * the eSCO entry and downgrade its type to SCO.
 */
3775 if (ev->link_type == ESCO_LINK)
3778 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3782 conn->type = SCO_LINK;
3785 switch (ev->status) {
3787 conn->handle = __le16_to_cpu(ev->handle);
3788 conn->state = BT_CONNECTED;
3790 hci_debugfs_create_conn(conn);
3791 hci_conn_add_sysfs(conn);
3794 case 0x10: /* Connection Accept Timeout */
3795 case 0x0d: /* Connection Rejected due to Limited Resources */
3796 case 0x11: /* Unsupported Feature or Parameter Value */
3797 case 0x1c: /* SCO interval rejected */
3798 case 0x1a: /* Unsupported Remote Feature */
3799 case 0x1f: /* Unspecified error */
3800 case 0x20: /* Unsupported LMP Parameter value */
/* Retry with a reduced packet-type mask before giving up */
3802 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3803 (hdev->esco_type & EDR_ESCO_MASK);
3804 if (hci_setup_sync(conn, conn->link->handle))
3810 conn->state = BT_CLOSED;
3814 hci_connect_cfm(conn, ev->status);
3819 hci_dev_unlock(hdev)
/* Compute the significant length of an EIR (Extended Inquiry Response)
 * blob by walking its length-prefixed fields until a terminator or the
 * buffer end. The zero-length-field termination check is not visible
 * in this listing.
 */
3822 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3826 while (parsed < eir_len) {
3827 u8 field_len = eir[0];
/* Each field occupies field_len + 1 bytes (length byte included) */
3832 parsed += field_len + 1;
3833 eir += field_len + 1
/* Handle the Extended Inquiry Result event: for each response update
 * the inquiry cache (noting whether the device name is already known
 * from the EIR data) and report the device with its EIR payload to
 * the management interface.
 */
3839 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3840 struct sk_buff *skb)
3842 struct inquiry_data data;
3843 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3844 int num_rsp = *((__u8 *) skb->data);
3847 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3852 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3857 for (; num_rsp; num_rsp--, info++) {
3861 bacpy(&data.bdaddr, &info->bdaddr);
3862 data.pscan_rep_mode = info->pscan_rep_mode;
3863 data.pscan_period_mode = info->pscan_period_mode;
3864 data.pscan_mode = 0x00;
3865 memcpy(data.dev_class, info->dev_class, 3);
3866 data.clock_offset = info->clock_offset;
3867 data.rssi = info->rssi;
/* EIR results imply the remote has SSP enabled */
3868 data.ssp_mode = 0x01;
/* If the EIR already carries the device name we can skip a
 * separate remote-name request later.
 */
3870 if (hci_dev_test_flag(hdev, HCI_MGMT))
3871 name_known = eir_has_data_type(info->data,
3877 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3879 eir_len = eir_get_length(info->data, sizeof(info->data));
3881 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3882 info->dev_class, info->rssi,
3883 flags, info->data, eir_len, NULL, 0);
3886 hci_dev_unlock(hdev)
/* Handle the Encryption Key Refresh Complete event. Only acted on for
 * LE links (BR/EDR handles this via the auth_complete event): promote
 * the pending security level, finish connection setup, or tear the
 * link down on failure.
 */
3889 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3890 struct sk_buff *skb)
3892 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3893 struct hci_conn *conn;
3895 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3896 __le16_to_cpu(ev->handle));
3900 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3904 /* For BR/EDR the necessary steps are taken through the
3905 * auth_complete event.
3907 if (conn->type != LE_LINK)
3911 conn->sec_level = conn->pending_sec_level;
3913 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* A failed refresh on an established link is treated as an
 * authentication failure and the link is disconnected.
 */
3915 if (ev->status && conn->state == BT_CONNECTED) {
3916 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3917 hci_conn_drop(conn);
3921 if (conn->state == BT_CONFIG) {
3923 conn->state = BT_CONNECTED;
3925 hci_connect_cfm(conn, ev->status);
3926 hci_conn_drop(conn);
3928 hci_auth_cfm(conn, ev->status);
3930 hci_conn_hold(conn);
3931 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3932 hci_conn_drop(conn);
3936 hci_dev_unlock(hdev)
/* Compute the authentication requirements to send in an IO Capability
 * Reply, combining the remote's stated requirements with our own MITM
 * preference and what the IO capabilities on both sides allow.
 */
3939 static u8 hci_get_auth_req(struct hci_conn *conn)
3941 /* If remote requests no-bonding follow that lead */
3942 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3943 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3944 return conn->remote_auth | (conn->auth_type & 0x01);
3946 /* If both remote and local have enough IO capabilities, require
3949 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3950 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3951 return conn->remote_auth | 0x01;
3953 /* No MITM protection possible so ignore remote requirement */
3954 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01)
/* Determine the OOB-data-present value for an IO Capability Reply:
 * whether (and which) out-of-band pairing data is stored for the
 * peer, honouring Secure Connections / SC-Only mode constraints.
 */
3957 static u8 bredr_oob_data_present(struct hci_conn *conn)
3959 struct hci_dev *hdev = conn->hdev;
3960 struct oob_data *data;
3962 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3966 if (bredr_sc_enabled(hdev)) {
3967 /* When Secure Connections is enabled, then just
3968 * return the present value stored with the OOB
3969 * data. The stored value contains the right present
3970 * information. However it can only be trusted when
3971 * not in Secure Connection Only mode.
3973 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3974 return data->present;
3976 /* When Secure Connections Only mode is enabled, then
3977 * the P-256 values are required. If they are not
3978 * available, then do not declare that OOB data is
3981 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3982 !memcmp(data->hash256, ZERO_KEY, 16))
3988 /* When Secure Connections is not enabled or actually
3989 * not supported by the hardware, then check that if
3990 * P-192 data values are present.
3992 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3993 !memcmp(data->hash192, ZERO_KEY, 16))
/* Handle the IO Capability Request event: if pairing is permitted,
 * reply with our IO capability, authentication requirements and OOB
 * data availability; otherwise send a negative reply rejecting the
 * pairing attempt.
 */
3999 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4001 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4002 struct hci_conn *conn;
4004 BT_DBG("%s", hdev->name);
4008 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4012 hci_conn_hold(conn);
4014 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4017 /* Allow pairing if we're pairable, the initiators of the
4018 * pairing or if the remote is not requesting bonding.
4020 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4021 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4022 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4023 struct hci_cp_io_capability_reply cp;
4025 bacpy(&cp.bdaddr, &ev->bdaddr);
4026 /* Change the IO capability from KeyboardDisplay
4027 * to DisplayYesNo as it is not supported by BT spec. */
4028 cp.capability = (conn->io_capability == 0x04) ?
4029 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4031 /* If we are initiators, there is no remote information yet */
4032 if (conn->remote_auth == 0xff) {
4033 /* Request MITM protection if our IO caps allow it
4034 * except for the no-bonding case.
4036 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4037 conn->auth_type != HCI_AT_NO_BONDING)
4038 conn->auth_type |= 0x01;
4040 conn->auth_type = hci_get_auth_req(conn);
4043 /* If we're not bondable, force one of the non-bondable
4044 * authentication requirement values.
4046 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4047 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4049 cp.authentication = conn->auth_type;
4050 cp.oob_data = bredr_oob_data_present(conn);
4052 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4055 struct hci_cp_io_capability_neg_reply cp;
4057 bacpy(&cp.bdaddr, &ev->bdaddr);
4058 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4060 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4065 hci_dev_unlock(hdev)
/* Handle the IO Capability Response event: record the remote side's
 * IO capability and authentication requirements for later pairing
 * decisions.
 */
4068 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4070 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4071 struct hci_conn *conn;
4073 BT_DBG("%s", hdev->name);
4077 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4081 conn->remote_cap = ev->capability;
4082 conn->remote_auth = ev->authentication;
4085 hci_dev_unlock(hdev)
/* Handle the User Confirmation Request event (numeric comparison):
 * reject if required MITM protection cannot be met, auto-accept when
 * neither side needs MITM (optionally after a configured delay), and
 * otherwise forward the request to user space via mgmt.
 */
4088 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4089 struct sk_buff *skb)
4091 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4092 int loc_mitm, rem_mitm, confirm_hint = 0;
4093 struct hci_conn *conn;
4095 BT_DBG("%s", hdev->name);
4099 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4102 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement encodes the MITM flag */
4106 loc_mitm = (conn->auth_type & 0x01);
4107 rem_mitm = (conn->remote_auth & 0x01);
4109 /* If we require MITM but the remote device can't provide that
4110 * (it has NoInputNoOutput) then reject the confirmation
4111 * request. We check the security level here since it doesn't
4112 * necessarily match conn->auth_type.
4114 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4115 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4116 BT_DBG("Rejecting request: remote device can't provide MITM");
4117 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4118 sizeof(ev->bdaddr), &ev->bdaddr);
4122 /* If no side requires MITM protection; auto-accept */
4123 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4124 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4126 /* If we're not the initiators request authorization to
4127 * proceed from user space (mgmt_user_confirm with
4128 * confirm_hint set to 1). The exception is if neither
4129 * side had MITM or if the local IO capability is
4130 * NoInputNoOutput, in which case we do auto-accept
4132 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4133 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4134 (loc_mitm || rem_mitm)) {
4135 BT_DBG("Confirming auto-accept as acceptor");
4140 BT_DBG("Auto-accept of user confirmation with %ums delay",
4141 hdev->auto_accept_delay);
/* Optional delayed auto-accept to give the user a chance to see
 * the passkey on the display.
 */
4143 if (hdev->auto_accept_delay > 0) {
4144 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4145 queue_delayed_work(conn->hdev->workqueue,
4146 &conn->auto_accept_work, delay);
4150 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4151 sizeof(ev->bdaddr), &ev->bdaddr);
4156 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4157 le32_to_cpu(ev->passkey), confirm_hint);
4160 hci_dev_unlock(hdev)
/* Handle the User Passkey Request event: forward it straight to user
 * space via the management interface (which collects the passkey).
 */
4163 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4164 struct sk_buff *skb)
4166 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4168 BT_DBG("%s", hdev->name);
4170 if (hci_dev_test_flag(hdev, HCI_MGMT))
4171 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0)
/* Handle the User Passkey Notification event: remember the passkey to
 * be displayed, reset the entered-digit counter and notify user space.
 */
4174 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4175 struct sk_buff *skb)
4177 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4178 struct hci_conn *conn;
4180 BT_DBG("%s", hdev->name);
4182 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4186 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4187 conn->passkey_entered = 0;
4189 if (hci_dev_test_flag(hdev, HCI_MGMT))
4190 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4191 conn->dst_type, conn->passkey_notify,
4192 conn->passkey_entered)
/* Handle the Keypress Notification event: maintain a running count of
 * passkey digits entered/erased on the remote device and report the
 * updated state to user space for display.
 */
4195 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4197 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4198 struct hci_conn *conn;
4200 BT_DBG("%s", hdev->name);
4202 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4207 case HCI_KEYPRESS_STARTED:
4208 conn->passkey_entered = 0;
4211 case HCI_KEYPRESS_ENTERED:
4212 conn->passkey_entered++;
4215 case HCI_KEYPRESS_ERASED:
4216 conn->passkey_entered--;
4219 case HCI_KEYPRESS_CLEARED:
4220 conn->passkey_entered = 0;
4223 case HCI_KEYPRESS_COMPLETED:
4227 if (hci_dev_test_flag(hdev, HCI_MGMT))
4228 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4229 conn->dst_type, conn->passkey_notify,
4230 conn->passkey_entered)
/* Handle the Simple Pairing Complete event: reset the cached remote
 * authentication requirement and report a pairing failure to user
 * space when we were not the authentication initiator.
 */
4233 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4234 struct sk_buff *skb)
4236 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4237 struct hci_conn *conn;
4239 BT_DBG("%s", hdev->name);
4243 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4247 /* Reset the authentication requirement to unknown */
4248 conn->remote_auth = 0xff;
4250 /* To avoid duplicate auth_failed events to user space we check
4251 * the HCI_CONN_AUTH_PEND flag which will be set if we
4252 * initiated the authentication. A traditional auth_complete
4253 * event gets always produced as initiator and is also mapped to
4254 * the mgmt_auth_failed event */
4255 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4256 mgmt_auth_failed(conn, ev->status);
4258 hci_conn_drop(conn);
4261 hci_dev_unlock(hdev)
/* Handle the Remote Host Supported Features Notification event: cache
 * feature page 1 on any existing connection and record the remote's
 * SSP support in the inquiry cache.
 */
4264 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4265 struct sk_buff *skb)
4267 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4268 struct inquiry_entry *ie;
4269 struct hci_conn *conn;
4271 BT_DBG("%s", hdev->name);
4275 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4277 memcpy(conn->features[1], ev->features, 8);
4279 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4281 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4283 hci_dev_unlock(hdev)
/* Handle the Remote OOB Data Request event: reply with the stored
 * hash/randomizer values for the peer (the extended P-192+P-256 form
 * when Secure Connections is enabled), or a negative reply when no
 * OOB data is stored.
 */
4286 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4287 struct sk_buff *skb)
4289 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4290 struct oob_data *data;
4292 BT_DBG("%s", hdev->name);
4296 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4299 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored OOB data for this peer: negative reply */
4301 struct hci_cp_remote_oob_data_neg_reply cp;
4303 bacpy(&cp.bdaddr, &ev->bdaddr);
4304 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4309 if (bredr_sc_enabled(hdev)) {
4310 struct hci_cp_remote_oob_ext_data_reply cp;
4312 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-Only mode the legacy P-192 values must not be used */
4313 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4314 memset(cp.hash192, 0, sizeof(cp.hash192));
4315 memset(cp.rand192, 0, sizeof(cp.rand192));
4317 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4318 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4320 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4321 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4323 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4326 struct hci_cp_remote_oob_data_reply cp;
4328 bacpy(&cp.bdaddr, &ev->bdaddr);
4329 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4330 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4332 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4337 hci_dev_unlock(hdev)
4340 #if IS_ENABLED(CONFIG_BT_HS)
/* Handle the AMP Channel Selected event (High Speed support): look up
 * the physical-link connection and trigger reading of the final local
 * AMP association data.
 */
4341 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4343 struct hci_ev_channel_selected *ev = (void *)skb->data;
4344 struct hci_conn *hcon;
4346 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4348 skb_pull(skb, sizeof(*ev));
4350 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4354 amp_read_loc_assoc_final_data(hdev, hcon)
/* Handle the AMP Physical Link Complete event: mark the AMP connection
 * as connected, copy the peer address from the underlying BR/EDR link,
 * register it with debugfs/sysfs and confirm the physical link to the
 * AMP manager.
 */
4357 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4358 struct sk_buff *skb)
4360 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4361 struct hci_conn *hcon, *bredr_hcon;
4363 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4368 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4370 hci_dev_unlock(hdev);
4376 hci_dev_unlock(hdev);
/* The BR/EDR connection that carries the AMP manager's L2CAP link */
4380 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4382 hcon->state = BT_CONNECTED;
4383 bacpy(&hcon->dst, &bredr_hcon->dst);
4385 hci_conn_hold(hcon);
4386 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4387 hci_conn_drop(hcon);
4389 hci_debugfs_create_conn(hcon);
4390 hci_conn_add_sysfs(hcon);
4392 amp_physical_cfm(bredr_hcon, hcon);
4394 hci_dev_unlock(hdev)
/* HCI Logical Link Complete event (AMP): a logical link has been created
 * on top of an existing AMP physical link.  Create the corresponding
 * hci_chan, record its handle, and if an L2CAP channel is waiting on the
 * AMP manager, confirm the logical link to L2CAP with the block MTU.
 * (Excerpt elides some lines, e.g. guards after the hcon/hchan lookups.)
 */
4397 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4399 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4400 struct hci_conn *hcon;
4401 struct hci_chan *hchan;
4402 struct amp_mgr *mgr;
4404 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4405 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
/* The logical link hangs off the physical link's hci_conn. */
4408 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4412 /* Create AMP hchan */
4413 hchan = hci_chan_create(hcon);
4417 hchan->handle = le16_to_cpu(ev->handle);
4419 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4421 mgr = hcon->amp_mgr;
4422 if (mgr && mgr->bredr_chan) {
4423 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4425 l2cap_chan_lock(bredr_chan);
/* AMP data is block based; switch the L2CAP connection MTU to the
 * controller's block MTU before confirming the logical link.
 */
4427 bredr_chan->conn->mtu = hdev->block_mtu;
4428 l2cap_logical_cfm(bredr_chan, hchan, 0);
4429 hci_conn_hold(hcon);
4431 l2cap_chan_unlock(bredr_chan);
/* HCI Disconnection Logical Link Complete event (AMP): the logical link
 * identified by ev->handle is gone.  Look up its hci_chan and tear it
 * down via the AMP layer, passing the disconnect reason along.
 * (Excerpt elides some lines, e.g. status and !hchan guards.)
 */
4435 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4436 struct sk_buff *skb)
4438 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4439 struct hci_chan *hchan;
4441 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4442 le16_to_cpu(ev->handle), ev->status);
4449 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4453 amp_destroy_logical_link(hchan, ev->reason);
4456 hci_dev_unlock(hdev);
/* HCI Disconnection Physical Link Complete event (AMP): the physical link
 * identified by ev->phy_handle has been disconnected.  Mark the matching
 * hci_conn closed; further cleanup lines are elided from this excerpt.
 */
4459 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4460 struct sk_buff *skb)
4462 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4463 struct hci_conn *hcon;
4465 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4472 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4474 hcon->state = BT_CLOSED;
4478 hci_dev_unlock(hdev);
/* HCI LE Connection Complete event: an LE link has been established (or
 * failed).  Finds or creates the hci_conn, fills in initiator/responder
 * address types for both master and slave roles, resolves RPAs back to
 * identity addresses via the IRK store, rejects blacklisted peers,
 * notifies mgmt, optionally starts the remote-features exchange, and
 * finally releases any pending connection parameters for this peer.
 * (Excerpt is non-contiguous: several guards, braces and else-branches
 * between the visible lines are elided.)
 */
4482 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4484 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4485 struct hci_conn_params *params;
4486 struct hci_conn *conn;
4487 struct smp_irk *irk;
4490 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4494 /* All controllers implicitly stop advertising in the event of a
4495 * connection, so ensure that the state bit is cleared.
4497 hci_dev_clear_flag(hdev, HCI_LE_ADV);
/* Prefer an existing conn in BT_CONNECT state (we initiated); create a
 * fresh one otherwise.
 */
4499 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
4501 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4503 BT_ERR("No memory for new connection");
4507 conn->dst_type = ev->bdaddr_type;
4509 /* If we didn't have a hci_conn object previously
4510 * but we're in master role this must be something
4511 * initiated using a white list. Since white list based
4512 * connections are not "first class citizens" we don't
4513 * have full tracking of them. Therefore, we go ahead
4514 * with a "best effort" approach of determining the
4515 * initiator address based on the HCI_PRIVACY flag.
4518 conn->resp_addr_type = ev->bdaddr_type;
4519 bacpy(&conn->resp_addr, &ev->bdaddr);
4520 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4521 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4522 bacpy(&conn->init_addr, &hdev->rpa);
4524 hci_copy_identity_address(hdev,
4526 &conn->init_addr_type);
/* Outgoing attempt resolved one way or the other: stop the
 * LE connection timeout timer.
 */
4530 cancel_delayed_work(&conn->le_conn_timeout);
4534 /* Set the responder (our side) address type based on
4535 * the advertising address type.
4537 conn->resp_addr_type = hdev->adv_addr_type;
4538 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4539 bacpy(&conn->resp_addr, &hdev->random_addr);
4541 bacpy(&conn->resp_addr, &hdev->bdaddr);
4543 conn->init_addr_type = ev->bdaddr_type;
4544 bacpy(&conn->init_addr, &ev->bdaddr);
4546 /* For incoming connections, set the default minimum
4547 * and maximum connection interval. They will be used
4548 * to check if the parameters are in range and if not
4549 * trigger the connection update procedure.
4551 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4552 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4555 /* Lookup the identity address from the stored connection
4556 * address and address type.
4558 * When establishing connections to an identity address, the
4559 * connection procedure will store the resolvable random
4560 * address first. Now if it can be converted back into the
4561 * identity address, start using the identity address from
4564 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4566 bacpy(&conn->dst, &irk->bdaddr);
4567 conn->dst_type = irk->addr_type;
/* Non-zero status path: report the failed LE connection. */
4571 hci_le_conn_failed(conn, ev->status);
4575 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4576 addr_type = BDADDR_LE_PUBLIC;
4578 addr_type = BDADDR_LE_RANDOM;
4580 /* Drop the connection if the device is blocked */
4581 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4582 hci_conn_drop(conn);
4586 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4587 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4589 conn->sec_level = BT_SECURITY_LOW;
4590 conn->handle = __le16_to_cpu(ev->handle);
4591 conn->state = BT_CONFIG;
/* Record the negotiated connection parameters from the event. */
4593 conn->le_conn_interval = le16_to_cpu(ev->interval);
4594 conn->le_conn_latency = le16_to_cpu(ev->latency);
4595 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4597 hci_debugfs_create_conn(conn);
4598 hci_conn_add_sysfs(conn);
4601 /* The remote features procedure is defined for master
4602 * role only. So only in case of an initiated connection
4603 * request the remote features.
4605 * If the local controller supports slave-initiated features
4606 * exchange, then requesting the remote features in slave
4607 * role is possible. Otherwise just transition into the
4608 * connected state without requesting the remote features.
4611 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
4612 struct hci_cp_le_read_remote_features cp;
4614 cp.handle = __cpu_to_le16(conn->handle);
4616 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
4619 hci_conn_hold(conn);
/* No features exchange: go straight to connected. */
4621 conn->state = BT_CONNECTED;
4622 hci_connect_cfm(conn, ev->status);
4625 hci_connect_cfm(conn, ev->status);
/* A pending auto-connect action for this peer is now satisfied;
 * drop the reference stored by check_pending_le_conn().
 */
4628 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4631 list_del_init(&params->action);
4633 hci_conn_drop(params->conn);
4634 hci_conn_put(params->conn);
4635 params->conn = NULL;
4640 hci_update_background_scan(hdev);
4641 hci_dev_unlock(hdev);
/* HCI LE Connection Update Complete event: the link-layer connection
 * parameters changed.  Refresh the cached interval/latency/supervision
 * timeout on the matching hci_conn.
 * (Excerpt elides the status and !conn guard lines.)
 */
4644 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4645 struct sk_buff *skb)
4647 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4648 struct hci_conn *conn;
4650 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4657 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4659 conn->le_conn_interval = le16_to_cpu(ev->interval);
4660 conn->le_conn_latency = le16_to_cpu(ev->latency);
4661 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4664 hci_dev_unlock(hdev);
4667 /* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an LE
 * connection attempt.  Returns the new hci_conn on success (the pointer
 * is also stored in params->conn), or effectively nothing when the
 * report is not connectable, the device is blocked, we already act as
 * slave, or no matching pend_le_conns entry exists.
 * (Excerpt is non-contiguous: early returns, brace lines and some case
 * labels are elided.)
 */
4668 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4670 u8 addr_type, u8 adv_type)
4672 struct hci_conn *conn;
4673 struct hci_conn_params *params;
4675 /* If the event is not connectable don't proceed further */
4676 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4679 /* Ignore if the device is blocked */
4680 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4683 /* Most controller will fail if we try to create new connections
4684 * while we have an existing one in slave role.
4686 if (hdev->conn_hash.le_num_slave > 0)
4689 /* If we're not connectable only connect devices that we have in
4690 * our pend_le_conns list.
4692 params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
4697 switch (params->auto_connect) {
4698 case HCI_AUTO_CONN_DIRECT:
4699 /* Only devices advertising with ADV_DIRECT_IND are
4700 * triggering a connection attempt. This is allowing
4701 * incoming connections from slave devices.
4703 if (adv_type != LE_ADV_DIRECT_IND)
4706 case HCI_AUTO_CONN_ALWAYS:
4707 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4708 * are triggering a connection attempt. This means
4709 * that incoming connectioms from slave device are
4710 * accepted and also outgoing connections to slave
4711 * devices are established when found.
4718 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4719 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
4720 if (!IS_ERR(conn)) {
4721 /* Store the pointer since we don't really have any
4722 * other owner of the object besides the params that
4723 * triggered it. This way we can abort the connection if
4724 * the parameters get removed and keep the reference
4725 * count consistent once the connection is established.
4727 params->conn = hci_conn_get(conn);
4731 switch (PTR_ERR(conn)) {
4733 /* If hci_connect() returns -EBUSY it means there is already
4734 * an LE connection attempt going on. Since controllers don't
4735 * support more than one connection attempt at the time, we
4736 * don't consider this an error case.
4740 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core LE advertising-report processor, shared by the regular and direct
 * advertising report handlers.  Validates direct-address reports against
 * our own RPA/IRK, resolves the advertiser's RPA to an identity address,
 * triggers pending auto-connections, and either sends a device-found
 * event immediately or stores/merges the report so a subsequent SCAN_RSP
 * can be combined with its ADV_IND/ADV_SCAN_IND.
 * (Excerpt is non-contiguous: several returns, braces and flag
 * assignments between the visible lines are elided.)
 */
4747 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4748 u8 bdaddr_type, bdaddr_t *direct_addr,
4749 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4751 struct discovery_state *d = &hdev->discovery;
4752 struct smp_irk *irk;
4753 struct hci_conn *conn;
4757 /* If the direct address is present, then this report is from
4758 * a LE Direct Advertising Report event. In that case it is
4759 * important to see if the address is matching the local
4760 * controller address.
4763 /* Only resolvable random addresses are valid for these
4764 * kind of reports and others can be ignored.
4766 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4769 /* If the controller is not using resolvable random
4770 * addresses, then this report can be ignored.
4772 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4775 /* If the local IRK of the controller does not match
4776 * with the resolvable random address provided, then
4777 * this report can be ignored.
4779 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4783 /* Check if we need to convert to identity address */
4784 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4786 bdaddr = &irk->bdaddr;
4787 bdaddr_type = irk->addr_type;
4790 /* Check if we have been requested to connect to this device */
4791 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4792 if (conn && type == LE_ADV_IND) {
4793 /* Store report for later inclusion by
4794 * mgmt_device_connected
4796 memcpy(conn->le_adv_data, data, len);
4797 conn->le_adv_data_len = len;
4800 /* Passive scanning shouldn't trigger any device found events,
4801 * except for devices marked as CONN_REPORT for which we do send
4802 * device found events.
4804 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4805 if (type == LE_ADV_DIRECT_IND)
4808 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4809 bdaddr, bdaddr_type))
4812 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4813 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4816 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4817 rssi, flags, data, len, NULL, 0);
4821 /* When receiving non-connectable or scannable undirected
4822 * advertising reports, this means that the remote device is
4823 * not connectable and then clearly indicate this in the
4824 * device found event.
4826 * When receiving a scan response, then there is no way to
4827 * know if the remote device is connectable or not. However
4828 * since scan responses are merged with a previously seen
4829 * advertising report, the flags field from that report
4832 * In the really unlikely case that a controller get confused
4833 * and just sends a scan response event, then it is marked as
4834 * not connectable as well.
4836 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4837 type == LE_ADV_SCAN_RSP)
4838 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4842 /* If there's nothing pending either store the data from this
4843 * event or send an immediate device found event if the data
4844 * should not be stored for later.
4846 if (!has_pending_adv_report(hdev)) {
4847 /* If the report will trigger a SCAN_REQ store it for
4850 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4851 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4852 rssi, flags, data, len);
4856 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4857 rssi, flags, data, len, NULL, 0);
4861 /* Check if the pending report is for the same device as the new one */
4862 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4863 bdaddr_type == d->last_adv_addr_type);
4865 /* If the pending data doesn't match this report or this isn't a
4866 * scan response (e.g. we got a duplicate ADV_IND) then force
4867 * sending of the pending data.
4869 if (type != LE_ADV_SCAN_RSP || !match) {
4870 /* Send out whatever is in the cache, but skip duplicates */
4872 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4873 d->last_adv_addr_type, NULL,
4874 d->last_adv_rssi, d->last_adv_flags,
4876 d->last_adv_data_len, NULL, 0);
4878 /* If the new report will trigger a SCAN_REQ store it for
4881 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4882 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4883 rssi, flags, data, len);
4887 /* The advertising reports cannot be merged, so clear
4888 * the pending report and send out a device found event.
4890 clear_pending_adv_report(hdev);
4891 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4892 rssi, flags, data, len, NULL, 0);
4896 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4897 * the new event is a SCAN_RSP. We can therefore proceed with
4898 * sending a merged device found event.
4900 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4901 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4902 d->last_adv_data, d->last_adv_data_len, data, len);
4903 clear_pending_adv_report(hdev);
/* HCI LE Advertising Report event: iterate over the batched reports in
 * the event payload and hand each one to process_adv_report().  Each
 * report is followed by a single RSSI byte, hence the
 * ev->data[ev->length] read and the "+ 1" in the pointer advance.
 * (Excerpt elides some lines, e.g. the rssi declaration.)
 */
4906 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4908 u8 num_reports = skb->data[0];
4909 void *ptr = &skb->data[1];
4913 while (num_reports--) {
4914 struct hci_ev_le_advertising_info *ev = ptr;
/* RSSI is the byte immediately after the variable-length AD data. */
4917 rssi = ev->data[ev->length];
4918 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4919 ev->bdaddr_type, NULL, 0, rssi,
4920 ev->data, ev->length);
4922 ptr += sizeof(*ev) + ev->length + 1;
4925 hci_dev_unlock(hdev);
/* HCI LE Read Remote Features Complete event: store the remote LE
 * feature mask and, if the connection is still in BT_CONFIG, transition
 * it to BT_CONNECTED.  An "unsupported remote feature" error (0x1a) on a
 * slave-role link is tolerated when we support slave-initiated features
 * exchange, since the remote may simply not implement the procedure.
 * (Excerpt elides some lines, e.g. the !conn guard and status setup.)
 */
4928 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
4929 struct sk_buff *skb)
4931 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
4932 struct hci_conn *conn;
4934 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4938 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4941 memcpy(conn->features[0], ev->features, 8);
4943 if (conn->state == BT_CONFIG) {
4946 /* If the local controller supports slave-initiated
4947 * features exchange, but the remote controller does
4948 * not, then it is possible that the error code 0x1a
4949 * for unsupported remote feature gets returned.
4951 * In this specific case, allow the connection to
4952 * transition into connected state and mark it as
4955 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
4956 !conn->out && ev->status == 0x1a)
4959 status = ev->status;
4961 conn->state = BT_CONNECTED;
4962 hci_connect_cfm(conn, status);
4963 hci_conn_drop(conn);
4967 hci_dev_unlock(hdev);
/* HCI LE Long Term Key Request event: the controller asks for the LTK to
 * encrypt the link.  Look up the stored key, validate EDiv/Rand (both
 * must be zero for Secure Connections keys), reply with the key padded
 * to 16 bytes, and update the connection's security bookkeeping.  STKs
 * are one-shot and are removed from the key list after use.  On any
 * failure path a negative reply is sent instead.
 * (Excerpt is non-contiguous: guard checks and goto labels are elided.)
 */
4970 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4972 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4973 struct hci_cp_le_ltk_reply cp;
4974 struct hci_cp_le_ltk_neg_reply neg;
4975 struct hci_conn *conn;
4976 struct smp_ltk *ltk;
4978 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4982 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4986 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
4990 if (smp_ltk_is_sc(ltk)) {
4991 /* With SC both EDiv and Rand are set to zero */
4992 if (ev->ediv || ev->rand)
4995 /* For non-SC keys check that EDiv and Rand match */
4996 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Copy enc_size key bytes and zero-pad the reply to full LTK width. */
5000 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5001 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5002 cp.handle = cpu_to_le16(conn->handle);
5004 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5006 conn->enc_key_size = ltk->enc_size;
5008 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5010 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5011 * temporary key used to encrypt a connection following
5012 * pairing. It is used during the Encrypted Session Setup to
5013 * distribute the keys. Later, security can be re-established
5014 * using a distributed LTK.
5016 if (ltk->type == SMP_STK) {
5017 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5018 list_del_rcu(&ltk->list);
5019 kfree_rcu(ltk, rcu);
5021 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5024 hci_dev_unlock(hdev);
/* Error path: no conn/key or EDiv/Rand mismatch — reject the request. */
5029 neg.handle = ev->handle;
5030 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5031 hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request negative reply for the
 * given connection handle.  (Excerpt elides the rejection-reason
 * parameter line and its cp field assignment.)
 */
5034 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5037 struct hci_cp_le_conn_param_req_neg_reply cp;
5039 cp.handle = cpu_to_le16(handle);
5042 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* HCI LE Remote Connection Parameter Request event: the peer proposes
 * new connection parameters.  Reject with a negative reply if the
 * connection is unknown/not connected or the parameters fail the range
 * check; otherwise, when we are master, remember the proposed values in
 * the stored per-device parameters, tell mgmt about them, and accept by
 * echoing the request back in a positive reply.
 * (Excerpt elides some lines, e.g. locking and store_hint handling.)
 */
5046 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5047 struct sk_buff *skb)
5049 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5050 struct hci_cp_le_conn_param_req_reply cp;
5051 struct hci_conn *hcon;
5052 u16 handle, min, max, latency, timeout;
5054 handle = le16_to_cpu(ev->handle);
5055 min = le16_to_cpu(ev->interval_min);
5056 max = le16_to_cpu(ev->interval_max);
5057 latency = le16_to_cpu(ev->latency);
5058 timeout = le16_to_cpu(ev->timeout);
5060 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5061 if (!hcon || hcon->state != BT_CONNECTED)
5062 return send_conn_param_neg_reply(hdev, handle,
5063 HCI_ERROR_UNKNOWN_CONN_ID);
5065 if (hci_check_conn_params(min, max, latency, timeout))
5066 return send_conn_param_neg_reply(hdev, handle,
5067 HCI_ERROR_INVALID_LL_PARAMS);
5069 if (hcon->role == HCI_ROLE_MASTER) {
5070 struct hci_conn_params *params;
5075 params = hci_conn_params_lookup(hdev, &hcon->dst,
5078 params->conn_min_interval = min;
5079 params->conn_max_interval = max;
5080 params->conn_latency = latency;
5081 params->supervision_timeout = timeout;
5087 hci_dev_unlock(hdev);
5089 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5090 store_hint, min, max, latency, timeout);
/* Accept: echo the peer's proposed parameters back unchanged
 * (values stay little-endian as received).
 */
5093 cp.handle = ev->handle;
5094 cp.interval_min = ev->interval_min;
5095 cp.interval_max = ev->interval_max;
5096 cp.latency = ev->latency;
5097 cp.timeout = ev->timeout;
5101 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* HCI LE Direct Advertising Report event: like the regular advertising
 * report handler, but each report carries a direct (target) address and
 * an explicit rssi field, and no AD payload.  Hand each entry to
 * process_adv_report() for RPA validation and further handling.
 * (Excerpt elides some lines, e.g. the per-entry pointer advance.)
 */
5104 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5105 struct sk_buff *skb)
5107 u8 num_reports = skb->data[0];
5108 void *ptr = &skb->data[1];
5112 while (num_reports--) {
5113 struct hci_ev_le_direct_adv_info *ev = ptr;
5115 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5116 ev->bdaddr_type, &ev->direct_addr,
5117 ev->direct_addr_type, ev->rssi, NULL, 0);
5122 hci_dev_unlock(hdev);
/* HCI LE Meta event: strip the meta-event header and dispatch on the
 * subevent code to the specific LE event handler.  Each handler receives
 * the skb with skb->data already pointing at the subevent payload.
 */
5125 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5127 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5129 skb_pull(skb, sizeof(*le_ev));
5131 switch (le_ev->subevent) {
5132 case HCI_EV_LE_CONN_COMPLETE:
5133 hci_le_conn_complete_evt(hdev, skb);
5136 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5137 hci_le_conn_update_complete_evt(hdev, skb);
5140 case HCI_EV_LE_ADVERTISING_REPORT:
5141 hci_le_adv_report_evt(hdev, skb);
5144 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5145 hci_le_remote_feat_complete_evt(hdev, skb);
5148 case HCI_EV_LE_LTK_REQ:
5149 hci_le_ltk_request_evt(hdev, skb);
5152 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5153 hci_le_remote_conn_param_req_evt(hdev, skb);
5156 case HCI_EV_LE_DIRECT_ADV_REPORT:
5157 hci_le_direct_adv_report_evt(hdev, skb);
/* Validate that the (cloned) skb passed to a req_complete_skb callback
 * really is the Command Complete event for the given opcode: check the
 * event header length and type, and that the embedded opcode matches.
 * The skb is advanced past the event and cmd_complete headers on the
 * success path so the callback sees only the return parameters.
 * (Excerpt elides the return statements and the special handling around
 * the requested 'event' type.)
 */
5165 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5166 u8 event, struct sk_buff *skb)
5168 struct hci_ev_cmd_complete *ev;
5169 struct hci_event_hdr *hdr;
5174 if (skb->len < sizeof(*hdr)) {
5175 BT_ERR("Too short HCI event");
5179 hdr = (void *) skb->data;
5180 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* If the request specified an expected event, it must match. */
5183 if (hdr->evt != event)
5188 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5189 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
5193 if (skb->len < sizeof(*ev)) {
5194 BT_ERR("Too short cmd_complete event");
5198 ev = (void *) skb->data;
5199 skb_pull(skb, sizeof(*ev));
5201 if (opcode != __le16_to_cpu(ev->opcode)) {
5202 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5203 __le16_to_cpu(ev->opcode));
5210 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5212 struct hci_event_hdr *hdr = (void *) skb->data;
5213 hci_req_complete_t req_complete = NULL;
5214 hci_req_complete_skb_t req_complete_skb = NULL;
5215 struct sk_buff *orig_skb = NULL;
5216 u8 status = 0, event = hdr->evt, req_evt = 0;
5217 u16 opcode = HCI_OP_NOP;
5219 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
5220 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5221 opcode = __le16_to_cpu(cmd_hdr->opcode);
5222 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5227 /* If it looks like we might end up having to call
5228 * req_complete_skb, store a pristine copy of the skb since the
5229 * various handlers may modify the original one through
5230 * skb_pull() calls, etc.
5232 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5233 event == HCI_EV_CMD_COMPLETE)
5234 orig_skb = skb_clone(skb, GFP_KERNEL);
5236 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5239 case HCI_EV_INQUIRY_COMPLETE:
5240 hci_inquiry_complete_evt(hdev, skb);
5243 case HCI_EV_INQUIRY_RESULT:
5244 hci_inquiry_result_evt(hdev, skb);
5247 case HCI_EV_CONN_COMPLETE:
5248 hci_conn_complete_evt(hdev, skb);
5251 case HCI_EV_CONN_REQUEST:
5252 hci_conn_request_evt(hdev, skb);
5255 case HCI_EV_DISCONN_COMPLETE:
5256 hci_disconn_complete_evt(hdev, skb);
5259 case HCI_EV_AUTH_COMPLETE:
5260 hci_auth_complete_evt(hdev, skb);
5263 case HCI_EV_REMOTE_NAME:
5264 hci_remote_name_evt(hdev, skb);
5267 case HCI_EV_ENCRYPT_CHANGE:
5268 hci_encrypt_change_evt(hdev, skb);
5271 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5272 hci_change_link_key_complete_evt(hdev, skb);
5275 case HCI_EV_REMOTE_FEATURES:
5276 hci_remote_features_evt(hdev, skb);
5279 case HCI_EV_CMD_COMPLETE:
5280 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5281 &req_complete, &req_complete_skb);
5284 case HCI_EV_CMD_STATUS:
5285 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5289 case HCI_EV_HARDWARE_ERROR:
5290 hci_hardware_error_evt(hdev, skb);
5293 case HCI_EV_ROLE_CHANGE:
5294 hci_role_change_evt(hdev, skb);
5297 case HCI_EV_NUM_COMP_PKTS:
5298 hci_num_comp_pkts_evt(hdev, skb);
5301 case HCI_EV_MODE_CHANGE:
5302 hci_mode_change_evt(hdev, skb);
5305 case HCI_EV_PIN_CODE_REQ:
5306 hci_pin_code_request_evt(hdev, skb);
5309 case HCI_EV_LINK_KEY_REQ:
5310 hci_link_key_request_evt(hdev, skb);
5313 case HCI_EV_LINK_KEY_NOTIFY:
5314 hci_link_key_notify_evt(hdev, skb);
5317 case HCI_EV_CLOCK_OFFSET:
5318 hci_clock_offset_evt(hdev, skb);
5321 case HCI_EV_PKT_TYPE_CHANGE:
5322 hci_pkt_type_change_evt(hdev, skb);
5325 case HCI_EV_PSCAN_REP_MODE:
5326 hci_pscan_rep_mode_evt(hdev, skb);
5329 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5330 hci_inquiry_result_with_rssi_evt(hdev, skb);
5333 case HCI_EV_REMOTE_EXT_FEATURES:
5334 hci_remote_ext_features_evt(hdev, skb);
5337 case HCI_EV_SYNC_CONN_COMPLETE:
5338 hci_sync_conn_complete_evt(hdev, skb);
5341 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5342 hci_extended_inquiry_result_evt(hdev, skb);
5345 case HCI_EV_KEY_REFRESH_COMPLETE:
5346 hci_key_refresh_complete_evt(hdev, skb);
5349 case HCI_EV_IO_CAPA_REQUEST:
5350 hci_io_capa_request_evt(hdev, skb);
5353 case HCI_EV_IO_CAPA_REPLY:
5354 hci_io_capa_reply_evt(hdev, skb);
5357 case HCI_EV_USER_CONFIRM_REQUEST:
5358 hci_user_confirm_request_evt(hdev, skb);
5361 case HCI_EV_USER_PASSKEY_REQUEST:
5362 hci_user_passkey_request_evt(hdev, skb);
5365 case HCI_EV_USER_PASSKEY_NOTIFY:
5366 hci_user_passkey_notify_evt(hdev, skb);
5369 case HCI_EV_KEYPRESS_NOTIFY:
5370 hci_keypress_notify_evt(hdev, skb);
5373 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5374 hci_simple_pair_complete_evt(hdev, skb);
5377 case HCI_EV_REMOTE_HOST_FEATURES:
5378 hci_remote_host_features_evt(hdev, skb);
5381 case HCI_EV_LE_META:
5382 hci_le_meta_evt(hdev, skb);
5385 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5386 hci_remote_oob_data_request_evt(hdev, skb);
5389 #if IS_ENABLED(CONFIG_BT_HS)
5390 case HCI_EV_CHANNEL_SELECTED:
5391 hci_chan_selected_evt(hdev, skb);
5394 case HCI_EV_PHY_LINK_COMPLETE:
5395 hci_phy_link_complete_evt(hdev, skb);
5398 case HCI_EV_LOGICAL_LINK_COMPLETE:
5399 hci_loglink_complete_evt(hdev, skb);
5402 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5403 hci_disconn_loglink_complete_evt(hdev, skb);
5406 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5407 hci_disconn_phylink_complete_evt(hdev, skb);
5411 case HCI_EV_NUM_COMP_BLOCKS:
5412 hci_num_comp_blocks_evt(hdev, skb);
5416 BT_DBG("%s event 0x%2.2x", hdev->name, event);
5421 req_complete(hdev, status, opcode);
5422 } else if (req_complete_skb) {
5423 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
5424 kfree_skb(orig_skb);
5427 req_complete_skb(hdev, status, opcode, orig_skb);
5430 kfree_skb(orig_skb);
5432 hdev->stat.evt_rx++;