2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
/* NOTE(review): this listing is a line-numbered extraction; braces, blank
 * lines and the usual early-return status checks are missing, and each
 * line carries a stray leading line number.  Restore the original layout
 * before compiling — comments below describe only the visible code.
 */

/* Command Complete for HCI_Inquiry_Cancel: clear the in-progress inquiry
 * flag, wake any waiters on it, mark discovery stopped, and resume any
 * pending connection attempts.
 */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
58 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
61 hci_conn_check_pending(hdev);

/* Command Complete for HCI_Periodic_Inquiry_Mode: record that periodic
 * inquiry is now active.  (A status check presumably guarded the flag
 * set in the original — missing from this extraction.)
 */
64 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
66 __u8 status = *((__u8 *) skb->data);
68 BT_DBG("%s status 0x%2.2x", hdev->name, status);
73 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

/* Command Complete for HCI_Exit_Periodic_Inquiry_Mode: clear the
 * periodic-inquiry flag and resume pending connections.
 */
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
78 __u8 status = *((__u8 *) skb->data);
80 BT_DBG("%s status 0x%2.2x", hdev->name, status);
85 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
87 hci_conn_check_pending(hdev);

/* Command Complete for HCI_Remote_Name_Request_Cancel: debug trace only
 * in the visible body (second parameter line missing from extraction).
 */
90 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
93 BT_DBG("%s", hdev->name);
/* Command Complete for HCI_Role_Discovery: look up the connection by
 * handle and record its current role (master/slave).  The matching
 * hci_dev_lock() and NULL-conn guard are missing from this extraction.
 */
96 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
98 struct hci_rp_role_discovery *rp = (void *) skb->data;
99 struct hci_conn *conn;
101 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
108 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
110 conn->role = rp->role;
112 hci_dev_unlock(hdev);

/* Command Complete for HCI_Read_Link_Policy_Settings: cache the link
 * policy on the matching connection.
 */
115 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 struct hci_rp_read_link_policy *rp = (void *) skb->data;
118 struct hci_conn *conn;
120 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 conn->link_policy = __le16_to_cpu(rp->policy);
131 hci_dev_unlock(hdev);

/* Command Complete for HCI_Write_Link_Policy_Settings: the new policy is
 * taken from the sent command (offset +2 past the handle), since the
 * response only echoes the handle.  The `sent` declaration line is
 * missing from this extraction.
 */
134 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 struct hci_rp_write_link_policy *rp = (void *) skb->data;
137 struct hci_conn *conn;
140 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
145 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
151 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 conn->link_policy = get_unaligned_le16(sent + 2);
155 hci_dev_unlock(hdev);

/* Command Complete for HCI_Read_Default_Link_Policy_Settings: cache the
 * controller-wide default link policy on hdev.
 */
158 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
168 hdev->link_policy = __le16_to_cpu(rp->policy);

/* Command Complete for HCI_Write_Default_Link_Policy_Settings: read the
 * value back out of the command we sent and cache it.
 */
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
174 __u8 status = *((__u8 *) skb->data);
177 BT_DBG("%s status 0x%2.2x", hdev->name, status);
182 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
186 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete for HCI_Reset: drop the HCI_RESET flag and restore
 * all volatile device state to its post-reset defaults (discovery
 * stopped, TX powers invalid, advertising/scan-response data cleared,
 * passive LE scan type, SSP debug off, LE white list emptied).
 */
189 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
191 __u8 status = *((__u8 *) skb->data);
193 BT_DBG("%s status 0x%2.2x", hdev->name, status);
195 clear_bit(HCI_RESET, &hdev->flags);
200 /* Reset all non-persistent flags */
201 hci_dev_clear_volatile_flags(hdev);
203 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
205 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
206 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
208 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
209 hdev->adv_data_len = 0;
211 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
212 hdev->scan_rsp_data_len = 0;
214 hdev->le_scan_type = LE_SCAN_PASSIVE;
216 hdev->ssp_debug_mode = 0;
218 hci_bdaddr_list_clear(&hdev->le_white_list);

/* Command Complete for HCI_Read_Stored_Link_Key: only when we asked for
 * all keys (read_all == 0x01) and the command succeeded do we cache the
 * controller's key-storage counters.
 */
221 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
224 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
225 struct hci_cp_read_stored_link_key *sent;
227 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
229 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
233 if (!rp->status && sent->read_all == 0x01) {
234 hdev->stored_max_keys = rp->max_keys;
235 hdev->stored_num_keys = rp->num_keys;

/* Command Complete for HCI_Delete_Stored_Link_Key: decrement the cached
 * key count, clamping at zero so it can never underflow.
 */
239 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
242 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
244 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
249 if (rp->num_keys <= hdev->stored_num_keys)
250 hdev->stored_num_keys -= rp->num_keys;
252 hdev->stored_num_keys = 0;
/* Command Complete for HCI_Write_Local_Name: notify the management
 * interface of the result and, on success (guard missing from this
 * extraction), cache the new name.
 */
255 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
257 __u8 status = *((__u8 *) skb->data);
260 BT_DBG("%s status 0x%2.2x", hdev->name, status);
262 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
268 if (hci_dev_test_flag(hdev, HCI_MGMT))
269 mgmt_set_local_name_complete(hdev, sent, status);
271 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
273 hci_dev_unlock(hdev);

/* Command Complete for HCI_Read_Local_Name: only trust the controller's
 * name during initial setup/config, before userspace may have set one.
 */
276 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
278 struct hci_rp_read_local_name *rp = (void *) skb->data;
280 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
285 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
286 hci_dev_test_flag(hdev, HCI_CONFIG))
287 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

/* Command Complete for HCI_Write_Authentication_Enable: mirror the sent
 * parameter into the HCI_AUTH flag and tell mgmt about the outcome.
 */
290 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
292 __u8 status = *((__u8 *) skb->data);
295 BT_DBG("%s status 0x%2.2x", hdev->name, status);
297 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
304 __u8 param = *((__u8 *) sent);
306 if (param == AUTH_ENABLED)
307 set_bit(HCI_AUTH, &hdev->flags);
309 clear_bit(HCI_AUTH, &hdev->flags);
312 if (hci_dev_test_flag(hdev, HCI_MGMT))
313 mgmt_auth_enable_complete(hdev, status);
315 hci_dev_unlock(hdev);

/* Command Complete for HCI_Write_Encryption_Mode: mirror the sent
 * parameter into the HCI_ENCRYPT flag.
 */
318 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
320 __u8 status = *((__u8 *) skb->data);
324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
329 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
333 param = *((__u8 *) sent);
336 set_bit(HCI_ENCRYPT, &hdev->flags);
338 clear_bit(HCI_ENCRYPT, &hdev->flags);

/* Command Complete for HCI_Write_Scan_Enable: update the ISCAN/PSCAN
 * flags from the sent parameter; the visible reset of discov_timeout
 * presumably sits on a failure path (guards missing from extraction).
 */
341 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
343 __u8 status = *((__u8 *) skb->data);
347 BT_DBG("%s status 0x%2.2x", hdev->name, status);
349 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
353 param = *((__u8 *) sent);
358 hdev->discov_timeout = 0;
362 if (param & SCAN_INQUIRY)
363 set_bit(HCI_ISCAN, &hdev->flags);
365 clear_bit(HCI_ISCAN, &hdev->flags);
367 if (param & SCAN_PAGE)
368 set_bit(HCI_PSCAN, &hdev->flags);
370 clear_bit(HCI_PSCAN, &hdev->flags);
373 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Class_of_Device: cache the 3-byte class
 * of device and log it most-significant byte first.
 */
376 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
378 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
380 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
385 memcpy(hdev->dev_class, rp->dev_class, 3);
387 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
388 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);

/* Command Complete for HCI_Write_Class_of_Device: on success cache the
 * class we sent and notify mgmt of the result.
 */
391 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
393 __u8 status = *((__u8 *) skb->data);
396 BT_DBG("%s status 0x%2.2x", hdev->name, status);
398 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
405 memcpy(hdev->dev_class, sent, 3);
407 if (hci_dev_test_flag(hdev, HCI_MGMT))
408 mgmt_set_class_of_dev_complete(hdev, sent, status);
410 hci_dev_unlock(hdev);

/* Command Complete for HCI_Read_Voice_Setting: cache the setting and,
 * when it changed, notify the driver so it can reconfigure SCO audio.
 */
413 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
415 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
418 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
423 setting = __le16_to_cpu(rp->voice_setting);
425 if (hdev->voice_setting == setting)
428 hdev->voice_setting = setting;
430 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
433 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

/* Command Complete for HCI_Write_Voice_Setting: same as the read path,
 * but the value comes from the command we sent.
 */
436 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
439 __u8 status = *((__u8 *) skb->data);
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
448 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
452 setting = get_unaligned_le16(sent);
454 if (hdev->voice_setting == setting)
457 hdev->voice_setting = setting;
459 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
462 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

/* Command Complete for HCI_Read_Number_of_Supported_IAC: cache how many
 * inquiry access codes the controller supports.
 */
465 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
468 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
470 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
475 hdev->num_iac = rp->num_iac;
477 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete for HCI_Write_Simple_Pairing_Mode: mirror the sent
 * mode into the host-SSP feature bit and the HCI_SSP_ENABLED flag, and
 * notify mgmt of the outcome.
 */
480 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
482 __u8 status = *((__u8 *) skb->data);
483 struct hci_cp_write_ssp_mode *sent;
485 BT_DBG("%s status 0x%2.2x", hdev->name, status);
487 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
495 hdev->features[1][0] |= LMP_HOST_SSP;
497 hdev->features[1][0] &= ~LMP_HOST_SSP;
500 if (hci_dev_test_flag(hdev, HCI_MGMT))
501 mgmt_ssp_enable_complete(hdev, sent->mode, status);
504 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
506 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
509 hci_dev_unlock(hdev);

/* Command Complete for HCI_Write_Secure_Connections_Host_Support:
 * mirror the sent support value into the host-SC feature bit; the
 * HCI_SC_ENABLED flag is only touched directly when mgmt is not in
 * control (mgmt drives the flag itself otherwise).
 */
512 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
514 u8 status = *((u8 *) skb->data);
515 struct hci_cp_write_sc_support *sent;
517 BT_DBG("%s status 0x%2.2x", hdev->name, status);
519 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
527 hdev->features[1][0] |= LMP_HOST_SC;
529 hdev->features[1][0] &= ~LMP_HOST_SC;
532 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
534 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
536 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
539 hci_dev_unlock(hdev);

/* Command Complete for HCI_Read_Local_Version_Information: cache the
 * controller's HCI/LMP versions and manufacturer, but only during
 * setup/config so a runtime re-read cannot clobber them.
 */
542 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
544 struct hci_rp_read_local_version *rp = (void *) skb->data;
546 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
551 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
552 hci_dev_test_flag(hdev, HCI_CONFIG)) {
553 hdev->hci_ver = rp->hci_ver;
554 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
555 hdev->lmp_ver = rp->lmp_ver;
556 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
557 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

/* Command Complete for HCI_Read_Local_Supported_Commands: cache the
 * supported-command bitmap, again only during setup/config.
 */
561 static void hci_cc_read_local_commands(struct hci_dev *hdev,
564 struct hci_rp_read_local_commands *rp = (void *) skb->data;
566 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
571 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
572 hci_dev_test_flag(hdev, HCI_CONFIG))
573 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete for HCI_Read_Local_Supported_Features: cache page 0
 * of the LMP feature mask, then derive the default ACL packet types and
 * the set of (e)SCO packet types the controller can use.
 */
576 static void hci_cc_read_local_features(struct hci_dev *hdev,
579 struct hci_rp_read_local_features *rp = (void *) skb->data;
581 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
586 memcpy(hdev->features, rp->features, 8);
588 /* Adjust default settings according to features
589 * supported by device. */
591 if (hdev->features[0][0] & LMP_3SLOT)
592 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
594 if (hdev->features[0][0] & LMP_5SLOT)
595 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
597 if (hdev->features[0][1] & LMP_HV2) {
598 hdev->pkt_type |= (HCI_HV2);
599 hdev->esco_type |= (ESCO_HV2);
602 if (hdev->features[0][1] & LMP_HV3) {
603 hdev->pkt_type |= (HCI_HV3);
604 hdev->esco_type |= (ESCO_HV3);
607 if (lmp_esco_capable(hdev))
608 hdev->esco_type |= (ESCO_EV3);
610 if (hdev->features[0][4] & LMP_EV4)
611 hdev->esco_type |= (ESCO_EV4);
613 if (hdev->features[0][4] & LMP_EV5)
614 hdev->esco_type |= (ESCO_EV5);
616 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
617 hdev->esco_type |= (ESCO_2EV3);
619 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
620 hdev->esco_type |= (ESCO_3EV3);
622 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
623 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

/* Command Complete for HCI_Read_Local_Extended_Features: track the
 * highest feature page and cache the page that was returned, bounded
 * by HCI_MAX_PAGES.
 */
626 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
629 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
631 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
636 if (hdev->max_page < rp->max_page)
637 hdev->max_page = rp->max_page;
639 if (rp->page < HCI_MAX_PAGES)
640 memcpy(hdev->features[rp->page], rp->features, 8);

/* Command Complete for HCI_Read_Flow_Control_Mode (AMP): cache the
 * controller's data flow control mode.
 */
643 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
646 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
653 hdev->flow_ctl_mode = rp->mode;

/* Command Complete for HCI_Read_Buffer_Size: cache ACL/SCO MTUs and
 * packet counts, then seed the available-credit counters from them.
 * A quirk branch (body missing from extraction) adjusts values for
 * controllers that report broken buffer sizes.
 */
656 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
658 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
660 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
665 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
666 hdev->sco_mtu = rp->sco_mtu;
667 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
668 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
670 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
675 hdev->acl_cnt = hdev->acl_pkts;
676 hdev->sco_cnt = hdev->sco_pkts;
678 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
679 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete for HCI_Read_BD_ADDR: record the public address
 * during init, and additionally remember the original setup-time
 * address while in the SETUP phase.
 */
682 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
684 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
686 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
691 if (test_bit(HCI_INIT, &hdev->flags))
692 bacpy(&hdev->bdaddr, &rp->bdaddr);
694 if (hci_dev_test_flag(hdev, HCI_SETUP))
695 bacpy(&hdev->setup_addr, &rp->bdaddr);

/* Command Complete for HCI_Read_Page_Scan_Activity: only cache the
 * interval/window during init, before the host may have changed them.
 */
698 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
701 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
703 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
708 if (test_bit(HCI_INIT, &hdev->flags)) {
709 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
710 hdev->page_scan_window = __le16_to_cpu(rp->window);

/* Command Complete for HCI_Write_Page_Scan_Activity: cache the values
 * we sent on success.
 */
714 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
717 u8 status = *((u8 *) skb->data);
718 struct hci_cp_write_page_scan_activity *sent;
720 BT_DBG("%s status 0x%2.2x", hdev->name, status);
725 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
729 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
730 hdev->page_scan_window = __le16_to_cpu(sent->window);

/* Command Complete for HCI_Read_Page_Scan_Type: cache during init only. */
733 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
736 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
743 if (test_bit(HCI_INIT, &hdev->flags))
744 hdev->page_scan_type = rp->type;

/* Command Complete for HCI_Write_Page_Scan_Type: cache the type byte we
 * sent (its `type` declaration is missing from this extraction).
 */
747 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
750 u8 status = *((u8 *) skb->data);
753 BT_DBG("%s status 0x%2.2x", hdev->name, status);
758 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
760 hdev->page_scan_type = *type;

/* Command Complete for HCI_Read_Data_Block_Size (block-based flow
 * control): cache block MTU/length/count and seed the credit counter.
 */
763 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
766 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
768 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
773 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
774 hdev->block_len = __le16_to_cpu(rp->block_len);
775 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
777 hdev->block_cnt = hdev->num_blocks;
779 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
780 hdev->block_cnt, hdev->block_len);
/* Command Complete for HCI_Read_Clock: `which` 0x00 means the local
 * clock (stored on hdev); otherwise the piconet clock of the connection
 * identified by the returned handle.  Guards against short responses.
 */
783 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
785 struct hci_rp_read_clock *rp = (void *) skb->data;
786 struct hci_cp_read_clock *cp;
787 struct hci_conn *conn;
789 BT_DBG("%s", hdev->name);
791 if (skb->len < sizeof(*rp))
799 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
803 if (cp->which == 0x00) {
804 hdev->clock = le32_to_cpu(rp->clock);
808 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
810 conn->clock = le32_to_cpu(rp->clock);
811 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
815 hci_dev_unlock(hdev);

/* Command Complete for HCI_Read_Local_AMP_Info: cache the full set of
 * AMP controller capabilities (bandwidth, latency, flush timeouts, etc.).
 */
818 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
821 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
823 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828 hdev->amp_status = rp->amp_status;
829 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
830 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
831 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
832 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
833 hdev->amp_type = rp->amp_type;
834 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
835 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
836 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
837 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

/* Command Complete for HCI_Read_Local_AMP_Assoc: accumulate the AMP
 * association data fragment by fragment into hdev->loc_assoc; when
 * rem_len fits in this fragment it is the last one, and the A2MP
 * responses are sent.
 */
840 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
843 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
844 struct amp_assoc *assoc = &hdev->loc_assoc;
845 size_t rem_len, frag_len;
847 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
852 frag_len = skb->len - sizeof(*rp);
853 rem_len = __le16_to_cpu(rp->rem_len);
855 if (rem_len > frag_len) {
856 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
858 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
859 assoc->offset += frag_len;
861 /* Read other fragments */
862 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
867 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
868 assoc->len = assoc->offset + rem_len;
872 /* Send A2MP Rsp when all fragments are received */
873 a2mp_send_getampassoc_rsp(hdev, rp->status);
874 a2mp_send_create_phy_link_req(hdev, rp->status);

/* Command Complete for HCI_Read_Inquiry_Response_TX_Power_Level: cache
 * the TX power used in inquiry responses.
 */
877 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
880 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
882 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
887 hdev->inq_tx_power = rp->tx_power;
/* Command Complete for HCI_PIN_Code_Request_Reply: forward the result
 * to mgmt and remember the PIN length on the matching ACL connection
 * (used later when storing the link key).
 */
890 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
892 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
893 struct hci_cp_pin_code_reply *cp;
894 struct hci_conn *conn;
896 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
900 if (hci_dev_test_flag(hdev, HCI_MGMT))
901 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
906 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
910 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
912 conn->pin_length = cp->pin_len;
915 hci_dev_unlock(hdev);

/* Command Complete for HCI_PIN_Code_Request_Negative_Reply: forward
 * the result to mgmt.
 */
918 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
920 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
922 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
926 if (hci_dev_test_flag(hdev, HCI_MGMT))
927 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
930 hci_dev_unlock(hdev);

/* Command Complete for HCI_LE_Read_Buffer_Size: cache the LE ACL MTU
 * and packet count and seed the LE credit counter.
 */
933 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
936 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
938 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
943 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
944 hdev->le_pkts = rp->le_max_pkt;
946 hdev->le_cnt = hdev->le_pkts;
948 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

/* Command Complete for HCI_LE_Read_Local_Supported_Features: cache the
 * 8-byte LE feature mask.
 */
951 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
954 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
956 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
961 memcpy(hdev->le_features, rp->features, 8);

/* Command Complete for HCI_LE_Read_Advertising_Channel_TX_Power. */
964 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
967 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
969 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
974 hdev->adv_tx_power = rp->tx_power;

/* Command Complete for HCI_User_Confirmation_Request_Reply: forward
 * the result to mgmt (always ACL_LINK, addr_type 0 for BR/EDR).
 */
977 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
979 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
981 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
985 if (hci_dev_test_flag(hdev, HCI_MGMT))
986 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
989 hci_dev_unlock(hdev);

/* Command Complete for HCI_User_Confirmation_Request_Negative_Reply. */
992 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
995 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
997 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1001 if (hci_dev_test_flag(hdev, HCI_MGMT))
1002 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1003 ACL_LINK, 0, rp->status);
1005 hci_dev_unlock(hdev);

/* Command Complete for HCI_User_Passkey_Request_Reply. */
1008 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1010 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1012 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1016 if (hci_dev_test_flag(hdev, HCI_MGMT))
1017 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1020 hci_dev_unlock(hdev);

/* Command Complete for HCI_User_Passkey_Request_Negative_Reply. */
1023 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1024 struct sk_buff *skb)
1026 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1028 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1032 if (hci_dev_test_flag(hdev, HCI_MGMT))
1033 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1034 ACL_LINK, 0, rp->status);
1036 hci_dev_unlock(hdev);

/* Command Complete for HCI_Read_Local_OOB_Data: only the debug trace is
 * visible here; the mgmt forwarding body is missing from this extraction.
 */
1039 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1040 struct sk_buff *skb)
1042 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1044 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

/* Command Complete for HCI_Read_Local_OOB_Extended_Data: same note as
 * above — only the trace line survives in this extraction.
 */
1047 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1048 struct sk_buff *skb)
1050 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1052 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for HCI_LE_Set_Random_Address: on success cache the
 * random address we programmed into the controller.
 */
1055 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1057 __u8 status = *((__u8 *) skb->data);
1060 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1065 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1071 bacpy(&hdev->random_addr, sent);
1073 hci_dev_unlock(hdev);

/* Command Complete for HCI_LE_Set_Advertise_Enable: track the
 * HCI_LE_ADV flag and, when advertising was enabled while a peripheral
 * connection attempt is pending, arm the connection timeout so a stuck
 * attempt cannot hang forever.
 */
1076 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1078 __u8 *sent, status = *((__u8 *) skb->data);
1080 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1085 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1091 /* If we're doing connection initiation as peripheral. Set a
1092 * timeout in case something goes wrong.
1095 struct hci_conn *conn;
1097 hci_dev_set_flag(hdev, HCI_LE_ADV);
1099 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1101 queue_delayed_work(hdev->workqueue,
1102 &conn->le_conn_timeout,
1103 conn->conn_timeout);
1105 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1108 hci_dev_unlock(hdev);

/* Command Complete for HCI_LE_Set_Scan_Parameters: remember whether the
 * next scan will be active or passive.
 */
1111 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1113 struct hci_cp_le_set_scan_param *cp;
1114 __u8 status = *((__u8 *) skb->data);
1116 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1121 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1127 hdev->le_scan_type = cp->type;
1129 hci_dev_unlock(hdev);

/* True if an advertising report is buffered awaiting its scan-response
 * counterpart (a non-zero last_adv_addr marks a pending report).
 */
1132 static bool has_pending_adv_report(struct hci_dev *hdev)
1134 struct discovery_state *d = &hdev->discovery;
1136 return bacmp(&d->last_adv_addr, BDADDR_ANY);

/* Drop any buffered advertising report. */
1139 static void clear_pending_adv_report(struct hci_dev *hdev)
1141 struct discovery_state *d = &hdev->discovery;
1143 bacpy(&d->last_adv_addr, BDADDR_ANY);
1144 d->last_adv_data_len = 0;

/* Buffer an advertising report (address, RSSI, flags, AD data) so it
 * can be merged with a following scan response before reporting to mgmt.
 */
1147 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1148 u8 bdaddr_type, s8 rssi, u32 flags,
1151 struct discovery_state *d = &hdev->discovery;
1153 bacpy(&d->last_adv_addr, bdaddr);
1154 d->last_adv_addr_type = bdaddr_type;
1155 d->last_adv_rssi = rssi;
1156 d->last_adv_flags = flags;
1157 memcpy(d->last_adv_data, data, len);
1158 d->last_adv_data_len = len;
/* Command Complete for HCI_LE_Set_Scan_Enable: update the HCI_LE_SCAN
 * flag.  On enable with active scanning, drop any stale buffered report.
 * On disable, flush a pending report to mgmt, cancel the scan-disable
 * timer, and either mark discovery stopped (if scanning was interrupted
 * by a connect request) or re-enable advertising that active scanning
 * had suppressed.  Reserved enable values are logged as errors.
 */
1161 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1162 struct sk_buff *skb)
1164 struct hci_cp_le_set_scan_enable *cp;
1165 __u8 status = *((__u8 *) skb->data);
1167 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1172 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1178 switch (cp->enable) {
1179 case LE_SCAN_ENABLE:
1180 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1181 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1182 clear_pending_adv_report(hdev);
1185 case LE_SCAN_DISABLE:
1186 /* We do this here instead of when setting DISCOVERY_STOPPED
1187 * since the latter would potentially require waiting for
1188 * inquiry to stop too.
1190 if (has_pending_adv_report(hdev)) {
1191 struct discovery_state *d = &hdev->discovery;
1193 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1194 d->last_adv_addr_type, NULL,
1195 d->last_adv_rssi, d->last_adv_flags,
1197 d->last_adv_data_len, NULL, 0);
1200 /* Cancel this timer so that we don't try to disable scanning
1201 * when it's already disabled.
1203 cancel_delayed_work(&hdev->le_scan_disable);
1205 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1207 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1208 * interrupted scanning due to a connect request. Mark
1209 * therefore discovery as stopped. If this was not
1210 * because of a connect request advertising might have
1211 * been disabled because of active scanning, so
1212 * re-enable it again if necessary.
1214 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1215 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1216 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1217 hdev->discovery.state == DISCOVERY_FINDING)
1218 mgmt_reenable_advertising(hdev);
1223 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1227 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_White_List_Size: cache the capacity. */
1230 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1231 struct sk_buff *skb)
1233 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1235 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1240 hdev->le_white_list_size = rp->size;

/* Command Complete for HCI_LE_Clear_White_List: mirror the clear into
 * our local copy of the list.
 */
1243 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1244 struct sk_buff *skb)
1246 __u8 status = *((__u8 *) skb->data);
1248 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1253 hci_bdaddr_list_clear(&hdev->le_white_list);

/* Command Complete for HCI_LE_Add_Device_To_White_List: mirror the
 * addition into the local list (the addr-type argument line is missing
 * from this extraction).
 */
1256 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1257 struct sk_buff *skb)
1259 struct hci_cp_le_add_to_white_list *sent;
1260 __u8 status = *((__u8 *) skb->data);
1262 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1267 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1271 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,

/* Command Complete for HCI_LE_Remove_Device_From_White_List: mirror the
 * removal into the local list.
 */
1275 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1276 struct sk_buff *skb)
1278 struct hci_cp_le_del_from_white_list *sent;
1279 __u8 status = *((__u8 *) skb->data);
1281 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1286 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1290 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,

/* Command Complete for HCI_LE_Read_Supported_States: cache the 8-byte
 * LE state combination mask.
 */
1294 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1295 struct sk_buff *skb)
1297 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1299 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1304 memcpy(hdev->le_states, rp->le_states, 8);

/* Command Complete for HCI_LE_Read_Suggested_Default_Data_Length. */
1307 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1308 struct sk_buff *skb)
1310 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1312 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1317 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1318 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

/* Command Complete for HCI_LE_Write_Suggested_Default_Data_Length:
 * cache the values we sent.
 */
1321 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1322 struct sk_buff *skb)
1324 struct hci_cp_le_write_def_data_len *sent;
1325 __u8 status = *((__u8 *) skb->data);
1327 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1332 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1336 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1337 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

/* Command Complete for HCI_LE_Read_Maximum_Data_Length. */
1340 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1341 struct sk_buff *skb)
1343 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1345 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1350 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1351 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1352 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1353 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete for HCI_Write_LE_Host_Supported: mirror the sent
 * values into the host-LE and LE+BR/EDR-simultaneous feature bits and
 * the HCI_LE_ENABLED flag; disabling LE also clears HCI_ADVERTISING.
 */
1356 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1357 struct sk_buff *skb)
1359 struct hci_cp_write_le_host_supported *sent;
1360 __u8 status = *((__u8 *) skb->data);
1362 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1367 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1374 hdev->features[1][0] |= LMP_HOST_LE;
1375 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1377 hdev->features[1][0] &= ~LMP_HOST_LE;
1378 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1379 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1383 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1385 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1387 hci_dev_unlock(hdev);

/* Command Complete for HCI_LE_Set_Advertising_Parameters: remember the
 * own-address type that advertising will use.
 */
1390 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1392 struct hci_cp_le_set_adv_param *cp;
1393 u8 status = *((u8 *) skb->data);
1395 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1400 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1405 hdev->adv_addr_type = cp->own_address_type;
1406 hci_dev_unlock(hdev);

/* Command Complete for HCI_Write_Remote_AMP_ASSOC: continue writing the
 * next fragment for this physical link.
 */
1409 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1410 struct sk_buff *skb)
1412 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1414 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1415 hdev->name, rp->status, rp->phy_handle);
1420 amp_write_rem_assoc_continue(hdev, rp->phy_handle);

/* Command Complete for HCI_Read_RSSI: store the RSSI on the matching
 * connection.
 */
1423 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1425 struct hci_rp_read_rssi *rp = (void *) skb->data;
1426 struct hci_conn *conn;
1428 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1435 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1437 conn->rssi = rp->rssi;
1439 hci_dev_unlock(hdev);

/* Command Complete for HCI_Read_Transmit_Power_Level: the sent command's
 * type selects whether the current or the maximum TX power was read
 * (case labels are missing from this extraction).
 */
1442 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1444 struct hci_cp_read_tx_power *sent;
1445 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1446 struct hci_conn *conn;
1448 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1453 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1459 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1463 switch (sent->type) {
1465 conn->tx_power = rp->tx_power;
1468 conn->max_tx_power = rp->tx_power;
1473 hci_dev_unlock(hdev);

/* Command Complete for HCI_Write_SSP_Debug_Mode: cache the debug-mode
 * byte we sent (the `mode` declaration line is missing here).
 */
1476 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1478 u8 status = *((u8 *) skb->data);
1481 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1486 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1488 hdev->ssp_debug_mode = *mode;
/* Command Status for HCI_Inquiry: on failure resume pending connection
 * attempts; on success mark an inquiry as in progress.
 */
1491 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1493 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1496 hci_conn_check_pending(hdev);
1500 set_bit(HCI_INQUIRY, &hdev->flags);

/* Command Status for HCI_Create_Connection.  On failure: if an outgoing
 * conn object exists in BT_CONNECT, close it — except for status 0x0c
 * (Command Disallowed) on an early attempt, where it is parked in
 * BT_CONNECT2 for a retry.  On success with no conn object (connection
 * initiated outside the stack), a placeholder conn is created.
 */
1503 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1505 struct hci_cp_create_conn *cp;
1506 struct hci_conn *conn;
1508 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1510 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1516 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1518 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1521 if (conn && conn->state == BT_CONNECT) {
1522 if (status != 0x0c || conn->attempt > 2) {
1523 conn->state = BT_CLOSED;
1524 hci_connect_cfm(conn, status);
1527 conn->state = BT_CONNECT2;
1531 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1534 BT_ERR("No memory for new connection");
1538 hci_dev_unlock(hdev);

/* Command Status for HCI_Add_SCO_Connection: on failure, find the ACL
 * link the SCO was being added to, close the pending SCO connection and
 * report the failure to the upper layers.  (The sco-link lookup line is
 * missing from this extraction.)
 */
1541 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1543 struct hci_cp_add_sco *cp;
1544 struct hci_conn *acl, *sco;
1547 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1552 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1556 handle = __le16_to_cpu(cp->handle);
1558 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1562 acl = hci_conn_hash_lookup_handle(hdev, handle);
1566 sco->state = BT_CLOSED;
1568 hci_connect_cfm(sco, status);
1573 hci_dev_unlock(hdev);

/* Command Status for HCI_Authentication_Requested: on failure, report
 * the failed auth to a connection still in BT_CONFIG and drop the ref
 * taken for the request.
 */
1576 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1578 struct hci_cp_auth_requested *cp;
1579 struct hci_conn *conn;
1581 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1586 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1592 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1594 if (conn->state == BT_CONFIG) {
1595 hci_connect_cfm(conn, status);
1596 hci_conn_drop(conn);
1600 hci_dev_unlock(hdev);

/* Command Status for HCI_Set_Connection_Encryption: same failure
 * handling pattern as authentication above.
 */
1603 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1605 struct hci_cp_set_conn_encrypt *cp;
1606 struct hci_conn *conn;
1608 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1613 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1619 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1621 if (conn->state == BT_CONFIG) {
1622 hci_connect_cfm(conn, status);
1623 hci_conn_drop(conn);
1627 hci_dev_unlock(hdev);
1630 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1631 struct hci_conn *conn)
1633 if (conn->state != BT_CONFIG || !conn->out)
1636 if (conn->pending_sec_level == BT_SECURITY_SDP)
1639 /* Only request authentication for SSP connections or non-SSP
1640 * devices with sec_level MEDIUM or HIGH or if MITM protection
1643 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1644 conn->pending_sec_level != BT_SECURITY_FIPS &&
1645 conn->pending_sec_level != BT_SECURITY_HIGH &&
1646 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1652 static int hci_resolve_name(struct hci_dev *hdev,
1653 struct inquiry_entry *e)
1655 struct hci_cp_remote_name_req cp;
1657 memset(&cp, 0, sizeof(cp));
1659 bacpy(&cp.bdaddr, &e->data.bdaddr);
1660 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1661 cp.pscan_mode = e->data.pscan_mode;
1662 cp.clock_offset = e->data.clock_offset;
1664 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1667 static bool hci_resolve_next_name(struct hci_dev *hdev)
1669 struct discovery_state *discov = &hdev->discovery;
1670 struct inquiry_entry *e;
1672 if (list_empty(&discov->resolve))
1675 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1679 if (hci_resolve_name(hdev, e) == 0) {
1680 e->name_state = NAME_PENDING;
1687 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1688 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1690 struct discovery_state *discov = &hdev->discovery;
1691 struct inquiry_entry *e;
1693 /* Update the mgmt connected state if necessary. Be careful with
1694 * conn objects that exist but are not (yet) connected however.
1695 * Only those in BT_CONFIG or BT_CONNECTED states can be
1696 * considered connected.
1699 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1700 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1701 mgmt_device_connected(hdev, conn, 0, name, name_len);
1703 if (discov->state == DISCOVERY_STOPPED)
1706 if (discov->state == DISCOVERY_STOPPING)
1707 goto discov_complete;
1709 if (discov->state != DISCOVERY_RESOLVING)
1712 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1713 /* If the device was not found in a list of found devices names of which
1714 * are pending. there is no need to continue resolving a next name as it
1715 * will be done upon receiving another Remote Name Request Complete
1722 e->name_state = NAME_KNOWN;
1723 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1724 e->data.rssi, name, name_len);
1726 e->name_state = NAME_NOT_KNOWN;
1729 if (hci_resolve_next_name(hdev))
1733 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1736 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1738 struct hci_cp_remote_name_req *cp;
1739 struct hci_conn *conn;
1741 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1743 /* If successful wait for the name req complete event before
1744 * checking for the need to do authentication */
1748 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1754 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1756 if (hci_dev_test_flag(hdev, HCI_MGMT))
1757 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1762 if (!hci_outgoing_auth_needed(hdev, conn))
1765 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1766 struct hci_cp_auth_requested auth_cp;
1768 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1770 auth_cp.handle = __cpu_to_le16(conn->handle);
1771 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1772 sizeof(auth_cp), &auth_cp);
1776 hci_dev_unlock(hdev);
1779 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1781 struct hci_cp_read_remote_features *cp;
1782 struct hci_conn *conn;
1784 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1789 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1795 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1797 if (conn->state == BT_CONFIG) {
1798 hci_connect_cfm(conn, status);
1799 hci_conn_drop(conn);
1803 hci_dev_unlock(hdev);
1806 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1808 struct hci_cp_read_remote_ext_features *cp;
1809 struct hci_conn *conn;
1811 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1816 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1822 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1824 if (conn->state == BT_CONFIG) {
1825 hci_connect_cfm(conn, status);
1826 hci_conn_drop(conn);
1830 hci_dev_unlock(hdev);
1833 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1835 struct hci_cp_setup_sync_conn *cp;
1836 struct hci_conn *acl, *sco;
1839 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1844 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1848 handle = __le16_to_cpu(cp->handle);
1850 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1854 acl = hci_conn_hash_lookup_handle(hdev, handle);
1858 sco->state = BT_CLOSED;
1860 hci_connect_cfm(sco, status);
1865 hci_dev_unlock(hdev);
1868 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1870 struct hci_cp_sniff_mode *cp;
1871 struct hci_conn *conn;
1873 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1878 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1884 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1886 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1888 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1889 hci_sco_setup(conn, status);
1892 hci_dev_unlock(hdev);
1895 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1897 struct hci_cp_exit_sniff_mode *cp;
1898 struct hci_conn *conn;
1900 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1905 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1911 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1913 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1915 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1916 hci_sco_setup(conn, status);
1919 hci_dev_unlock(hdev);
1922 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1924 struct hci_cp_disconnect *cp;
1925 struct hci_conn *conn;
1930 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1936 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1938 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1939 conn->dst_type, status);
1941 hci_dev_unlock(hdev);
1944 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1946 struct hci_cp_create_phy_link *cp;
1948 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1950 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1957 struct hci_conn *hcon;
1959 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1963 amp_write_remote_assoc(hdev, cp->phy_handle);
1966 hci_dev_unlock(hdev);
1969 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1971 struct hci_cp_accept_phy_link *cp;
1973 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1978 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1982 amp_write_remote_assoc(hdev, cp->phy_handle);
1985 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1987 struct hci_cp_le_create_conn *cp;
1988 struct hci_conn *conn;
1990 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1992 /* All connection failure handling is taken care of by the
1993 * hci_le_conn_failed function which is triggered by the HCI
1994 * request completion callbacks used for connecting.
1999 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2005 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
2009 /* Store the initiator and responder address information which
2010 * is needed for SMP. These values will not change during the
2011 * lifetime of the connection.
2013 conn->init_addr_type = cp->own_address_type;
2014 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
2015 bacpy(&conn->init_addr, &hdev->random_addr);
2017 bacpy(&conn->init_addr, &hdev->bdaddr);
2019 conn->resp_addr_type = cp->peer_addr_type;
2020 bacpy(&conn->resp_addr, &cp->peer_addr);
2022 /* We don't want the connection attempt to stick around
2023 * indefinitely since LE doesn't have a page timeout concept
2024 * like BR/EDR. Set a timer for any connection that doesn't use
2025 * the white list for connecting.
2027 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
2028 queue_delayed_work(conn->hdev->workqueue,
2029 &conn->le_conn_timeout,
2030 conn->conn_timeout);
2033 hci_dev_unlock(hdev);
2036 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2038 struct hci_cp_le_read_remote_features *cp;
2039 struct hci_conn *conn;
2041 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2046 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2052 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2054 if (conn->state == BT_CONFIG) {
2055 hci_connect_cfm(conn, status);
2056 hci_conn_drop(conn);
2060 hci_dev_unlock(hdev);
2063 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2065 struct hci_cp_le_start_enc *cp;
2066 struct hci_conn *conn;
2068 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2075 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2079 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2083 if (conn->state != BT_CONNECTED)
2086 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2087 hci_conn_drop(conn);
2090 hci_dev_unlock(hdev);
2093 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2095 struct hci_cp_switch_role *cp;
2096 struct hci_conn *conn;
2098 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2103 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2109 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2111 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2113 hci_dev_unlock(hdev);
2116 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2118 __u8 status = *((__u8 *) skb->data);
2119 struct discovery_state *discov = &hdev->discovery;
2120 struct inquiry_entry *e;
2122 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2124 hci_conn_check_pending(hdev);
2126 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2129 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2130 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2132 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2137 if (discov->state != DISCOVERY_FINDING)
2140 if (list_empty(&discov->resolve)) {
2141 /* When BR/EDR inquiry is active and no LE scanning is in
2142 * progress, then change discovery state to indicate completion.
2144 * When running LE scanning and BR/EDR inquiry simultaneously
2145 * and the LE scan already finished, then change the discovery
2146 * state to indicate completion.
2148 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2149 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2150 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2154 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2155 if (e && hci_resolve_name(hdev, e) == 0) {
2156 e->name_state = NAME_PENDING;
2157 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2159 /* When BR/EDR inquiry is active and no LE scanning is in
2160 * progress, then change discovery state to indicate completion.
2162 * When running LE scanning and BR/EDR inquiry simultaneously
2163 * and the LE scan already finished, then change the discovery
2164 * state to indicate completion.
2166 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2167 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2168 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2172 hci_dev_unlock(hdev);
2175 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2177 struct inquiry_data data;
2178 struct inquiry_info *info = (void *) (skb->data + 1);
2179 int num_rsp = *((__u8 *) skb->data);
2181 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2186 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2191 for (; num_rsp; num_rsp--, info++) {
2194 bacpy(&data.bdaddr, &info->bdaddr);
2195 data.pscan_rep_mode = info->pscan_rep_mode;
2196 data.pscan_period_mode = info->pscan_period_mode;
2197 data.pscan_mode = info->pscan_mode;
2198 memcpy(data.dev_class, info->dev_class, 3);
2199 data.clock_offset = info->clock_offset;
2200 data.rssi = HCI_RSSI_INVALID;
2201 data.ssp_mode = 0x00;
2203 flags = hci_inquiry_cache_update(hdev, &data, false);
2205 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2206 info->dev_class, HCI_RSSI_INVALID,
2207 flags, NULL, 0, NULL, 0);
2210 hci_dev_unlock(hdev);
2213 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2215 struct hci_ev_conn_complete *ev = (void *) skb->data;
2216 struct hci_conn *conn;
2218 BT_DBG("%s", hdev->name);
2222 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2224 if (ev->link_type != SCO_LINK)
2227 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2231 conn->type = SCO_LINK;
2235 conn->handle = __le16_to_cpu(ev->handle);
2237 if (conn->type == ACL_LINK) {
2238 conn->state = BT_CONFIG;
2239 hci_conn_hold(conn);
2241 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2242 !hci_find_link_key(hdev, &ev->bdaddr))
2243 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2245 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2247 conn->state = BT_CONNECTED;
2249 hci_debugfs_create_conn(conn);
2250 hci_conn_add_sysfs(conn);
2252 if (test_bit(HCI_AUTH, &hdev->flags))
2253 set_bit(HCI_CONN_AUTH, &conn->flags);
2255 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2256 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2258 /* Get remote features */
2259 if (conn->type == ACL_LINK) {
2260 struct hci_cp_read_remote_features cp;
2261 cp.handle = ev->handle;
2262 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2265 hci_update_page_scan(hdev);
2268 /* Set packet type for incoming connection */
2269 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2270 struct hci_cp_change_conn_ptype cp;
2271 cp.handle = ev->handle;
2272 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2273 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2277 conn->state = BT_CLOSED;
2278 if (conn->type == ACL_LINK)
2279 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2280 conn->dst_type, ev->status);
2283 if (conn->type == ACL_LINK)
2284 hci_sco_setup(conn, ev->status);
2287 hci_connect_cfm(conn, ev->status);
2289 } else if (ev->link_type != ACL_LINK)
2290 hci_connect_cfm(conn, ev->status);
2293 hci_dev_unlock(hdev);
2295 hci_conn_check_pending(hdev);
2298 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2300 struct hci_cp_reject_conn_req cp;
2302 bacpy(&cp.bdaddr, bdaddr);
2303 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2304 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2307 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2309 struct hci_ev_conn_request *ev = (void *) skb->data;
2310 int mask = hdev->link_mode;
2311 struct inquiry_entry *ie;
2312 struct hci_conn *conn;
2315 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2318 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2321 if (!(mask & HCI_LM_ACCEPT)) {
2322 hci_reject_conn(hdev, &ev->bdaddr);
2326 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2328 hci_reject_conn(hdev, &ev->bdaddr);
2332 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2333 * connection. These features are only touched through mgmt so
2334 * only do the checks if HCI_MGMT is set.
2336 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2337 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2338 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2340 hci_reject_conn(hdev, &ev->bdaddr);
2344 /* Connection accepted */
2348 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2350 memcpy(ie->data.dev_class, ev->dev_class, 3);
2352 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2355 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2358 BT_ERR("No memory for new connection");
2359 hci_dev_unlock(hdev);
2364 memcpy(conn->dev_class, ev->dev_class, 3);
2366 hci_dev_unlock(hdev);
2368 if (ev->link_type == ACL_LINK ||
2369 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2370 struct hci_cp_accept_conn_req cp;
2371 conn->state = BT_CONNECT;
2373 bacpy(&cp.bdaddr, &ev->bdaddr);
2375 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2376 cp.role = 0x00; /* Become master */
2378 cp.role = 0x01; /* Remain slave */
2380 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2381 } else if (!(flags & HCI_PROTO_DEFER)) {
2382 struct hci_cp_accept_sync_conn_req cp;
2383 conn->state = BT_CONNECT;
2385 bacpy(&cp.bdaddr, &ev->bdaddr);
2386 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2388 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2389 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2390 cp.max_latency = cpu_to_le16(0xffff);
2391 cp.content_format = cpu_to_le16(hdev->voice_setting);
2392 cp.retrans_effort = 0xff;
2394 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2397 conn->state = BT_CONNECT2;
2398 hci_connect_cfm(conn, 0);
2402 static u8 hci_to_mgmt_reason(u8 err)
2405 case HCI_ERROR_CONNECTION_TIMEOUT:
2406 return MGMT_DEV_DISCONN_TIMEOUT;
2407 case HCI_ERROR_REMOTE_USER_TERM:
2408 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2409 case HCI_ERROR_REMOTE_POWER_OFF:
2410 return MGMT_DEV_DISCONN_REMOTE;
2411 case HCI_ERROR_LOCAL_HOST_TERM:
2412 return MGMT_DEV_DISCONN_LOCAL_HOST;
2414 return MGMT_DEV_DISCONN_UNKNOWN;
2418 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2420 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2421 u8 reason = hci_to_mgmt_reason(ev->reason);
2422 struct hci_conn_params *params;
2423 struct hci_conn *conn;
2424 bool mgmt_connected;
2427 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2431 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2436 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2437 conn->dst_type, ev->status);
2441 conn->state = BT_CLOSED;
2443 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2444 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2445 reason, mgmt_connected);
2447 if (conn->type == ACL_LINK) {
2448 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2449 hci_remove_link_key(hdev, &conn->dst);
2451 hci_update_page_scan(hdev);
2454 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2456 switch (params->auto_connect) {
2457 case HCI_AUTO_CONN_LINK_LOSS:
2458 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2462 case HCI_AUTO_CONN_DIRECT:
2463 case HCI_AUTO_CONN_ALWAYS:
2464 list_del_init(¶ms->action);
2465 list_add(¶ms->action, &hdev->pend_le_conns);
2466 hci_update_background_scan(hdev);
2476 hci_disconn_cfm(conn, ev->reason);
2479 /* Re-enable advertising if necessary, since it might
2480 * have been disabled by the connection. From the
2481 * HCI_LE_Set_Advertise_Enable command description in
2482 * the core specification (v4.0):
2483 * "The Controller shall continue advertising until the Host
2484 * issues an LE_Set_Advertise_Enable command with
2485 * Advertising_Enable set to 0x00 (Advertising is disabled)
2486 * or until a connection is created or until the Advertising
2487 * is timed out due to Directed Advertising."
2489 if (type == LE_LINK)
2490 mgmt_reenable_advertising(hdev);
2493 hci_dev_unlock(hdev);
2496 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2498 struct hci_ev_auth_complete *ev = (void *) skb->data;
2499 struct hci_conn *conn;
2501 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2505 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2510 if (!hci_conn_ssp_enabled(conn) &&
2511 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2512 BT_INFO("re-auth of legacy device is not possible.");
2514 set_bit(HCI_CONN_AUTH, &conn->flags);
2515 conn->sec_level = conn->pending_sec_level;
2518 mgmt_auth_failed(conn, ev->status);
2521 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2522 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2524 if (conn->state == BT_CONFIG) {
2525 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2526 struct hci_cp_set_conn_encrypt cp;
2527 cp.handle = ev->handle;
2529 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2532 conn->state = BT_CONNECTED;
2533 hci_connect_cfm(conn, ev->status);
2534 hci_conn_drop(conn);
2537 hci_auth_cfm(conn, ev->status);
2539 hci_conn_hold(conn);
2540 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2541 hci_conn_drop(conn);
2544 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2546 struct hci_cp_set_conn_encrypt cp;
2547 cp.handle = ev->handle;
2549 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2552 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2553 hci_encrypt_cfm(conn, ev->status, 0x00);
2558 hci_dev_unlock(hdev);
2561 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2563 struct hci_ev_remote_name *ev = (void *) skb->data;
2564 struct hci_conn *conn;
2566 BT_DBG("%s", hdev->name);
2568 hci_conn_check_pending(hdev);
2572 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2574 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2577 if (ev->status == 0)
2578 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2579 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2581 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2587 if (!hci_outgoing_auth_needed(hdev, conn))
2590 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2591 struct hci_cp_auth_requested cp;
2593 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2595 cp.handle = __cpu_to_le16(conn->handle);
2596 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2600 hci_dev_unlock(hdev);
2603 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2604 u16 opcode, struct sk_buff *skb)
2606 const struct hci_rp_read_enc_key_size *rp;
2607 struct hci_conn *conn;
2610 BT_DBG("%s status 0x%02x", hdev->name, status);
2612 if (!skb || skb->len < sizeof(*rp)) {
2613 BT_ERR("%s invalid HCI Read Encryption Key Size response",
2618 rp = (void *)skb->data;
2619 handle = le16_to_cpu(rp->handle);
2623 conn = hci_conn_hash_lookup_handle(hdev, handle);
2627 /* If we fail to read the encryption key size, assume maximum
2628 * (which is the same we do also when this HCI command isn't
2632 BT_ERR("%s failed to read key size for handle %u", hdev->name,
2634 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2636 conn->enc_key_size = rp->key_size;
2639 if (conn->state == BT_CONFIG) {
2640 conn->state = BT_CONNECTED;
2641 hci_connect_cfm(conn, 0);
2642 hci_conn_drop(conn);
2646 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2648 else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
2653 hci_encrypt_cfm(conn, 0, encrypt);
2657 hci_dev_unlock(hdev);
2660 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2662 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2663 struct hci_conn *conn;
2665 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2669 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2675 /* Encryption implies authentication */
2676 set_bit(HCI_CONN_AUTH, &conn->flags);
2677 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2678 conn->sec_level = conn->pending_sec_level;
2680 /* P-256 authentication key implies FIPS */
2681 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2682 set_bit(HCI_CONN_FIPS, &conn->flags);
2684 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2685 conn->type == LE_LINK)
2686 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2688 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2689 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2693 /* We should disregard the current RPA and generate a new one
2694 * whenever the encryption procedure fails.
2696 if (ev->status && conn->type == LE_LINK)
2697 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2699 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2701 if (ev->status && conn->state == BT_CONNECTED) {
2702 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2703 hci_conn_drop(conn);
2707 /* In Secure Connections Only mode, do not allow any connections
2708 * that are not encrypted with AES-CCM using a P-256 authenticated
2711 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
2712 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2713 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2714 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2715 hci_conn_drop(conn);
2719 /* Try reading the encryption key size for encrypted ACL links */
2720 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2721 struct hci_cp_read_enc_key_size cp;
2722 struct hci_request req;
2724 /* Only send HCI_Read_Encryption_Key_Size if the
2725 * controller really supports it. If it doesn't, assume
2726 * the default size (16).
2728 if (!(hdev->commands[20] & 0x10)) {
2729 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2733 hci_req_init(&req, hdev);
2735 cp.handle = cpu_to_le16(conn->handle);
2736 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
2738 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
2739 BT_ERR("Sending HCI Read Encryption Key Size failed");
2740 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2748 if (conn->state == BT_CONFIG) {
2750 conn->state = BT_CONNECTED;
2752 hci_connect_cfm(conn, ev->status);
2753 hci_conn_drop(conn);
2755 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2758 hci_dev_unlock(hdev);
2761 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2762 struct sk_buff *skb)
2764 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2765 struct hci_conn *conn;
2767 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2771 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2774 set_bit(HCI_CONN_SECURE, &conn->flags);
2776 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2778 hci_key_change_cfm(conn, ev->status);
2781 hci_dev_unlock(hdev);
2784 static void hci_remote_features_evt(struct hci_dev *hdev,
2785 struct sk_buff *skb)
2787 struct hci_ev_remote_features *ev = (void *) skb->data;
2788 struct hci_conn *conn;
2790 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2794 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2799 memcpy(conn->features[0], ev->features, 8);
2801 if (conn->state != BT_CONFIG)
2804 if (!ev->status && lmp_ext_feat_capable(hdev) &&
2805 lmp_ext_feat_capable(conn)) {
2806 struct hci_cp_read_remote_ext_features cp;
2807 cp.handle = ev->handle;
2809 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2814 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2815 struct hci_cp_remote_name_req cp;
2816 memset(&cp, 0, sizeof(cp));
2817 bacpy(&cp.bdaddr, &conn->dst);
2818 cp.pscan_rep_mode = 0x02;
2819 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2820 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2821 mgmt_device_connected(hdev, conn, 0, NULL, 0);
2823 if (!hci_outgoing_auth_needed(hdev, conn)) {
2824 conn->state = BT_CONNECTED;
2825 hci_connect_cfm(conn, ev->status);
2826 hci_conn_drop(conn);
2830 hci_dev_unlock(hdev);
2833 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
2834 u16 *opcode, u8 *status,
2835 hci_req_complete_t *req_complete,
2836 hci_req_complete_skb_t *req_complete_skb)
2838 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2840 *opcode = __le16_to_cpu(ev->opcode);
2841 *status = skb->data[sizeof(*ev)];
2843 skb_pull(skb, sizeof(*ev));
2846 case HCI_OP_INQUIRY_CANCEL:
2847 hci_cc_inquiry_cancel(hdev, skb);
2850 case HCI_OP_PERIODIC_INQ:
2851 hci_cc_periodic_inq(hdev, skb);
2854 case HCI_OP_EXIT_PERIODIC_INQ:
2855 hci_cc_exit_periodic_inq(hdev, skb);
2858 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2859 hci_cc_remote_name_req_cancel(hdev, skb);
2862 case HCI_OP_ROLE_DISCOVERY:
2863 hci_cc_role_discovery(hdev, skb);
2866 case HCI_OP_READ_LINK_POLICY:
2867 hci_cc_read_link_policy(hdev, skb);
2870 case HCI_OP_WRITE_LINK_POLICY:
2871 hci_cc_write_link_policy(hdev, skb);
2874 case HCI_OP_READ_DEF_LINK_POLICY:
2875 hci_cc_read_def_link_policy(hdev, skb);
2878 case HCI_OP_WRITE_DEF_LINK_POLICY:
2879 hci_cc_write_def_link_policy(hdev, skb);
2883 hci_cc_reset(hdev, skb);
2886 case HCI_OP_READ_STORED_LINK_KEY:
2887 hci_cc_read_stored_link_key(hdev, skb);
2890 case HCI_OP_DELETE_STORED_LINK_KEY:
2891 hci_cc_delete_stored_link_key(hdev, skb);
2894 case HCI_OP_WRITE_LOCAL_NAME:
2895 hci_cc_write_local_name(hdev, skb);
2898 case HCI_OP_READ_LOCAL_NAME:
2899 hci_cc_read_local_name(hdev, skb);
2902 case HCI_OP_WRITE_AUTH_ENABLE:
2903 hci_cc_write_auth_enable(hdev, skb);
2906 case HCI_OP_WRITE_ENCRYPT_MODE:
2907 hci_cc_write_encrypt_mode(hdev, skb);
2910 case HCI_OP_WRITE_SCAN_ENABLE:
2911 hci_cc_write_scan_enable(hdev, skb);
2914 case HCI_OP_READ_CLASS_OF_DEV:
2915 hci_cc_read_class_of_dev(hdev, skb);
2918 case HCI_OP_WRITE_CLASS_OF_DEV:
2919 hci_cc_write_class_of_dev(hdev, skb);
2922 case HCI_OP_READ_VOICE_SETTING:
2923 hci_cc_read_voice_setting(hdev, skb);
2926 case HCI_OP_WRITE_VOICE_SETTING:
2927 hci_cc_write_voice_setting(hdev, skb);
2930 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2931 hci_cc_read_num_supported_iac(hdev, skb);
2934 case HCI_OP_WRITE_SSP_MODE:
2935 hci_cc_write_ssp_mode(hdev, skb);
2938 case HCI_OP_WRITE_SC_SUPPORT:
2939 hci_cc_write_sc_support(hdev, skb);
2942 case HCI_OP_READ_LOCAL_VERSION:
2943 hci_cc_read_local_version(hdev, skb);
2946 case HCI_OP_READ_LOCAL_COMMANDS:
2947 hci_cc_read_local_commands(hdev, skb);
2950 case HCI_OP_READ_LOCAL_FEATURES:
2951 hci_cc_read_local_features(hdev, skb);
2954 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2955 hci_cc_read_local_ext_features(hdev, skb);
2958 case HCI_OP_READ_BUFFER_SIZE:
2959 hci_cc_read_buffer_size(hdev, skb);
2962 case HCI_OP_READ_BD_ADDR:
2963 hci_cc_read_bd_addr(hdev, skb);
2966 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2967 hci_cc_read_page_scan_activity(hdev, skb);
2970 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2971 hci_cc_write_page_scan_activity(hdev, skb);
2974 case HCI_OP_READ_PAGE_SCAN_TYPE:
2975 hci_cc_read_page_scan_type(hdev, skb);
2978 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2979 hci_cc_write_page_scan_type(hdev, skb);
2982 case HCI_OP_READ_DATA_BLOCK_SIZE:
2983 hci_cc_read_data_block_size(hdev, skb);
2986 case HCI_OP_READ_FLOW_CONTROL_MODE:
2987 hci_cc_read_flow_control_mode(hdev, skb);
2990 case HCI_OP_READ_LOCAL_AMP_INFO:
2991 hci_cc_read_local_amp_info(hdev, skb);
2994 case HCI_OP_READ_CLOCK:
2995 hci_cc_read_clock(hdev, skb);
2998 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2999 hci_cc_read_local_amp_assoc(hdev, skb);
3002 case HCI_OP_READ_INQ_RSP_TX_POWER:
3003 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3006 case HCI_OP_PIN_CODE_REPLY:
3007 hci_cc_pin_code_reply(hdev, skb);
3010 case HCI_OP_PIN_CODE_NEG_REPLY:
3011 hci_cc_pin_code_neg_reply(hdev, skb);
3014 case HCI_OP_READ_LOCAL_OOB_DATA:
3015 hci_cc_read_local_oob_data(hdev, skb);
3018 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3019 hci_cc_read_local_oob_ext_data(hdev, skb);
3022 case HCI_OP_LE_READ_BUFFER_SIZE:
3023 hci_cc_le_read_buffer_size(hdev, skb);
3026 case HCI_OP_LE_READ_LOCAL_FEATURES:
3027 hci_cc_le_read_local_features(hdev, skb);
3030 case HCI_OP_LE_READ_ADV_TX_POWER:
3031 hci_cc_le_read_adv_tx_power(hdev, skb);
3034 case HCI_OP_USER_CONFIRM_REPLY:
3035 hci_cc_user_confirm_reply(hdev, skb);
3038 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3039 hci_cc_user_confirm_neg_reply(hdev, skb);
3042 case HCI_OP_USER_PASSKEY_REPLY:
3043 hci_cc_user_passkey_reply(hdev, skb);
3046 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3047 hci_cc_user_passkey_neg_reply(hdev, skb);
3050 case HCI_OP_LE_SET_RANDOM_ADDR:
3051 hci_cc_le_set_random_addr(hdev, skb);
3054 case HCI_OP_LE_SET_ADV_ENABLE:
3055 hci_cc_le_set_adv_enable(hdev, skb);
3058 case HCI_OP_LE_SET_SCAN_PARAM:
3059 hci_cc_le_set_scan_param(hdev, skb);
3062 case HCI_OP_LE_SET_SCAN_ENABLE:
3063 hci_cc_le_set_scan_enable(hdev, skb);
3066 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3067 hci_cc_le_read_white_list_size(hdev, skb);
3070 case HCI_OP_LE_CLEAR_WHITE_LIST:
3071 hci_cc_le_clear_white_list(hdev, skb);
3074 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3075 hci_cc_le_add_to_white_list(hdev, skb);
3078 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3079 hci_cc_le_del_from_white_list(hdev, skb);
3082 case HCI_OP_LE_READ_SUPPORTED_STATES:
3083 hci_cc_le_read_supported_states(hdev, skb);
3086 case HCI_OP_LE_READ_DEF_DATA_LEN:
3087 hci_cc_le_read_def_data_len(hdev, skb);
3090 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3091 hci_cc_le_write_def_data_len(hdev, skb);
3094 case HCI_OP_LE_READ_MAX_DATA_LEN:
3095 hci_cc_le_read_max_data_len(hdev, skb);
3098 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3099 hci_cc_write_le_host_supported(hdev, skb);
3102 case HCI_OP_LE_SET_ADV_PARAM:
3103 hci_cc_set_adv_param(hdev, skb);
3106 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
3107 hci_cc_write_remote_amp_assoc(hdev, skb);
3110 case HCI_OP_READ_RSSI:
3111 hci_cc_read_rssi(hdev, skb);
3114 case HCI_OP_READ_TX_POWER:
3115 hci_cc_read_tx_power(hdev, skb);
3118 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3119 hci_cc_write_ssp_debug_mode(hdev, skb);
3123 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3127 if (*opcode != HCI_OP_NOP)
3128 cancel_delayed_work(&hdev->cmd_timer);
3130 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3131 atomic_set(&hdev->cmd_cnt, 1);
3133 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3136 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3137 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle an HCI Command Status event.
 *
 * Reports the acknowledged opcode/status back to the caller via
 * @opcode/@status, dispatches to the per-command hci_cs_* handler,
 * re-arms the command timeout and command credit, and finally kicks
 * the command queue worker so the next queued command can be sent.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	/* Hand the decoded opcode/status back to hci_event_packet(). */
	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	/* Dispatch to the command-specific status handler. */
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);

	/* Opcodes without a dedicated handler are only logged. */
	BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);

	/* HCI_OP_NOP status events do not acknowledge a sent command,
	 * so only cancel the command timeout for real opcodes.
	 */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Restore a command credit unless a controller reset is in
	 * flight (HCI_RESET keeps the queue quiesced).
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 */
	(hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,

	/* Send the next queued command if a credit is available. */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
3251 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3253 struct hci_ev_hardware_error *ev = (void *) skb->data;
3255 hdev->hw_error_code = ev->code;
3257 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle an HCI Role Change event: adopt the new master/slave role on
 * the matching ACL connection, clear the pending role-switch flag and
 * notify the role-switch confirmation callbacks.
 */
static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
		/* Only adopt the new role when the switch succeeded. */
		conn->role = ev->role;

	clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_role_switch_cfm(conn, ev->status, ev->role);

	hci_dev_unlock(hdev);
/* Handle a Number Of Completed Packets event (packet-based flow
 * control): credit back the per-link-type transmit quotas for every
 * handle entry in the event and re-schedule the TX worker.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;

	/* This event is only valid in packet-based flow control mode;
	 * block-based controllers use Number Of Completed Blocks.
	 */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);

	/* Validate that the skb actually contains num_hndl entries
	 * before walking them (num_hndl comes from the controller).
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);

		conn->sent -= count;

		/* Return credits to the pool that this link type
		 * consumes, clamping at the controller-reported limit.
		 */
		switch (conn->type) {
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;

			/* LE links use the LE buffer pool when the
			 * controller advertises one, otherwise they
			 * share the ACL pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;

			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;

			BT_ERR("Unknown type %d conn %p", conn->type, conn);

	/* Freed credits may allow queued frames to go out now. */
	queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a connection handle according to the controller type: BR/EDR
 * controllers look up the connection hash directly, AMP controllers go
 * through the logical channel table to reach the owning connection.
 */
static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
	struct hci_chan *chan;

	switch (hdev->dev_type) {
		return hci_conn_hash_lookup_handle(hdev, handle);

		chan = hci_chan_lookup_handle(hdev, handle);

		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* Handle a Number Of Completed Data Blocks event (block-based flow
 * control): credit back the shared block pool for every handle entry
 * and re-schedule the TX worker.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;

	/* Only valid for controllers using block-based flow control. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);

	/* Validate the controller-supplied entry count against the
	 * actual skb length before walking the entries.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handles may belong to AMP logical links too. */
		conn = __hci_conn_lookup_handle(hdev, handle);

		conn->sent -= block_count;

		/* Return blocks to the shared pool, clamped at the
		 * controller-reported maximum.
		 */
		switch (conn->type) {
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;

			BT_ERR("Unknown type %d conn %p", conn->type, conn);

	/* Freed blocks may allow queued frames to go out now. */
	queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle an HCI Mode Change event: record the connection's new power
 * mode (active/sniff), keep the POWER_SAVE flag in sync for changes we
 * did not initiate, and finish any SCO setup that was waiting for the
 * mode change.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
		conn->mode = ev->mode;

	/* Only track the power-save flag for remotely initiated mode
	 * changes (no local MODE_CHANGE request pending).
	 */
	if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
		if (conn->mode == HCI_CM_ACTIVE)
			set_bit(HCI_CONN_POWER_SAVE, &conn->flags);

			clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);

	/* A SCO link setup may have been deferred until the ACL link
	 * left sniff mode; continue it now.
	 */
	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
		hci_sco_setup(conn, ev->status);

	hci_dev_unlock(hdev);
/* Handle an HCI PIN Code Request event: reject the request when we are
 * not bondable and did not initiate authentication, otherwise forward
 * it to user space via mgmt (indicating whether a 16 digit secure PIN
 * is required).
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Extend the disconnect timeout while pairing is in progress;
	 * the hold/drop pair applies the new timeout value.
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);

	/* Not bondable and not the auth initiator: refuse the PIN. */
	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		/* High security requires a full 16 digit PIN. */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);

	hci_dev_unlock(hdev);
3485 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3487 if (key_type == HCI_LK_CHANGED_COMBINATION)
3490 conn->pin_length = pin_len;
3491 conn->key_type = key_type;
3494 case HCI_LK_LOCAL_UNIT:
3495 case HCI_LK_REMOTE_UNIT:
3496 case HCI_LK_DEBUG_COMBINATION:
3498 case HCI_LK_COMBINATION:
3500 conn->pending_sec_level = BT_SECURITY_HIGH;
3502 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3504 case HCI_LK_UNAUTH_COMBINATION_P192:
3505 case HCI_LK_UNAUTH_COMBINATION_P256:
3506 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3508 case HCI_LK_AUTH_COMBINATION_P192:
3509 conn->pending_sec_level = BT_SECURITY_HIGH;
3511 case HCI_LK_AUTH_COMBINATION_P256:
3512 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle an HCI Link Key Request event: look up a stored key for the
 * peer and reply with it, unless the key's strength is insufficient
 * for the pending security level, in which case send a negative reply
 * to force fresh pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only managed when mgmt is in control. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))

	key = hci_find_link_key(hdev, &ev->bdaddr);
		BT_DBG("%s link key not found for %pMR", hdev->name,

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Reject an unauthenticated key when the current auth
		 * requirement demands MITM protection.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);

		/* A combination key from a short PIN cannot satisfy
		 * high/FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",

		conn_set_key(conn, key->type, key->pin_len);

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	/* Negative reply path: no usable key for this peer. */
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
/* Handle an HCI Link Key Notification event: store the newly created
 * link key, update the connection's key/security bookkeeping and
 * notify user space. Debug keys are dropped from the kernel list
 * unless HCI_KEEP_DEBUG_KEYS is set.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Re-arm the disconnect timeout now that pairing finished. */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))

	/* NOTE(review): pin_len is declared outside the visible lines —
	 * presumably initialized to 0 rather than conn->pin_length;
	 * confirm against the full source.
	 */
	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);

	/* Track whether the key should be flushed on disconnect. */
	clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

	set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

	hci_dev_unlock(hdev);
3637 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3639 struct hci_ev_clock_offset *ev = (void *) skb->data;
3640 struct hci_conn *conn;
3642 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3646 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3647 if (conn && !ev->status) {
3648 struct inquiry_entry *ie;
3650 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3652 ie->data.clock_offset = ev->clock_offset;
3653 ie->timestamp = jiffies;
3657 hci_dev_unlock(hdev);
3660 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3662 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3663 struct hci_conn *conn;
3665 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3669 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3670 if (conn && !ev->status)
3671 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3673 hci_dev_unlock(hdev);
3676 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3678 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3679 struct inquiry_entry *ie;
3681 BT_DBG("%s", hdev->name);
3685 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3687 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3688 ie->timestamp = jiffies;
3691 hci_dev_unlock(hdev);
/* Handle an Inquiry Result With RSSI event. The event comes in two
 * wire formats (with or without a pscan_mode field); the per-response
 * size is used to tell them apart. Each response is fed into the
 * inquiry cache and reported to user space via mgmt.
 *
 * NOTE(review): num_rsp is controller-supplied; confirm the event
 * length fully covers num_rsp entries before trusting it — later
 * kernels add explicit bounds checks here.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	/* Results from periodic inquiry are not reported. */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))

	/* Distinguish the two wire formats by per-response size. */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);

		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);

	hci_dev_unlock(hdev);
/* Handle a Read Remote Extended Features Complete event: store the
 * remote feature page, keep the SSP/SC connection flags in sync with
 * the remote host features (page 1), and continue connection setup
 * (remote name request or connect confirmation).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC support bits. */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);

			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);

	/* Remaining setup only applies while still in BT_CONFIG. */
	if (conn->state != BT_CONFIG)

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* No outgoing authentication needed: the link is ready. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Handle a Synchronous Connection Complete event: finalize the SCO or
 * eSCO link on success, or retry setup with a downgraded packet type
 * for a known set of rejection status codes (controllers/peers that do
 * not accept the initially offered eSCO parameters).
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);

	/* An eSCO attempt may have been downgraded to SCO by the
	 * controller; fall back to looking up the eSCO connection.
	 */
	if (ev->link_type == ESCO_LINK)

	conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);

	conn->type = SCO_LINK;

	switch (ev->status) {
		/* Success: the link is now usable. */
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* Retry with a less demanding packet type selection. */
		conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
				 (hdev->esco_type & EDR_ESCO_MASK);
		if (hci_setup_sync(conn, conn->link->handle))

		/* Any other failure closes the connection. */
		conn->state = BT_CLOSED;

	hci_connect_cfm(conn, ev->status);

	hci_dev_unlock(hdev);
/* Return the number of significant octets in an EIR data blob.
 *
 * EIR data is a sequence of length-prefixed fields; a zero length
 * octet terminates the significant part. Returns the parsed length up
 * to (but not including) the terminating zero, or @eir_len when no
 * terminator occurs within the buffer.
 *
 * The data is only read, so the input pointer is const-qualified.
 */
static inline size_t eir_get_length(const u8 *eir, size_t eir_len)
{
	size_t parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];

		/* Zero length field marks end of significant data. */
		if (field_len == 0)
			return parsed;

		/* Advance past the length octet plus the field body. */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return eir_len;
}
/* Handle an Extended Inquiry Result event: feed each response (device
 * info plus EIR data) into the inquiry cache and report it to user
 * space together with the significant EIR bytes.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	/* Results from periodic inquiry are not reported. */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))

	for (; num_rsp; num_rsp--, info++) {

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		/* EIR implies the remote supports SSP. */
		data.ssp_mode = 0x01;

		/* If the EIR already carries the name, no separate
		 * remote name request is needed.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_has_data_type(info->data,

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);

	hci_dev_unlock(hdev);
/* Handle an Encryption Key Refresh Complete event (LE only here —
 * BR/EDR handles this via the auth complete path): promote the pending
 * security level on success, disconnect on failure, and deliver the
 * connect/auth confirmations depending on connection state.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)

	conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link forces disconnect. */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);

	if (conn->state == BT_CONFIG) {
		conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);

		hci_auth_cfm(conn, ev->status);

		/* Re-arm the disconnect timeout via hold/drop. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);

	hci_dev_unlock(hdev);
3998 static u8 hci_get_auth_req(struct hci_conn *conn)
4000 /* If remote requests no-bonding follow that lead */
4001 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4002 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4003 return conn->remote_auth | (conn->auth_type & 0x01);
4005 /* If both remote and local have enough IO capabilities, require
4008 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4009 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4010 return conn->remote_auth | 0x01;
4012 /* No MITM protection possible so ignore remote requirement */
4013 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Compute the OOB data present value for an IO Capability Reply,
 * based on which remote OOB data (P-192/P-256) is stored and whether
 * Secure Connections (Only) mode is active.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 */
		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
		    !memcmp(data->hash256, ZERO_KEY, 16))

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
	    !memcmp(data->hash192, ZERO_KEY, 16))
/* Handle an HCI IO Capability Request event: reply with our IO
 * capability, authentication requirement and OOB data present value
 * when pairing is allowed, otherwise send a negative reply rejecting
 * the pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Keep the connection alive while pairing runs. */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;

			conn->auth_type = hci_get_auth_req(conn);

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,

		/* Pairing not allowed: reject the request. */
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,

	hci_dev_unlock(hdev);
4127 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4129 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4130 struct hci_conn *conn;
4132 BT_DBG("%s", hdev->name);
4136 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4140 conn->remote_cap = ev->capability;
4141 conn->remote_auth = ev->authentication;
4144 hci_dev_unlock(hdev);
/* Handle an HCI User Confirmation Request event: reject when our MITM
 * requirement cannot be met by the remote, auto-accept (optionally
 * after a configured delay) when neither side requires MITM, and
 * otherwise forward the request to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Bit 0 of the auth requirement is the MITM flag. */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the accept to a worker so
		 * the user can still cancel.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);

	/* Otherwise ask user space to confirm the passkey. */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

	hci_dev_unlock(hdev);
4222 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4223 struct sk_buff *skb)
4225 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4227 BT_DBG("%s", hdev->name);
4229 if (hci_dev_test_flag(hdev, HCI_MGMT))
4230 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4233 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4234 struct sk_buff *skb)
4236 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4237 struct hci_conn *conn;
4239 BT_DBG("%s", hdev->name);
4241 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4245 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4246 conn->passkey_entered = 0;
4248 if (hci_dev_test_flag(hdev, HCI_MGMT))
4249 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4250 conn->dst_type, conn->passkey_notify,
4251 conn->passkey_entered);
/* Handle an HCI Keypress Notification event: track how many passkey
 * digits the remote user has entered so far and forward the updated
 * state to user space via mgmt.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;

	case HCI_KEYPRESS_COMPLETED:

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
/* Handle a Simple Pairing Complete event: reset the remote auth
 * requirement and, when we were not the authentication initiator,
 * report a pairing failure to user space.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	hci_conn_drop(conn);

	hci_dev_unlock(hdev);
4323 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4324 struct sk_buff *skb)
4326 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4327 struct inquiry_entry *ie;
4328 struct hci_conn *conn;
4330 BT_DBG("%s", hdev->name);
4334 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4336 memcpy(conn->features[1], ev->features, 8);
4338 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4340 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4342 hci_dev_unlock(hdev);
/* Handle an HCI Remote OOB Data Request event: reply with the stored
 * OOB hash/randomizer values for the peer (extended reply with P-256
 * values when Secure Connections is enabled), or a negative reply when
 * no OOB data is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
		/* No stored OOB data for this peer: negative reply. */
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC-only mode the P-192 values must not be used,
		 * so send them zeroed.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));

			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));

		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,

		/* Legacy (non-SC) reply carries only the P-192 pair. */
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,

	hci_dev_unlock(hdev);
4399 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event (CONFIG_BT_HS): resolve the physical-link
 * handle to its hci_conn and kick off reading the final local AMP
 * association data for that link.
 */
4400 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4402 struct hci_ev_channel_selected *ev = (void *)skb->data;
4403 struct hci_conn *hcon;
4405 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4407 skb_pull(skb, sizeof(*ev));
4409 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4413 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event: on success, mark the AMP hci_conn
 * connected, inherit the peer address from the underlying BR/EDR link,
 * register it with debugfs/sysfs and confirm the physical link to the
 * AMP manager.
 *
 * NOTE(review): elided extract — the status check and the "!hcon" error
 * path around the early hci_dev_unlock() calls are not visible here.
 */
4416 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4417 struct sk_buff *skb)
4419 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4420 struct hci_conn *hcon, *bredr_hcon;
4422 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4427 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4429 hci_dev_unlock(hdev);
4435 hci_dev_unlock(hdev);
/* The AMP link is managed on behalf of an existing BR/EDR ACL. */
4439 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4441 hcon->state = BT_CONNECTED;
4442 bacpy(&hcon->dst, &bredr_hcon->dst);
/* hold + drop arms the disconnect timeout for the new link. */
4444 hci_conn_hold(hcon);
4445 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4446 hci_conn_drop(hcon);
4448 hci_debugfs_create_conn(hcon);
4449 hci_conn_add_sysfs(hcon);
4451 amp_physical_cfm(bredr_hcon, hcon);
4453 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event: create an hci_chan for the new
 * logical link on the physical-link hci_conn, record its handle, and —
 * if an L2CAP channel is waiting on the AMP manager — confirm the
 * logical link to L2CAP using the controller's block MTU.
 *
 * NOTE(review): elided extract — NULL checks after the hcon lookup and
 * hci_chan_create(), plus the lock/unlock bracketing, are not visible.
 */
4456 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4458 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4459 struct hci_conn *hcon;
4460 struct hci_chan *hchan;
4461 struct amp_mgr *mgr;
4463 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4464 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4467 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4471 /* Create AMP hchan */
4472 hchan = hci_chan_create(hcon);
4476 hchan->handle = le16_to_cpu(ev->handle);
4478 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4480 mgr = hcon->amp_mgr;
4481 if (mgr && mgr->bredr_chan) {
4482 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4484 l2cap_chan_lock(bredr_chan);
/* AMP data path uses block-based flow control, hence block_mtu. */
4486 bredr_chan->conn->mtu = hdev->block_mtu;
4487 l2cap_logical_cfm(bredr_chan, hchan, 0);
4488 hci_conn_hold(hcon);
4490 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnection Logical Link Complete event: find the hci_chan by
 * its logical-link handle and tear the logical link down, propagating
 * the disconnect reason.
 *
 * NOTE(review): elided extract — the status and "!hchan" checks are not
 * visible here.
 */
4494 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4495 struct sk_buff *skb)
4497 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4498 struct hci_chan *hchan;
4500 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4501 le16_to_cpu(ev->handle), ev->status);
4508 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4512 amp_destroy_logical_link(hchan, ev->reason);
4515 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete event: mark the physical
 * link's hci_conn as closed.
 *
 * NOTE(review): elided extract — status/"!hcon" checks and the cleanup
 * following the state change are not visible here.
 */
4518 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4519 struct sk_buff *skb)
4521 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4522 struct hci_conn *hcon;
4524 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4531 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4533 hcon->state = BT_CLOSED;
4537 hci_dev_unlock(hdev);
/* LE Connection Complete event: finalize a new LE connection.
 *
 * Finds (or allocates) the hci_conn, records initiator/responder address
 * types depending on our role, resolves RPAs back to identity addresses
 * via the IRK store, drops blacklisted peers, notifies mgmt, and either
 * starts the remote-features exchange or transitions straight to
 * BT_CONNECTED. Finally clears any pending auto-connect parameter entry
 * and refreshes background scanning.
 *
 * NOTE(review): elided extract — the role checks ("if (conn->role ==
 * HCI_ROLE_MASTER/SLAVE)"), the status check before hci_le_conn_failed,
 * and several closing braces are not visible in this excerpt.
 */
4541 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4543 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4544 struct hci_conn_params *params;
4545 struct hci_conn *conn;
4546 struct smp_irk *irk;
4549 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4553 /* All controllers implicitly stop advertising in the event of a
4554 * connection, so ensure that the state bit is cleared.
4556 hci_dev_clear_flag(hdev, HCI_LE_ADV);
/* Prefer the hci_conn created when we initiated the connection. */
4558 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
4560 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4562 BT_ERR("No memory for new connection");
4566 conn->dst_type = ev->bdaddr_type;
4568 /* If we didn't have a hci_conn object previously
4569 * but we're in master role this must be something
4570 * initiated using a white list. Since white list based
4571 * connections are not "first class citizens" we don't
4572 * have full tracking of them. Therefore, we go ahead
4573 * with a "best effort" approach of determining the
4574 * initiator address based on the HCI_PRIVACY flag.
4577 conn->resp_addr_type = ev->bdaddr_type;
4578 bacpy(&conn->resp_addr, &ev->bdaddr);
4579 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4580 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4581 bacpy(&conn->init_addr, &hdev->rpa);
4583 hci_copy_identity_address(hdev,
4585 &conn->init_addr_type);
/* Connection established: the connect-timeout no longer applies. */
4589 cancel_delayed_work(&conn->le_conn_timeout);
4593 /* Set the responder (our side) address type based on
4594 * the advertising address type.
4596 conn->resp_addr_type = hdev->adv_addr_type;
4597 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4598 bacpy(&conn->resp_addr, &hdev->random_addr);
4600 bacpy(&conn->resp_addr, &hdev->bdaddr);
4602 conn->init_addr_type = ev->bdaddr_type;
4603 bacpy(&conn->init_addr, &ev->bdaddr);
4605 /* For incoming connections, set the default minimum
4606 * and maximum connection interval. They will be used
4607 * to check if the parameters are in range and if not
4608 * trigger the connection update procedure.
4610 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4611 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4614 /* Lookup the identity address from the stored connection
4615 * address and address type.
4617 * When establishing connections to an identity address, the
4618 * connection procedure will store the resolvable random
4619 * address first. Now if it can be converted back into the
4620 * identity address, start using the identity address from
4623 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4625 bacpy(&conn->dst, &irk->bdaddr);
4626 conn->dst_type = irk->addr_type;
4630 hci_le_conn_failed(conn, ev->status);
4634 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4635 addr_type = BDADDR_LE_PUBLIC;
4637 addr_type = BDADDR_LE_RANDOM;
4639 /* Drop the connection if the device is blocked */
4640 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4641 hci_conn_drop(conn);
/* Only announce the first transition to "connected" to mgmt. */
4645 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4646 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4648 conn->sec_level = BT_SECURITY_LOW;
4649 conn->handle = __le16_to_cpu(ev->handle);
4650 conn->state = BT_CONFIG;
4652 conn->le_conn_interval = le16_to_cpu(ev->interval);
4653 conn->le_conn_latency = le16_to_cpu(ev->latency);
4654 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4656 hci_debugfs_create_conn(conn);
4657 hci_conn_add_sysfs(conn);
4660 /* The remote features procedure is defined for master
4661 * role only. So only in case of an initiated connection
4662 * request the remote features.
4664 * If the local controller supports slave-initiated features
4665 * exchange, then requesting the remote features in slave
4666 * role is possible. Otherwise just transition into the
4667 * connected state without requesting the remote features.
4670 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
4671 struct hci_cp_le_read_remote_features cp;
4673 cp.handle = __cpu_to_le16(conn->handle);
4675 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
4678 hci_conn_hold(conn);
4680 conn->state = BT_CONNECTED;
4681 hci_connect_cfm(conn, ev->status);
4684 hci_connect_cfm(conn, ev->status);
/* A pending direct-connect entry for this peer is now satisfied;
 * release the reference it held on the connection object.
 */
4687 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
/* NOTE(review): "¶ms" below looks like an HTML-entity mangling of
 * "&params" introduced by extraction — confirm against the full file.
 */
4690 list_del_init(¶ms->action);
4692 hci_conn_drop(params->conn);
4693 hci_conn_put(params->conn);
4694 params->conn = NULL;
4699 hci_update_background_scan(hdev);
4700 hci_dev_unlock(hdev);
/* LE Connection Update Complete event: store the newly negotiated
 * connection interval, slave latency and supervision timeout on the
 * hci_conn identified by the event's handle.
 *
 * NOTE(review): elided extract — the status and "if (conn)" checks are
 * not visible here.
 */
4703 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4704 struct sk_buff *skb)
4706 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4707 struct hci_conn *conn;
4709 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4716 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4718 conn->le_conn_interval = le16_to_cpu(ev->interval);
4719 conn->le_conn_latency = le16_to_cpu(ev->latency);
4720 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4723 hci_dev_unlock(hdev);
4726 /* This function requires the caller holds hdev->lock */
/* This function requires the caller holds hdev->lock */
/* Decide, from an advertising report, whether we should auto-connect to
 * the advertiser. Only connectable advertising (ADV_IND/ADV_DIRECT_IND)
 * from non-blocked devices with a pending auto-connect parameter entry
 * triggers a connection attempt; HCI_AUTO_CONN_DIRECT additionally
 * requires ADV_DIRECT_IND. Returns the new hci_conn, or (per the elided
 * paths) presumably NULL when no connection is attempted — confirm
 * against the full file.
 */
4727 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4729 u8 addr_type, u8 adv_type)
4731 struct hci_conn *conn;
4732 struct hci_conn_params *params;
4734 /* If the event is not connectable don't proceed further */
4735 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4738 /* Ignore if the device is blocked */
4739 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4742 /* Most controller will fail if we try to create new connections
4743 * while we have an existing one in slave role.
4745 if (hdev->conn_hash.le_num_slave > 0)
4748 /* If we're not connectable only connect devices that we have in
4749 * our pend_le_conns list.
4751 params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
4756 switch (params->auto_connect) {
4757 case HCI_AUTO_CONN_DIRECT:
4758 /* Only devices advertising with ADV_DIRECT_IND are
4759 * triggering a connection attempt. This is allowing
4760 * incoming connections from slave devices.
4762 if (adv_type != LE_ADV_DIRECT_IND)
4765 case HCI_AUTO_CONN_ALWAYS:
4766 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4767 * are triggering a connection attempt. This means
4768 * that incoming connectioms from slave device are
4769 * accepted and also outgoing connections to slave
4770 * devices are established when found.
4777 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4778 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
4779 if (!IS_ERR(conn)) {
4780 /* Store the pointer since we don't really have any
4781 * other owner of the object besides the params that
4782 * triggered it. This way we can abort the connection if
4783 * the parameters get removed and keep the reference
4784 * count consistent once the connection is established.
4786 params->conn = hci_conn_get(conn);
4790 switch (PTR_ERR(conn)) {
4792 /* If hci_connect() returns -EBUSY it means there is already
4793 * an LE connection attempt going on. Since controllers don't
4794 * support more than one connection attempt at the time, we
4795 * don't consider this an error case.
4799 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core handler for a single LE advertising (or direct advertising)
 * report: validate any direct address against our RPA/IRK, resolve the
 * advertiser's RPA to an identity address, trigger pending auto-connects,
 * and emit mgmt device-found events — merging ADV_IND/ADV_SCAN_IND
 * reports with their following SCAN_RSP via the pending-report cache.
 *
 * NOTE(review): elided extract — declarations of "flags"/"match", the
 * "if (direct_addr)" guard, and several early returns/braces are not
 * visible in this excerpt.
 */
4806 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4807 u8 bdaddr_type, bdaddr_t *direct_addr,
4808 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4810 struct discovery_state *d = &hdev->discovery;
4811 struct smp_irk *irk;
4812 struct hci_conn *conn;
4816 /* If the direct address is present, then this report is from
4817 * a LE Direct Advertising Report event. In that case it is
4818 * important to see if the address is matching the local
4819 * controller address.
4822 /* Only resolvable random addresses are valid for these
4823 * kind of reports and others can be ignored.
4825 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4828 /* If the controller is not using resolvable random
4829 * addresses, then this report can be ignored.
4831 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4834 /* If the local IRK of the controller does not match
4835 * with the resolvable random address provided, then
4836 * this report can be ignored.
4838 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4842 /* Check if we need to convert to identity address */
4843 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4845 bdaddr = &irk->bdaddr;
4846 bdaddr_type = irk->addr_type;
4849 /* Check if we have been requested to connect to this device */
4850 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4851 if (conn && type == LE_ADV_IND) {
4852 /* Store report for later inclusion by
4853 * mgmt_device_connected
4855 memcpy(conn->le_adv_data, data, len);
4856 conn->le_adv_data_len = len;
4859 /* Passive scanning shouldn't trigger any device found events,
4860 * except for devices marked as CONN_REPORT for which we do send
4861 * device found events.
4863 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4864 if (type == LE_ADV_DIRECT_IND)
4867 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4868 bdaddr, bdaddr_type))
4871 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4872 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4875 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4876 rssi, flags, data, len, NULL, 0);
4880 /* When receiving non-connectable or scannable undirected
4881 * advertising reports, this means that the remote device is
4882 * not connectable and then clearly indicate this in the
4883 * device found event.
4885 * When receiving a scan response, then there is no way to
4886 * know if the remote device is connectable or not. However
4887 * since scan responses are merged with a previously seen
4888 * advertising report, the flags field from that report
4891 * In the really unlikely case that a controller get confused
4892 * and just sends a scan response event, then it is marked as
4893 * not connectable as well.
4895 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4896 type == LE_ADV_SCAN_RSP)
4897 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4901 /* If there's nothing pending either store the data from this
4902 * event or send an immediate device found event if the data
4903 * should not be stored for later.
4905 if (!has_pending_adv_report(hdev)) {
4906 /* If the report will trigger a SCAN_REQ store it for
4909 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4910 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4911 rssi, flags, data, len);
4915 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4916 rssi, flags, data, len, NULL, 0);
4920 /* Check if the pending report is for the same device as the new one */
4921 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4922 bdaddr_type == d->last_adv_addr_type);
4924 /* If the pending data doesn't match this report or this isn't a
4925 * scan response (e.g. we got a duplicate ADV_IND) then force
4926 * sending of the pending data.
4928 if (type != LE_ADV_SCAN_RSP || !match) {
4929 /* Send out whatever is in the cache, but skip duplicates */
4931 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4932 d->last_adv_addr_type, NULL,
4933 d->last_adv_rssi, d->last_adv_flags,
4935 d->last_adv_data_len, NULL, 0);
4937 /* If the new report will trigger a SCAN_REQ store it for
4940 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4941 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4942 rssi, flags, data, len);
4946 /* The advertising reports cannot be merged, so clear
4947 * the pending report and send out a device found event.
4949 clear_pending_adv_report(hdev);
4950 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4951 rssi, flags, data, len, NULL, 0);
4955 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4956 * the new event is a SCAN_RSP. We can therefore proceed with
4957 * sending a merged device found event.
4959 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4960 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4961 d->last_adv_data, d->last_adv_data_len, data, len);
4962 clear_pending_adv_report(hdev);
/* LE Advertising Report event: iterate over the packed reports in the
 * event payload and feed each one to process_adv_report(). Each report
 * is followed by a one-byte RSSI value, hence data[length] and the
 * "+ 1" when advancing the cursor.
 */
4965 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4967 u8 num_reports = skb->data[0];
4968 void *ptr = &skb->data[1];
4972 while (num_reports--) {
4973 struct hci_ev_le_advertising_info *ev = ptr;
/* RSSI byte trails the variable-length advertising data. */
4976 rssi = ev->data[ev->length];
4977 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4978 ev->bdaddr_type, NULL, 0, rssi,
4979 ev->data, ev->length);
4981 ptr += sizeof(*ev) + ev->length + 1;
4984 hci_dev_unlock(hdev);
/* LE Read Remote Features Complete event: cache the remote LE feature
 * page on the connection and, if the connection was still in BT_CONFIG,
 * complete the connect — treating error 0x1a ("unsupported remote
 * feature") from a slave-role exchange as success.
 */
4987 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
4988 struct sk_buff *skb)
4990 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
4991 struct hci_conn *conn;
4993 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4997 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5000 memcpy(conn->features[0], ev->features, 8);
5002 if (conn->state == BT_CONFIG) {
5005 /* If the local controller supports slave-initiated
5006 * features exchange, but the remote controller does
5007 * not, then it is possible that the error code 0x1a
5008 * for unsupported remote feature gets returned.
5010 * In this specific case, allow the connection to
5011 * transition into connected state and mark it as
5014 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5015 !conn->out && ev->status == 0x1a)
5018 status = ev->status;
5020 conn->state = BT_CONNECTED;
5021 hci_connect_cfm(conn, status);
5022 hci_conn_drop(conn);
5026 hci_dev_unlock(hdev);
/* LE Long Term Key Request event: look up the stored LTK for the peer,
 * validate EDiv/Rand (both must be zero for SC keys, must match the
 * stored values for legacy keys), and answer with LTK Reply — or with
 * LTK Negative Reply on any lookup/validation failure (elided paths
 * jumping to the neg.handle code at the bottom).
 */
5029 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5031 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5032 struct hci_cp_le_ltk_reply cp;
5033 struct hci_cp_le_ltk_neg_reply neg;
5034 struct hci_conn *conn;
5035 struct smp_ltk *ltk;
5037 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5041 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5045 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5049 if (smp_ltk_is_sc(ltk)) {
5050 /* With SC both EDiv and Rand are set to zero */
5051 if (ev->ediv || ev->rand)
5054 /* For non-SC keys check that EDiv and Rand match */
5055 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Pad the key to 16 bytes beyond the negotiated encryption size. */
5059 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5060 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5061 cp.handle = cpu_to_le16(conn->handle);
5063 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5065 conn->enc_key_size = ltk->enc_size;
5067 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5069 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5070 * temporary key used to encrypt a connection following
5071 * pairing. It is used during the Encrypted Session Setup to
5072 * distribute the keys. Later, security can be re-established
5073 * using a distributed LTK.
5075 if (ltk->type == SMP_STK) {
5076 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
/* NOTE(review): "<k->list" below looks like an HTML-entity mangling
 * of "&ltk->list" introduced by extraction — confirm against the
 * full file.
 */
5077 list_del_rcu(<k->list);
5078 kfree_rcu(ltk, rcu);
5080 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5083 hci_dev_unlock(hdev);
/* Negative-reply path: no usable key for this request. */
5088 neg.handle = ev->handle;
5089 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5090 hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply for the
 * given connection handle (the rejection reason parameter/assignment is
 * elided in this extract).
 */
5093 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5096 struct hci_cp_le_conn_param_req_neg_reply cp;
5098 cp.handle = cpu_to_le16(handle);
5101 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* LE Remote Connection Parameter Request event: reject requests on
 * unknown/non-connected handles or with out-of-range parameters;
 * otherwise, in master role, remember the requested parameters (stored
 * in the conn_params entry when one exists), tell mgmt about the new
 * parameters, and accept the request by echoing it back in a
 * Connection Parameter Request Reply.
 *
 * NOTE(review): elided extract — the store_hint handling and the
 * cp.min_ce_len/max_ce_len assignments before the final reply are not
 * visible here.
 */
5105 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5106 struct sk_buff *skb)
5108 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5109 struct hci_cp_le_conn_param_req_reply cp;
5110 struct hci_conn *hcon;
5111 u16 handle, min, max, latency, timeout;
5113 handle = le16_to_cpu(ev->handle);
5114 min = le16_to_cpu(ev->interval_min);
5115 max = le16_to_cpu(ev->interval_max);
5116 latency = le16_to_cpu(ev->latency);
5117 timeout = le16_to_cpu(ev->timeout);
5119 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5120 if (!hcon || hcon->state != BT_CONNECTED)
5121 return send_conn_param_neg_reply(hdev, handle,
5122 HCI_ERROR_UNKNOWN_CONN_ID);
5124 if (hci_check_conn_params(min, max, latency, timeout))
5125 return send_conn_param_neg_reply(hdev, handle,
5126 HCI_ERROR_INVALID_LL_PARAMS);
5128 if (hcon->role == HCI_ROLE_MASTER) {
5129 struct hci_conn_params *params;
5134 params = hci_conn_params_lookup(hdev, &hcon->dst,
/* Persist the slave's preferred parameters for future connects. */
5137 params->conn_min_interval = min;
5138 params->conn_max_interval = max;
5139 params->conn_latency = latency;
5140 params->supervision_timeout = timeout;
5146 hci_dev_unlock(hdev);
5148 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5149 store_hint, min, max, latency, timeout);
/* Accept: echo the requested parameters back (already LE-order). */
5152 cp.handle = ev->handle;
5153 cp.interval_min = ev->interval_min;
5154 cp.interval_max = ev->interval_max;
5155 cp.latency = ev->latency;
5156 cp.timeout = ev->timeout;
5160 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* LE Direct Advertising Report event: iterate the packed reports and
 * hand each to process_adv_report() with the direct (target) address so
 * it can be validated against our own RPA. Direct reports carry no
 * advertising data, hence NULL/0.
 */
5163 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5164 struct sk_buff *skb)
5166 u8 num_reports = skb->data[0];
5167 void *ptr = &skb->data[1];
5171 while (num_reports--) {
5172 struct hci_ev_le_direct_adv_info *ev = ptr;
5174 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5175 ev->bdaddr_type, &ev->direct_addr,
5176 ev->direct_addr_type, ev->rssi, NULL, 0);
5181 hci_dev_unlock(hdev);
/* LE Meta event demultiplexer: strip the meta-event header and dispatch
 * on the subevent code to the specific LE handlers above.
 */
5184 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5186 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5188 skb_pull(skb, sizeof(*le_ev));
5190 switch (le_ev->subevent) {
5191 case HCI_EV_LE_CONN_COMPLETE:
5192 hci_le_conn_complete_evt(hdev, skb);
5195 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5196 hci_le_conn_update_complete_evt(hdev, skb);
5199 case HCI_EV_LE_ADVERTISING_REPORT:
5200 hci_le_adv_report_evt(hdev, skb);
5203 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5204 hci_le_remote_feat_complete_evt(hdev, skb);
5207 case HCI_EV_LE_LTK_REQ:
5208 hci_le_ltk_request_evt(hdev, skb);
5211 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5212 hci_le_remote_conn_param_req_evt(hdev, skb);
5215 case HCI_EV_LE_DIRECT_ADV_REPORT:
5216 hci_le_direct_adv_report_evt(hdev, skb);
/* Check whether skb is the Command Complete event matching the given
 * opcode (and expected trailing event, when one was requested): parse
 * and strip the event header, verify the event code, then verify the
 * embedded command opcode. Returns false (via the elided returns) on
 * any mismatch or truncation; on success skb is advanced to the
 * command-complete payload for the caller.
 */
5224 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5225 u8 event, struct sk_buff *skb)
5227 struct hci_ev_cmd_complete *ev;
5228 struct hci_event_hdr *hdr;
5233 if (skb->len < sizeof(*hdr)) {
5234 BT_ERR("Too short HCI event");
5238 hdr = (void *) skb->data;
5239 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* When a specific completion event was requested, it must match. */
5242 if (hdr->evt != event)
5247 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5248 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
5252 if (skb->len < sizeof(*ev)) {
5253 BT_ERR("Too short cmd_complete event");
5257 ev = (void *) skb->data;
5258 skb_pull(skb, sizeof(*ev));
5260 if (opcode != __le16_to_cpu(ev->opcode)) {
5261 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5262 __le16_to_cpu(ev->opcode));
5269 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5271 struct hci_event_hdr *hdr = (void *) skb->data;
5272 hci_req_complete_t req_complete = NULL;
5273 hci_req_complete_skb_t req_complete_skb = NULL;
5274 struct sk_buff *orig_skb = NULL;
5275 u8 status = 0, event = hdr->evt, req_evt = 0;
5276 u16 opcode = HCI_OP_NOP;
5278 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
5279 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5280 opcode = __le16_to_cpu(cmd_hdr->opcode);
5281 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5286 /* If it looks like we might end up having to call
5287 * req_complete_skb, store a pristine copy of the skb since the
5288 * various handlers may modify the original one through
5289 * skb_pull() calls, etc.
5291 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5292 event == HCI_EV_CMD_COMPLETE)
5293 orig_skb = skb_clone(skb, GFP_KERNEL);
5295 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5298 case HCI_EV_INQUIRY_COMPLETE:
5299 hci_inquiry_complete_evt(hdev, skb);
5302 case HCI_EV_INQUIRY_RESULT:
5303 hci_inquiry_result_evt(hdev, skb);
5306 case HCI_EV_CONN_COMPLETE:
5307 hci_conn_complete_evt(hdev, skb);
5310 case HCI_EV_CONN_REQUEST:
5311 hci_conn_request_evt(hdev, skb);
5314 case HCI_EV_DISCONN_COMPLETE:
5315 hci_disconn_complete_evt(hdev, skb);
5318 case HCI_EV_AUTH_COMPLETE:
5319 hci_auth_complete_evt(hdev, skb);
5322 case HCI_EV_REMOTE_NAME:
5323 hci_remote_name_evt(hdev, skb);
5326 case HCI_EV_ENCRYPT_CHANGE:
5327 hci_encrypt_change_evt(hdev, skb);
5330 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5331 hci_change_link_key_complete_evt(hdev, skb);
5334 case HCI_EV_REMOTE_FEATURES:
5335 hci_remote_features_evt(hdev, skb);
5338 case HCI_EV_CMD_COMPLETE:
5339 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5340 &req_complete, &req_complete_skb);
5343 case HCI_EV_CMD_STATUS:
5344 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5348 case HCI_EV_HARDWARE_ERROR:
5349 hci_hardware_error_evt(hdev, skb);
5352 case HCI_EV_ROLE_CHANGE:
5353 hci_role_change_evt(hdev, skb);
5356 case HCI_EV_NUM_COMP_PKTS:
5357 hci_num_comp_pkts_evt(hdev, skb);
5360 case HCI_EV_MODE_CHANGE:
5361 hci_mode_change_evt(hdev, skb);
5364 case HCI_EV_PIN_CODE_REQ:
5365 hci_pin_code_request_evt(hdev, skb);
5368 case HCI_EV_LINK_KEY_REQ:
5369 hci_link_key_request_evt(hdev, skb);
5372 case HCI_EV_LINK_KEY_NOTIFY:
5373 hci_link_key_notify_evt(hdev, skb);
5376 case HCI_EV_CLOCK_OFFSET:
5377 hci_clock_offset_evt(hdev, skb);
5380 case HCI_EV_PKT_TYPE_CHANGE:
5381 hci_pkt_type_change_evt(hdev, skb);
5384 case HCI_EV_PSCAN_REP_MODE:
5385 hci_pscan_rep_mode_evt(hdev, skb);
5388 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5389 hci_inquiry_result_with_rssi_evt(hdev, skb);
5392 case HCI_EV_REMOTE_EXT_FEATURES:
5393 hci_remote_ext_features_evt(hdev, skb);
5396 case HCI_EV_SYNC_CONN_COMPLETE:
5397 hci_sync_conn_complete_evt(hdev, skb);
5400 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5401 hci_extended_inquiry_result_evt(hdev, skb);
5404 case HCI_EV_KEY_REFRESH_COMPLETE:
5405 hci_key_refresh_complete_evt(hdev, skb);
5408 case HCI_EV_IO_CAPA_REQUEST:
5409 hci_io_capa_request_evt(hdev, skb);
5412 case HCI_EV_IO_CAPA_REPLY:
5413 hci_io_capa_reply_evt(hdev, skb);
5416 case HCI_EV_USER_CONFIRM_REQUEST:
5417 hci_user_confirm_request_evt(hdev, skb);
5420 case HCI_EV_USER_PASSKEY_REQUEST:
5421 hci_user_passkey_request_evt(hdev, skb);
5424 case HCI_EV_USER_PASSKEY_NOTIFY:
5425 hci_user_passkey_notify_evt(hdev, skb);
5428 case HCI_EV_KEYPRESS_NOTIFY:
5429 hci_keypress_notify_evt(hdev, skb);
5432 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5433 hci_simple_pair_complete_evt(hdev, skb);
5436 case HCI_EV_REMOTE_HOST_FEATURES:
5437 hci_remote_host_features_evt(hdev, skb);
5440 case HCI_EV_LE_META:
5441 hci_le_meta_evt(hdev, skb);
5444 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5445 hci_remote_oob_data_request_evt(hdev, skb);
5448 #if IS_ENABLED(CONFIG_BT_HS)
5449 case HCI_EV_CHANNEL_SELECTED:
5450 hci_chan_selected_evt(hdev, skb);
5453 case HCI_EV_PHY_LINK_COMPLETE:
5454 hci_phy_link_complete_evt(hdev, skb);
5457 case HCI_EV_LOGICAL_LINK_COMPLETE:
5458 hci_loglink_complete_evt(hdev, skb);
5461 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5462 hci_disconn_loglink_complete_evt(hdev, skb);
5465 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5466 hci_disconn_phylink_complete_evt(hdev, skb);
5470 case HCI_EV_NUM_COMP_BLOCKS:
5471 hci_num_comp_blocks_evt(hdev, skb);
5475 BT_DBG("%s event 0x%2.2x", hdev->name, event);
5480 req_complete(hdev, status, opcode);
5481 } else if (req_complete_skb) {
5482 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
5483 kfree_skb(orig_skb);
5486 req_complete_skb(hdev, status, opcode, orig_skb);
5489 kfree_skb(orig_skb);
5491 hdev->stat.evt_rx++;