/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 /* Handle HCI Event packets */
41 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
43 __u8 status = *((__u8 *) skb->data);
45 BT_DBG("%s status 0x%2.2x", hdev->name, status);
50 clear_bit(HCI_INQUIRY, &hdev->flags);
51 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
52 wake_up_bit(&hdev->flags, HCI_INQUIRY);
55 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
58 hci_conn_check_pending(hdev);
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
63 __u8 status = *((__u8 *) skb->data);
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
75 __u8 status = *((__u8 *) skb->data);
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
84 hci_conn_check_pending(hdev);
87 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
90 BT_DBG("%s", hdev->name);
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
98 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
107 conn->role = rp->role;
109 hci_dev_unlock(hdev);
112 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
114 struct hci_rp_read_link_policy *rp = (void *) skb->data;
115 struct hci_conn *conn;
117 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
124 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
126 conn->link_policy = __le16_to_cpu(rp->policy);
128 hci_dev_unlock(hdev);
131 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
133 struct hci_rp_write_link_policy *rp = (void *) skb->data;
134 struct hci_conn *conn;
137 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
148 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
150 conn->link_policy = get_unaligned_le16(sent + 2);
152 hci_dev_unlock(hdev);
155 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
160 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165 hdev->link_policy = __le16_to_cpu(rp->policy);
168 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 __u8 status = *((__u8 *) skb->data);
174 BT_DBG("%s status 0x%2.2x", hdev->name, status);
179 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
183 hdev->link_policy = get_unaligned_le16(sent);
186 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
188 __u8 status = *((__u8 *) skb->data);
190 BT_DBG("%s status 0x%2.2x", hdev->name, status);
192 clear_bit(HCI_RESET, &hdev->flags);
197 /* Reset all non-persistent flags */
198 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
200 hdev->discovery.state = DISCOVERY_STOPPED;
201 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
202 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
204 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
205 hdev->adv_data_len = 0;
207 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
208 hdev->scan_rsp_data_len = 0;
210 hdev->le_scan_type = LE_SCAN_PASSIVE;
212 hdev->ssp_debug_mode = 0;
214 hci_bdaddr_list_clear(&hdev->le_white_list);
217 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
219 __u8 status = *((__u8 *) skb->data);
222 BT_DBG("%s status 0x%2.2x", hdev->name, status);
224 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
230 if (test_bit(HCI_MGMT, &hdev->dev_flags))
231 mgmt_set_local_name_complete(hdev, sent, status);
233 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
235 hci_dev_unlock(hdev);
238 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
240 struct hci_rp_read_local_name *rp = (void *) skb->data;
242 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
247 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
248 test_bit(HCI_CONFIG, &hdev->dev_flags))
249 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
252 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
254 __u8 status = *((__u8 *) skb->data);
257 BT_DBG("%s status 0x%2.2x", hdev->name, status);
259 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
266 __u8 param = *((__u8 *) sent);
268 if (param == AUTH_ENABLED)
269 set_bit(HCI_AUTH, &hdev->flags);
271 clear_bit(HCI_AUTH, &hdev->flags);
274 if (test_bit(HCI_MGMT, &hdev->dev_flags))
275 mgmt_auth_enable_complete(hdev, status);
277 hci_dev_unlock(hdev);
280 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
282 __u8 status = *((__u8 *) skb->data);
286 BT_DBG("%s status 0x%2.2x", hdev->name, status);
291 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
295 param = *((__u8 *) sent);
298 set_bit(HCI_ENCRYPT, &hdev->flags);
300 clear_bit(HCI_ENCRYPT, &hdev->flags);
303 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
305 __u8 status = *((__u8 *) skb->data);
309 BT_DBG("%s status 0x%2.2x", hdev->name, status);
311 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
315 param = *((__u8 *) sent);
320 hdev->discov_timeout = 0;
324 if (param & SCAN_INQUIRY)
325 set_bit(HCI_ISCAN, &hdev->flags);
327 clear_bit(HCI_ISCAN, &hdev->flags);
329 if (param & SCAN_PAGE)
330 set_bit(HCI_PSCAN, &hdev->flags);
332 clear_bit(HCI_PSCAN, &hdev->flags);
335 hci_dev_unlock(hdev);
338 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
340 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
342 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
347 memcpy(hdev->dev_class, rp->dev_class, 3);
349 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
350 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
353 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
355 __u8 status = *((__u8 *) skb->data);
358 BT_DBG("%s status 0x%2.2x", hdev->name, status);
360 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
367 memcpy(hdev->dev_class, sent, 3);
369 if (test_bit(HCI_MGMT, &hdev->dev_flags))
370 mgmt_set_class_of_dev_complete(hdev, sent, status);
372 hci_dev_unlock(hdev);
375 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
377 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
380 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
385 setting = __le16_to_cpu(rp->voice_setting);
387 if (hdev->voice_setting == setting)
390 hdev->voice_setting = setting;
392 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
395 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
398 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
401 __u8 status = *((__u8 *) skb->data);
405 BT_DBG("%s status 0x%2.2x", hdev->name, status);
410 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
414 setting = get_unaligned_le16(sent);
416 if (hdev->voice_setting == setting)
419 hdev->voice_setting = setting;
421 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
424 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
427 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
430 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
432 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
437 hdev->num_iac = rp->num_iac;
439 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
442 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
444 __u8 status = *((__u8 *) skb->data);
445 struct hci_cp_write_ssp_mode *sent;
447 BT_DBG("%s status 0x%2.2x", hdev->name, status);
449 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
457 hdev->features[1][0] |= LMP_HOST_SSP;
459 hdev->features[1][0] &= ~LMP_HOST_SSP;
462 if (test_bit(HCI_MGMT, &hdev->dev_flags))
463 mgmt_ssp_enable_complete(hdev, sent->mode, status);
466 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
468 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
471 hci_dev_unlock(hdev);
474 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
476 u8 status = *((u8 *) skb->data);
477 struct hci_cp_write_sc_support *sent;
479 BT_DBG("%s status 0x%2.2x", hdev->name, status);
481 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
489 hdev->features[1][0] |= LMP_HOST_SC;
491 hdev->features[1][0] &= ~LMP_HOST_SC;
494 if (test_bit(HCI_MGMT, &hdev->dev_flags))
495 mgmt_sc_enable_complete(hdev, sent->support, status);
498 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
500 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
503 hci_dev_unlock(hdev);
506 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
508 struct hci_rp_read_local_version *rp = (void *) skb->data;
510 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
515 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
516 test_bit(HCI_CONFIG, &hdev->dev_flags)) {
517 hdev->hci_ver = rp->hci_ver;
518 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
519 hdev->lmp_ver = rp->lmp_ver;
520 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
521 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
525 static void hci_cc_read_local_commands(struct hci_dev *hdev,
528 struct hci_rp_read_local_commands *rp = (void *) skb->data;
530 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
535 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
536 test_bit(HCI_CONFIG, &hdev->dev_flags))
537 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
540 static void hci_cc_read_local_features(struct hci_dev *hdev,
543 struct hci_rp_read_local_features *rp = (void *) skb->data;
545 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
550 memcpy(hdev->features, rp->features, 8);
552 /* Adjust default settings according to features
553 * supported by device. */
555 if (hdev->features[0][0] & LMP_3SLOT)
556 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
558 if (hdev->features[0][0] & LMP_5SLOT)
559 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
561 if (hdev->features[0][1] & LMP_HV2) {
562 hdev->pkt_type |= (HCI_HV2);
563 hdev->esco_type |= (ESCO_HV2);
566 if (hdev->features[0][1] & LMP_HV3) {
567 hdev->pkt_type |= (HCI_HV3);
568 hdev->esco_type |= (ESCO_HV3);
571 if (lmp_esco_capable(hdev))
572 hdev->esco_type |= (ESCO_EV3);
574 if (hdev->features[0][4] & LMP_EV4)
575 hdev->esco_type |= (ESCO_EV4);
577 if (hdev->features[0][4] & LMP_EV5)
578 hdev->esco_type |= (ESCO_EV5);
580 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
581 hdev->esco_type |= (ESCO_2EV3);
583 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
584 hdev->esco_type |= (ESCO_3EV3);
586 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
587 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
590 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
593 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
595 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
600 if (hdev->max_page < rp->max_page)
601 hdev->max_page = rp->max_page;
603 if (rp->page < HCI_MAX_PAGES)
604 memcpy(hdev->features[rp->page], rp->features, 8);
607 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
610 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
612 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
617 hdev->flow_ctl_mode = rp->mode;
620 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
622 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
624 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
629 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
630 hdev->sco_mtu = rp->sco_mtu;
631 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
632 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
634 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
639 hdev->acl_cnt = hdev->acl_pkts;
640 hdev->sco_cnt = hdev->sco_pkts;
642 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
643 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
646 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
648 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
655 if (test_bit(HCI_INIT, &hdev->flags))
656 bacpy(&hdev->bdaddr, &rp->bdaddr);
658 if (test_bit(HCI_SETUP, &hdev->dev_flags))
659 bacpy(&hdev->setup_addr, &rp->bdaddr);
662 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
665 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
667 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
672 if (test_bit(HCI_INIT, &hdev->flags)) {
673 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
674 hdev->page_scan_window = __le16_to_cpu(rp->window);
678 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
681 u8 status = *((u8 *) skb->data);
682 struct hci_cp_write_page_scan_activity *sent;
684 BT_DBG("%s status 0x%2.2x", hdev->name, status);
689 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
693 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
694 hdev->page_scan_window = __le16_to_cpu(sent->window);
697 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
700 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
702 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
707 if (test_bit(HCI_INIT, &hdev->flags))
708 hdev->page_scan_type = rp->type;
711 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
714 u8 status = *((u8 *) skb->data);
717 BT_DBG("%s status 0x%2.2x", hdev->name, status);
722 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
724 hdev->page_scan_type = *type;
727 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
730 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
732 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
737 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
738 hdev->block_len = __le16_to_cpu(rp->block_len);
739 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
741 hdev->block_cnt = hdev->num_blocks;
743 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
744 hdev->block_cnt, hdev->block_len);
747 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
749 struct hci_rp_read_clock *rp = (void *) skb->data;
750 struct hci_cp_read_clock *cp;
751 struct hci_conn *conn;
753 BT_DBG("%s", hdev->name);
755 if (skb->len < sizeof(*rp))
763 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
767 if (cp->which == 0x00) {
768 hdev->clock = le32_to_cpu(rp->clock);
772 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
774 conn->clock = le32_to_cpu(rp->clock);
775 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
779 hci_dev_unlock(hdev);
782 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
785 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
787 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
792 hdev->amp_status = rp->amp_status;
793 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
794 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
795 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
796 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
797 hdev->amp_type = rp->amp_type;
798 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
799 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
800 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
801 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
804 a2mp_send_getinfo_rsp(hdev);
807 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
810 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
811 struct amp_assoc *assoc = &hdev->loc_assoc;
812 size_t rem_len, frag_len;
814 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
819 frag_len = skb->len - sizeof(*rp);
820 rem_len = __le16_to_cpu(rp->rem_len);
822 if (rem_len > frag_len) {
823 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
825 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
826 assoc->offset += frag_len;
828 /* Read other fragments */
829 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
834 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
835 assoc->len = assoc->offset + rem_len;
839 /* Send A2MP Rsp when all fragments are received */
840 a2mp_send_getampassoc_rsp(hdev, rp->status);
841 a2mp_send_create_phy_link_req(hdev, rp->status);
844 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
847 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
849 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
854 hdev->inq_tx_power = rp->tx_power;
857 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
859 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
860 struct hci_cp_pin_code_reply *cp;
861 struct hci_conn *conn;
863 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
867 if (test_bit(HCI_MGMT, &hdev->dev_flags))
868 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
873 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
877 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
879 conn->pin_length = cp->pin_len;
882 hci_dev_unlock(hdev);
885 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
887 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
889 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
893 if (test_bit(HCI_MGMT, &hdev->dev_flags))
894 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
897 hci_dev_unlock(hdev);
900 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
903 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
905 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
910 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
911 hdev->le_pkts = rp->le_max_pkt;
913 hdev->le_cnt = hdev->le_pkts;
915 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
918 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
921 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
923 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
928 memcpy(hdev->le_features, rp->features, 8);
931 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
934 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
936 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
941 hdev->adv_tx_power = rp->tx_power;
944 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
946 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
948 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
952 if (test_bit(HCI_MGMT, &hdev->dev_flags))
953 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
956 hci_dev_unlock(hdev);
959 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
962 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
964 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
968 if (test_bit(HCI_MGMT, &hdev->dev_flags))
969 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
970 ACL_LINK, 0, rp->status);
972 hci_dev_unlock(hdev);
975 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
977 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
979 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
983 if (test_bit(HCI_MGMT, &hdev->dev_flags))
984 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
987 hci_dev_unlock(hdev);
990 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
993 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
995 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
999 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1000 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1001 ACL_LINK, 0, rp->status);
1003 hci_dev_unlock(hdev);
1006 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1007 struct sk_buff *skb)
1009 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1011 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1014 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
1016 hci_dev_unlock(hdev);
1019 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1020 struct sk_buff *skb)
1022 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1024 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1027 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
1028 rp->hash256, rp->rand256,
1030 hci_dev_unlock(hdev);
1034 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1036 __u8 status = *((__u8 *) skb->data);
1039 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1044 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1050 bacpy(&hdev->random_addr, sent);
1052 hci_dev_unlock(hdev);
1055 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1057 __u8 *sent, status = *((__u8 *) skb->data);
1059 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1064 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1070 /* If we're doing connection initiation as peripheral. Set a
1071 * timeout in case something goes wrong.
1074 struct hci_conn *conn;
1076 set_bit(HCI_LE_ADV, &hdev->dev_flags);
1078 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1080 queue_delayed_work(hdev->workqueue,
1081 &conn->le_conn_timeout,
1082 conn->conn_timeout);
1084 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1087 hci_dev_unlock(hdev);
1090 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1092 struct hci_cp_le_set_scan_param *cp;
1093 __u8 status = *((__u8 *) skb->data);
1095 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1100 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1106 hdev->le_scan_type = cp->type;
1108 hci_dev_unlock(hdev);
1111 static bool has_pending_adv_report(struct hci_dev *hdev)
1113 struct discovery_state *d = &hdev->discovery;
1115 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1118 static void clear_pending_adv_report(struct hci_dev *hdev)
1120 struct discovery_state *d = &hdev->discovery;
1122 bacpy(&d->last_adv_addr, BDADDR_ANY);
1123 d->last_adv_data_len = 0;
1126 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1127 u8 bdaddr_type, s8 rssi, u32 flags,
1130 struct discovery_state *d = &hdev->discovery;
1132 bacpy(&d->last_adv_addr, bdaddr);
1133 d->last_adv_addr_type = bdaddr_type;
1134 d->last_adv_rssi = rssi;
1135 d->last_adv_flags = flags;
1136 memcpy(d->last_adv_data, data, len);
1137 d->last_adv_data_len = len;
1140 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1141 struct sk_buff *skb)
1143 struct hci_cp_le_set_scan_enable *cp;
1144 __u8 status = *((__u8 *) skb->data);
1146 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1151 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1157 switch (cp->enable) {
1158 case LE_SCAN_ENABLE:
1159 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1160 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1161 clear_pending_adv_report(hdev);
1164 case LE_SCAN_DISABLE:
1165 /* We do this here instead of when setting DISCOVERY_STOPPED
1166 * since the latter would potentially require waiting for
1167 * inquiry to stop too.
1169 if (has_pending_adv_report(hdev)) {
1170 struct discovery_state *d = &hdev->discovery;
1172 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1173 d->last_adv_addr_type, NULL,
1174 d->last_adv_rssi, d->last_adv_flags,
1176 d->last_adv_data_len, NULL, 0);
1179 /* Cancel this timer so that we don't try to disable scanning
1180 * when it's already disabled.
1182 cancel_delayed_work(&hdev->le_scan_disable);
1184 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1186 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1187 * interrupted scanning due to a connect request. Mark
1188 * therefore discovery as stopped. If this was not
1189 * because of a connect request advertising might have
1190 * been disabled because of active scanning, so
1191 * re-enable it again if necessary.
1193 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1195 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1196 else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
1197 hdev->discovery.state == DISCOVERY_FINDING)
1198 mgmt_reenable_advertising(hdev);
1203 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1207 hci_dev_unlock(hdev);
1210 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1211 struct sk_buff *skb)
1213 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1215 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1220 hdev->le_white_list_size = rp->size;
1223 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1224 struct sk_buff *skb)
1226 __u8 status = *((__u8 *) skb->data);
1228 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1233 hci_bdaddr_list_clear(&hdev->le_white_list);
1236 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1237 struct sk_buff *skb)
1239 struct hci_cp_le_add_to_white_list *sent;
1240 __u8 status = *((__u8 *) skb->data);
1242 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1247 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1251 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1255 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1256 struct sk_buff *skb)
1258 struct hci_cp_le_del_from_white_list *sent;
1259 __u8 status = *((__u8 *) skb->data);
1261 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1266 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1270 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1274 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1275 struct sk_buff *skb)
1277 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1279 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1284 memcpy(hdev->le_states, rp->le_states, 8);
1287 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1288 struct sk_buff *skb)
1290 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1292 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1297 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1298 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1301 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1302 struct sk_buff *skb)
1304 struct hci_cp_le_write_def_data_len *sent;
1305 __u8 status = *((__u8 *) skb->data);
1307 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1312 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1316 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1317 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1320 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1321 struct sk_buff *skb)
1323 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1325 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1330 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1331 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1332 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1333 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1336 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1337 struct sk_buff *skb)
1339 struct hci_cp_write_le_host_supported *sent;
1340 __u8 status = *((__u8 *) skb->data);
1342 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1347 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1354 hdev->features[1][0] |= LMP_HOST_LE;
1355 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1357 hdev->features[1][0] &= ~LMP_HOST_LE;
1358 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1359 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1363 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1365 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1367 hci_dev_unlock(hdev);
1370 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1372 struct hci_cp_le_set_adv_param *cp;
1373 u8 status = *((u8 *) skb->data);
1375 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1380 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1385 hdev->adv_addr_type = cp->own_address_type;
1386 hci_dev_unlock(hdev);
1389 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1390 struct sk_buff *skb)
1392 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1394 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1395 hdev->name, rp->status, rp->phy_handle);
1400 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1403 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1405 struct hci_rp_read_rssi *rp = (void *) skb->data;
1406 struct hci_conn *conn;
1408 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1415 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1417 conn->rssi = rp->rssi;
1419 hci_dev_unlock(hdev);
1422 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1424 struct hci_cp_read_tx_power *sent;
1425 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1426 struct hci_conn *conn;
1428 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1433 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1439 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1443 switch (sent->type) {
1445 conn->tx_power = rp->tx_power;
1448 conn->max_tx_power = rp->tx_power;
1453 hci_dev_unlock(hdev);
1456 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1458 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1461 hci_conn_check_pending(hdev);
1465 set_bit(HCI_INQUIRY, &hdev->flags);
1468 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1470 struct hci_cp_create_conn *cp;
1471 struct hci_conn *conn;
1473 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1475 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1481 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1483 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1486 if (conn && conn->state == BT_CONNECT) {
1487 if (status != 0x0c || conn->attempt > 2) {
1488 conn->state = BT_CLOSED;
1489 hci_proto_connect_cfm(conn, status);
1492 conn->state = BT_CONNECT2;
1496 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1499 BT_ERR("No memory for new connection");
1503 hci_dev_unlock(hdev);
1506 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1508 struct hci_cp_add_sco *cp;
1509 struct hci_conn *acl, *sco;
1512 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1517 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1521 handle = __le16_to_cpu(cp->handle);
1523 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1527 acl = hci_conn_hash_lookup_handle(hdev, handle);
1531 sco->state = BT_CLOSED;
1533 hci_proto_connect_cfm(sco, status);
1538 hci_dev_unlock(hdev);
/* Command Status: HCI_Authentication_Requested.
 * On failure during connection setup (BT_CONFIG), reports the error to the
 * protocol layer and drops the reference taken for the attempt.
 */
1541 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1543 struct hci_cp_auth_requested *cp;
1544 struct hci_conn *conn;
1546 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1551 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1557 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1559 if (conn->state == BT_CONFIG) {
1560 hci_proto_connect_cfm(conn, status);
1561 hci_conn_drop(conn);
1565 hci_dev_unlock(hdev);
/* Command Status: HCI_Set_Connection_Encryption.
 * Mirrors hci_cs_auth_requested(): on failure during BT_CONFIG, confirms
 * the error upward and releases the setup reference.
 */
1568 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1570 struct hci_cp_set_conn_encrypt *cp;
1571 struct hci_conn *conn;
1573 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1578 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1584 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1586 if (conn->state == BT_CONFIG) {
1587 hci_proto_connect_cfm(conn, status);
1588 hci_conn_drop(conn);
1592 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication before
 * it can be reported as fully established.  Only outgoing links in
 * BT_CONFIG are candidates; SDP-level security never requires it, and
 * legacy (non-SSP) devices skip it unless MITM was requested or the
 * pending security level is MEDIUM or above.
 * Return value: presumably 0 = not needed, 1 = needed (returns not
 * visible in this extract).
 */
1595 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1596 struct hci_conn *conn)
1598 if (conn->state != BT_CONFIG || !conn->out)
1601 if (conn->pending_sec_level == BT_SECURITY_SDP)
1604 /* Only request authentication for SSP connections or non-SSP
1605 * devices with sec_level MEDIUM or HIGH or if MITM protection
1608 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1609 conn->pending_sec_level != BT_SECURITY_FIPS &&
1610 conn->pending_sec_level != BT_SECURITY_HIGH &&
1611 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue an HCI_Remote_Name_Request for one inquiry-cache entry, copying
 * the page-scan parameters and clock offset discovered during inquiry so
 * the controller can page the device efficiently.
 * Returns the result of hci_send_cmd() (0 on queued success).
 */
1617 static int hci_resolve_name(struct hci_dev *hdev,
1618 struct inquiry_entry *e)
1620 struct hci_cp_remote_name_req cp;
1622 memset(&cp, 0, sizeof(cp));
1624 bacpy(&cp.bdaddr, &e->data.bdaddr);
1625 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1626 cp.pscan_mode = e->data.pscan_mode;
1627 cp.clock_offset = e->data.clock_offset;
1629 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next discovered device still needing
 * its name.  Marks the entry NAME_PENDING once the request is queued.
 * Returns true when a resolution was started (return statements are not
 * visible in this extract).
 */
1632 static bool hci_resolve_next_name(struct hci_dev *hdev)
1634 struct discovery_state *discov = &hdev->discovery;
1635 struct inquiry_entry *e;
1637 if (list_empty(&discov->resolve))
1640 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1644 if (hci_resolve_name(hdev, e) == 0) {
1645 e->name_state = NAME_PENDING;
/* Process a (possibly failed) remote-name result during discovery:
 * report the device as connected to mgmt if appropriate, record the
 * resolved name (or mark it unknown), then either continue resolving the
 * next pending name or finish discovery.
 */
1652 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1653 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1655 struct discovery_state *discov = &hdev->discovery;
1656 struct inquiry_entry *e;
1658 /* Update the mgmt connected state if necessary. Be careful with
1659 * conn objects that exist but are not (yet) connected however.
1660 * Only those in BT_CONFIG or BT_CONNECTED states can be
1661 * considered connected.
1664 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1665 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1666 mgmt_device_connected(hdev, conn, 0, name, name_len);
1668 if (discov->state == DISCOVERY_STOPPED)
1671 if (discov->state == DISCOVERY_STOPPING)
1672 goto discov_complete;
1674 if (discov->state != DISCOVERY_RESOLVING)
1677 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING)_
1678 /* If the device was not found in a list of found devices names of which
1679 * are pending. there is no need to continue resolving a next name as it
1680 * will be done upon receiving another Remote Name Request Complete
/* A non-NULL name means resolution succeeded; otherwise remember that
 * the name could not be obtained. */
1687 e->name_state = NAME_KNOWN;
1688 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1689 e->data.rssi, name, name_len);
1691 e->name_state = NAME_NOT_KNOWN;
1694 if (hci_resolve_next_name(hdev))
/* discov_complete label presumably lands here. */
1698 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status: HCI_Remote_Name_Request.
 * On success we just wait for the Remote Name Request Complete event; on
 * failure we tell the discovery machinery the name is unresolvable and,
 * for outgoing connections that still need it, start authentication.
 */
1701 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1703 struct hci_cp_remote_name_req *cp;
1704 struct hci_conn *conn;
1706 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1708 /* If successful wait for the name req complete event before
1709 * checking for the need to do authentication */
1713 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1719 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1721 if (test_bit(HCI_MGMT, &hdev->dev_flags))
/* NULL name/0 length marks the resolution as failed for this peer. */
1722 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1727 if (!hci_outgoing_auth_needed(hdev, conn))
1730 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1731 struct hci_cp_auth_requested auth_cp;
1733 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1735 auth_cp.handle = __cpu_to_le16(conn->handle);
1736 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1737 sizeof(auth_cp), &auth_cp);
1741 hci_dev_unlock(hdev);
/* Command Status: HCI_Read_Remote_Supported_Features.
 * On failure during connection setup, confirm the error to the protocol
 * layer and drop the setup reference.
 */
1744 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1746 struct hci_cp_read_remote_features *cp;
1747 struct hci_conn *conn;
1749 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1754 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1760 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1762 if (conn->state == BT_CONFIG) {
1763 hci_proto_connect_cfm(conn, status);
1764 hci_conn_drop(conn);
1768 hci_dev_unlock(hdev);
/* Command Status: HCI_Read_Remote_Extended_Features.
 * Same failure handling as hci_cs_read_remote_features().
 */
1771 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1773 struct hci_cp_read_remote_ext_features *cp;
1774 struct hci_conn *conn;
1776 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1781 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1787 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1789 if (conn->state == BT_CONFIG) {
1790 hci_proto_connect_cfm(conn, status);
1791 hci_conn_drop(conn);
1795 hci_dev_unlock(hdev);
/* Command Status: HCI_Setup_Synchronous_Connection.
 * On failure, finds the parent ACL by the handle we sent and closes the
 * attached SCO/eSCO link, notifying the protocol layer.
 */
1798 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1800 struct hci_cp_setup_sync_conn *cp;
1801 struct hci_conn *acl, *sco;
1804 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1809 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1813 handle = __le16_to_cpu(cp->handle);
1815 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1819 acl = hci_conn_hash_lookup_handle(hdev, handle);
/* sco presumably taken from acl->link -- assignment not visible here. */
1823 sco->state = BT_CLOSED;
1825 hci_proto_connect_cfm(sco, status);
1830 hci_dev_unlock(hdev);
/* Command Status: HCI_Sniff_Mode.
 * On failure, clears the mode-change-pending flag and, if a SCO setup was
 * waiting on the mode change, lets it proceed with the error status.
 */
1833 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1835 struct hci_cp_sniff_mode *cp;
1836 struct hci_conn *conn;
1838 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1843 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1849 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1851 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1853 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1854 hci_sco_setup(conn, status);
1857 hci_dev_unlock(hdev);
/* Command Status: HCI_Exit_Sniff_Mode.
 * Identical failure handling to hci_cs_sniff_mode().
 */
1860 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1862 struct hci_cp_exit_sniff_mode *cp;
1863 struct hci_conn *conn;
1865 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1870 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1876 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1878 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1880 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1881 hci_sco_setup(conn, status);
1884 hci_dev_unlock(hdev);
/* Command Status: HCI_Disconnect.
 * On failure, informs mgmt that the requested disconnect did not happen
 * so user space is not left waiting for a disconnect event.
 */
1887 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1889 struct hci_cp_disconnect *cp;
1890 struct hci_conn *conn;
1895 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1901 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1903 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1904 conn->dst_type, status);
1906 hci_dev_unlock(hdev);
/* Command Status: HCI_Create_Physical_Link (AMP).
 * On failure, looks up the AMP hcon by its physical-link handle (cleanup
 * path not visible in this extract); on success, writes the remote AMP
 * association for that handle.
 */
1909 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1911 struct hci_cp_create_phy_link *cp;
1913 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1915 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1922 struct hci_conn *hcon;
1924 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1928 amp_write_remote_assoc(hdev, cp->phy_handle);
1931 hci_dev_unlock(hdev);
/* Command Status: HCI_Accept_Physical_Link (AMP).
 * On success, continues the AMP handshake by writing the remote
 * association for the accepted physical-link handle.
 */
1934 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1936 struct hci_cp_accept_phy_link *cp;
1938 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1943 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1947 amp_write_remote_assoc(hdev, cp->phy_handle);
/* Command Status: HCI_LE_Create_Connection.
 * On success, records the initiator/responder addresses needed later by
 * SMP and arms a connection timeout for attempts that page a specific
 * peer (LE has no page timeout of its own).  Failures are handled
 * elsewhere via hci_le_conn_failed() through the request callbacks.
 */
1950 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1952 struct hci_cp_le_create_conn *cp;
1953 struct hci_conn *conn;
1955 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1957 /* All connection failure handling is taken care of by the
1958 * hci_le_conn_failed function which is triggered by the HCI
1959 * request completion callbacks used for connecting.
1964 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1970 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1974 /* Store the initiator and responder address information which
1975 * is needed for SMP. These values will not change during the
1976 * lifetime of the connection.
1978 conn->init_addr_type = cp->own_address_type;
1979 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1980 bacpy(&conn->init_addr, &hdev->random_addr);
1982 bacpy(&conn->init_addr, &hdev->bdaddr);
1984 conn->resp_addr_type = cp->peer_addr_type;
1985 bacpy(&conn->resp_addr, &cp->peer_addr);
1987 /* We don't want the connection attempt to stick around
1988 * indefinitely since LE doesn't have a page timeout concept
1989 * like BR/EDR. Set a timer for any connection that doesn't use
1990 * the white list for connecting.
1992 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1993 queue_delayed_work(conn->hdev->workqueue,
1994 &conn->le_conn_timeout,
1995 conn->conn_timeout);
1998 hci_dev_unlock(hdev);
/* Command Status: HCI_LE_Start_Encryption.
 * On failure for a still-connected link, tears the connection down with an
 * authentication-failure reason, since encryption could not be started.
 */
2001 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2003 struct hci_cp_le_start_enc *cp;
2004 struct hci_conn *conn;
2006 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2013 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2017 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2021 if (conn->state != BT_CONNECTED)
2024 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2025 hci_conn_drop(conn);
2028 hci_dev_unlock(hdev);
/* Command Status: HCI_Switch_Role.
 * On failure, clears the role-switch-pending flag so later attempts are
 * not blocked.
 */
2031 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2033 struct hci_cp_switch_role *cp;
2034 struct hci_conn *conn;
2036 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2041 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2047 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2049 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2051 hci_dev_unlock(hdev);
/* Event: Inquiry Complete.
 * Clears HCI_INQUIRY (waking any waiters), then for mgmt-driven discovery
 * either moves into name resolution for entries that still need names or
 * marks discovery as stopped.
 */
2054 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2056 __u8 status = *((__u8 *) skb->data);
2057 struct discovery_state *discov = &hdev->discovery;
2058 struct inquiry_entry *e;
2060 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2062 hci_conn_check_pending(hdev);
2064 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2067 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2068 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2070 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2075 if (discov->state != DISCOVERY_FINDING)
2078 if (list_empty(&discov->resolve)) {
2079 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Names still pending: start resolving, else give up and stop. */
2083 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2084 if (e && hci_resolve_name(hdev, e) == 0) {
2085 e->name_state = NAME_PENDING;
2086 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2088 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2092 hci_dev_unlock(hdev);
/* Event: Inquiry Result (no RSSI).
 * For each reported device, copies its parameters into the inquiry cache
 * and forwards a device-found notification to mgmt with RSSI marked
 * invalid (this event variant carries none).
 */
2095 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2097 struct inquiry_data data;
/* First byte of the event is num_rsp; entries follow immediately. */
2098 struct inquiry_info *info = (void *) (skb->data + 1);
2099 int num_rsp = *((__u8 *) skb->data);
2101 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results of a periodic inquiry are ignored here. */
2106 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2111 for (; num_rsp; num_rsp--, info++) {
2114 bacpy(&data.bdaddr, &info->bdaddr);
2115 data.pscan_rep_mode = info->pscan_rep_mode;
2116 data.pscan_period_mode = info->pscan_period_mode;
2117 data.pscan_mode = info->pscan_mode;
2118 memcpy(data.dev_class, info->dev_class, 3);
2119 data.clock_offset = info->clock_offset;
2120 data.rssi = HCI_RSSI_INVALID;
2121 data.ssp_mode = 0x00;
2123 flags = hci_inquiry_cache_update(hdev, &data, false);
2125 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2126 info->dev_class, HCI_RSSI_INVALID,
2127 flags, NULL, 0, NULL, 0);
2130 hci_dev_unlock(hdev);
/* Event: Connection Complete.
 * Finalizes a BR/EDR connection: fixes up SCO-vs-eSCO link type, records
 * the handle, sets auth/encrypt flags from the adapter state, kicks off
 * remote-feature reads for ACL links, and on failure reports the error to
 * mgmt and the protocol layer.
 */
2133 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2135 struct hci_ev_conn_complete *ev = (void *) skb->data;
2136 struct hci_conn *conn;
2138 BT_DBG("%s", hdev->name);
2142 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Controller may report SCO for what we tracked as eSCO: retry the
 * lookup and downgrade the conn's type to match. */
2144 if (ev->link_type != SCO_LINK)
2147 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2151 conn->type = SCO_LINK;
2155 conn->handle = __le16_to_cpu(ev->handle);
2157 if (conn->type == ACL_LINK) {
2158 conn->state = BT_CONFIG;
2159 hci_conn_hold(conn);
/* Incoming legacy (non-SSP) link without a stored key: allow extra
 * time for pairing before the idle disconnect fires. */
2161 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2162 !hci_find_link_key(hdev, &ev->bdaddr))
2163 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2165 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2167 conn->state = BT_CONNECTED;
2169 hci_debugfs_create_conn(conn);
2170 hci_conn_add_sysfs(conn);
2172 if (test_bit(HCI_AUTH, &hdev->flags))
2173 set_bit(HCI_CONN_AUTH, &conn->flags);
2175 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2176 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2178 /* Get remote features */
2179 if (conn->type == ACL_LINK) {
2180 struct hci_cp_read_remote_features cp;
2181 cp.handle = ev->handle;
2182 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2185 hci_update_page_scan(hdev);
2188 /* Set packet type for incoming connection */
2189 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2190 struct hci_cp_change_conn_ptype cp;
2191 cp.handle = ev->handle;
2192 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2193 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Failure path: close and report. */
2197 conn->state = BT_CLOSED;
2198 if (conn->type == ACL_LINK)
2199 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2200 conn->dst_type, ev->status);
2203 if (conn->type == ACL_LINK)
2204 hci_sco_setup(conn, ev->status);
2207 hci_proto_connect_cfm(conn, ev->status);
2209 } else if (ev->link_type != ACL_LINK)
2210 hci_proto_connect_cfm(conn, ev->status);
2213 hci_dev_unlock(hdev);
2215 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from @bdaddr with reason
 * "rejected due to unacceptable BD_ADDR".
 */
2218 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2220 struct hci_cp_reject_conn_req cp;
2222 bacpy(&cp.bdaddr, bdaddr);
2223 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2224 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Event: Connection Request (incoming BR/EDR).
 * Consults the protocol layer, blacklist and (under mgmt) the
 * connectable/whitelist policy; rejected peers get hci_reject_conn().
 * Accepted requests update the inquiry cache, create/track an hci_conn
 * and either auto-accept (ACL, or SCO when not deferred) or defer the
 * decision to the protocol layer.
 */
2227 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2229 struct hci_ev_conn_request *ev = (void *) skb->data;
2230 int mask = hdev->link_mode;
2231 struct inquiry_entry *ie;
2232 struct hci_conn *conn;
2235 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
/* Let L2CAP/SCO layers veto or flag the request (sets HCI_PROTO_DEFER). */
2238 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2241 if (!(mask & HCI_LM_ACCEPT)) {
2242 hci_reject_conn(hdev, &ev->bdaddr);
2246 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2248 hci_reject_conn(hdev, &ev->bdaddr);
2252 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2253 * connection. These features are only touched through mgmt so
2254 * only do the checks if HCI_MGMT is set.
2256 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2257 !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
2258 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2260 hci_reject_conn(hdev, &ev->bdaddr);
2264 /* Connection accepted */
2268 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2270 memcpy(ie->data.dev_class, ev->dev_class, 3);
2272 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2275 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2278 BT_ERR("No memory for new connection");
2279 hci_dev_unlock(hdev);
2284 memcpy(conn->dev_class, ev->dev_class, 3);
2286 hci_dev_unlock(hdev);
2288 if (ev->link_type == ACL_LINK ||
2289 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2290 struct hci_cp_accept_conn_req cp;
2291 conn->state = BT_CONNECT;
2293 bacpy(&cp.bdaddr, &ev->bdaddr);
2295 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2296 cp.role = 0x00; /* Become master */
2298 cp.role = 0x01; /* Remain slave */
2300 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2301 } else if (!(flags & HCI_PROTO_DEFER)) {
2302 struct hci_cp_accept_sync_conn_req cp;
2303 conn->state = BT_CONNECT;
2305 bacpy(&cp.bdaddr, &ev->bdaddr);
2306 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Fixed 64 kbit/s (0x1f40) bandwidth for the sync (SCO/eSCO) link. */
2308 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2309 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2310 cp.max_latency = cpu_to_le16(0xffff);
2311 cp.content_format = cpu_to_le16(hdev->voice_setting);
2312 cp.retrans_effort = 0xff;
2314 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* Deferred: park in BT_CONNECT2 and let the protocol layer decide. */
2317 conn->state = BT_CONNECT2;
2318 hci_proto_connect_cfm(conn, 0);
/* Map an HCI disconnect reason code to the coarser mgmt-interface
 * disconnect reason reported to user space.
 */
2322 static u8 hci_to_mgmt_reason(u8 err)
2325 case HCI_ERROR_CONNECTION_TIMEOUT:
2326 return MGMT_DEV_DISCONN_TIMEOUT;
2327 case HCI_ERROR_REMOTE_USER_TERM:
2328 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2329 case HCI_ERROR_REMOTE_POWER_OFF:
2330 return MGMT_DEV_DISCONN_REMOTE;
2331 case HCI_ERROR_LOCAL_HOST_TERM:
2332 return MGMT_DEV_DISCONN_LOCAL_HOST;
2334 return MGMT_DEV_DISCONN_UNKNOWN;
/* Event: Disconnection Complete.
 * Closes the conn, notifies mgmt (distinguishing failed disconnects),
 * performs ACL-specific cleanup (link-key removal, page-scan update),
 * requeues auto-connect params for reconnection where configured, and
 * re-enables LE advertising if the disconnect freed the advertiser.
 */
2338 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2340 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2341 u8 reason = hci_to_mgmt_reason(ev->reason);
2342 struct hci_conn_params *params;
2343 struct hci_conn *conn;
2344 bool mgmt_connected;
2347 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2351 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Non-zero status means the disconnect itself failed. */
2356 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2357 conn->dst_type, ev->status);
2361 conn->state = BT_CLOSED;
2363 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2364 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2365 reason, mgmt_connected);
2367 if (conn->type == ACL_LINK) {
2368 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2369 hci_remove_link_key(hdev, &conn->dst);
2371 hci_update_page_scan(hdev);
2374 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2376 switch (params->auto_connect) {
2377 case HCI_AUTO_CONN_LINK_LOSS:
/* LINK_LOSS only reconnects after a supervision timeout. */
2378 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2382 case HCI_AUTO_CONN_DIRECT:
2383 case HCI_AUTO_CONN_ALWAYS:
2384 list_del_init(&params->action);
2385 list_add(&params->action, &hdev->pend_le_conns);
2386 hci_update_background_scan(hdev);
2396 hci_proto_disconn_cfm(conn, ev->reason);
2399 /* Re-enable advertising if necessary, since it might
2400 * have been disabled by the connection. From the
2401 * HCI_LE_Set_Advertise_Enable command description in
2402 * the core specification (v4.0):
2403 * "The Controller shall continue advertising until the Host
2404 * issues an LE_Set_Advertise_Enable command with
2405 * Advertising_Enable set to 0x00 (Advertising is disabled)
2406 * or until a connection is created or until the Advertising
2407 * is timed out due to Directed Advertising."
2409 if (type == LE_LINK)
2410 mgmt_reenable_advertising(hdev);
2413 hci_dev_unlock(hdev);
/* Event: Authentication Complete.
 * Updates the conn's auth state and security level; during connection
 * setup (BT_CONFIG) chains into encryption for SSP links, and handles a
 * pending encryption request queued while authentication was running.
 */
2416 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2418 struct hci_ev_auth_complete *ev = (void *) skb->data;
2419 struct hci_conn *conn;
2421 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2425 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Legacy devices cannot be re-authenticated; treat the pending
 * re-auth as satisfied rather than failing it. */
2430 if (!hci_conn_ssp_enabled(conn) &&
2431 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2432 BT_INFO("re-auth of legacy device is not possible.");
2434 set_bit(HCI_CONN_AUTH, &conn->flags);
2435 conn->sec_level = conn->pending_sec_level;
2438 mgmt_auth_failed(conn, ev->status);
2441 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2442 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2444 if (conn->state == BT_CONFIG) {
2445 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2446 struct hci_cp_set_conn_encrypt cp;
2447 cp.handle = ev->handle;
2449 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2452 conn->state = BT_CONNECTED;
2453 hci_proto_connect_cfm(conn, ev->status);
2454 hci_conn_drop(conn);
2457 hci_auth_cfm(conn, ev->status);
2459 hci_conn_hold(conn);
2460 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2461 hci_conn_drop(conn);
/* Encryption was requested while auth was in flight: send it now. */
2464 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2466 struct hci_cp_set_conn_encrypt cp;
2467 cp.handle = ev->handle;
2469 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2472 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2473 hci_encrypt_cfm(conn, ev->status, 0x00);
2478 hci_dev_unlock(hdev);
/* Event: Remote Name Request Complete.
 * Feeds the (possibly empty) resolved name into the discovery machinery
 * for mgmt-controlled adapters, then starts authentication on outgoing
 * connections that still need it.
 */
2481 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2483 struct hci_ev_remote_name *ev = (void *) skb->data;
2484 struct hci_conn *conn;
2486 BT_DBG("%s", hdev->name);
2488 hci_conn_check_pending(hdev);
2492 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2494 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2497 if (ev->status == 0)
2498 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2499 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2501 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2507 if (!hci_outgoing_auth_needed(hdev, conn))
2510 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2511 struct hci_cp_auth_requested cp;
2513 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2515 cp.handle = __cpu_to_le16(conn->handle);
2516 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2520 hci_dev_unlock(hdev);
/* Event: Encryption Change.
 * Updates per-conn auth/encrypt/FIPS/AES-CCM flags from the new state,
 * invalidates the local RPA on LE encryption failure, drops connections
 * whose encryption failed, and enforces Secure Connections Only policy
 * before completing connection setup.
 */
2523 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2525 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2526 struct hci_conn *conn;
2528 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2532 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2538 /* Encryption implies authentication */
2539 set_bit(HCI_CONN_AUTH, &conn->flags);
2540 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2541 conn->sec_level = conn->pending_sec_level;
2543 /* P-256 authentication key implies FIPS */
2544 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2545 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 means AES-CCM on BR/EDR; LE always uses AES-CCM. */
2547 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2548 conn->type == LE_LINK)
2549 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2551 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2552 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2556 /* We should disregard the current RPA and generate a new one
2557 * whenever the encryption procedure fails.
2559 if (ev->status && conn->type == LE_LINK)
2560 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2562 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2564 if (ev->status && conn->state == BT_CONNECTED) {
2565 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2566 hci_conn_drop(conn);
2570 if (conn->state == BT_CONFIG) {
2572 conn->state = BT_CONNECTED;
2574 /* In Secure Connections Only mode, do not allow any
2575 * connections that are not encrypted with AES-CCM
2576 * using a P-256 authenticated combination key.
2578 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2579 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2580 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2581 hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2582 hci_conn_drop(conn);
2586 hci_proto_connect_cfm(conn, ev->status);
2587 hci_conn_drop(conn);
2589 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2592 hci_dev_unlock(hdev);
/* Event: Change Connection Link Key Complete.
 * Marks the conn as using a fresh (secure) link key, clears the pending
 * auth flag and notifies the key-change listeners.
 */
2595 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2596 struct sk_buff *skb)
2598 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2599 struct hci_conn *conn;
2601 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2605 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2608 set_bit(HCI_CONN_SECURE, &conn->flags);
2610 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2612 hci_key_change_cfm(conn, ev->status);
2615 hci_dev_unlock(hdev);
/* Event: Read Remote Supported Features Complete.
 * Stores page 0 of the remote feature mask; during setup, chains into an
 * extended-features read for SSP-capable peers, otherwise requests the
 * remote name (or reports connected to mgmt) and finishes setup if no
 * authentication is required.
 */
2618 static void hci_remote_features_evt(struct hci_dev *hdev,
2619 struct sk_buff *skb)
2621 struct hci_ev_remote_features *ev = (void *) skb->data;
2622 struct hci_conn *conn;
2624 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2628 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2633 memcpy(conn->features[0], ev->features, 8);
2635 if (conn->state != BT_CONFIG)
2638 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2639 struct hci_cp_read_remote_ext_features cp;
2640 cp.handle = ev->handle;
2642 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2647 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2648 struct hci_cp_remote_name_req cp;
2649 memset(&cp, 0, sizeof(cp));
2650 bacpy(&cp.bdaddr, &conn->dst);
2651 cp.pscan_rep_mode = 0x02;
2652 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2653 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2654 mgmt_device_connected(hdev, conn, 0, NULL, 0);
2656 if (!hci_outgoing_auth_needed(hdev, conn)) {
2657 conn->state = BT_CONNECTED;
2658 hci_proto_connect_cfm(conn, ev->status);
2659 hci_conn_drop(conn);
2663 hci_dev_unlock(hdev);
/* Event: Command Complete.
 * Dispatches the reply to the per-opcode hci_cc_* handler, cancels the
 * command timeout, completes any waiting HCI request, and restarts the
 * command queue when the controller signals it can take more commands.
 */
2666 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2668 struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* Status byte immediately follows the event header in the reply. */
2669 u8 status = skb->data[sizeof(*ev)];
2672 skb_pull(skb, sizeof(*ev));
2674 opcode = __le16_to_cpu(ev->opcode);
2677 case HCI_OP_INQUIRY_CANCEL:
2678 hci_cc_inquiry_cancel(hdev, skb);
2681 case HCI_OP_PERIODIC_INQ:
2682 hci_cc_periodic_inq(hdev, skb);
2685 case HCI_OP_EXIT_PERIODIC_INQ:
2686 hci_cc_exit_periodic_inq(hdev, skb);
2689 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2690 hci_cc_remote_name_req_cancel(hdev, skb);
2693 case HCI_OP_ROLE_DISCOVERY:
2694 hci_cc_role_discovery(hdev, skb);
2697 case HCI_OP_READ_LINK_POLICY:
2698 hci_cc_read_link_policy(hdev, skb);
2701 case HCI_OP_WRITE_LINK_POLICY:
2702 hci_cc_write_link_policy(hdev, skb);
2705 case HCI_OP_READ_DEF_LINK_POLICY:
2706 hci_cc_read_def_link_policy(hdev, skb);
2709 case HCI_OP_WRITE_DEF_LINK_POLICY:
2710 hci_cc_write_def_link_policy(hdev, skb);
2714 hci_cc_reset(hdev, skb);
2717 case HCI_OP_WRITE_LOCAL_NAME:
2718 hci_cc_write_local_name(hdev, skb);
2721 case HCI_OP_READ_LOCAL_NAME:
2722 hci_cc_read_local_name(hdev, skb);
2725 case HCI_OP_WRITE_AUTH_ENABLE:
2726 hci_cc_write_auth_enable(hdev, skb);
2729 case HCI_OP_WRITE_ENCRYPT_MODE:
2730 hci_cc_write_encrypt_mode(hdev, skb);
2733 case HCI_OP_WRITE_SCAN_ENABLE:
2734 hci_cc_write_scan_enable(hdev, skb);
2737 case HCI_OP_READ_CLASS_OF_DEV:
2738 hci_cc_read_class_of_dev(hdev, skb);
2741 case HCI_OP_WRITE_CLASS_OF_DEV:
2742 hci_cc_write_class_of_dev(hdev, skb);
2745 case HCI_OP_READ_VOICE_SETTING:
2746 hci_cc_read_voice_setting(hdev, skb);
2749 case HCI_OP_WRITE_VOICE_SETTING:
2750 hci_cc_write_voice_setting(hdev, skb);
2753 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2754 hci_cc_read_num_supported_iac(hdev, skb);
2757 case HCI_OP_WRITE_SSP_MODE:
2758 hci_cc_write_ssp_mode(hdev, skb);
2761 case HCI_OP_WRITE_SC_SUPPORT:
2762 hci_cc_write_sc_support(hdev, skb);
2765 case HCI_OP_READ_LOCAL_VERSION:
2766 hci_cc_read_local_version(hdev, skb);
2769 case HCI_OP_READ_LOCAL_COMMANDS:
2770 hci_cc_read_local_commands(hdev, skb);
2773 case HCI_OP_READ_LOCAL_FEATURES:
2774 hci_cc_read_local_features(hdev, skb);
2777 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2778 hci_cc_read_local_ext_features(hdev, skb);
2781 case HCI_OP_READ_BUFFER_SIZE:
2782 hci_cc_read_buffer_size(hdev, skb);
2785 case HCI_OP_READ_BD_ADDR:
2786 hci_cc_read_bd_addr(hdev, skb);
2789 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2790 hci_cc_read_page_scan_activity(hdev, skb);
2793 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2794 hci_cc_write_page_scan_activity(hdev, skb);
2797 case HCI_OP_READ_PAGE_SCAN_TYPE:
2798 hci_cc_read_page_scan_type(hdev, skb);
2801 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2802 hci_cc_write_page_scan_type(hdev, skb);
2805 case HCI_OP_READ_DATA_BLOCK_SIZE:
2806 hci_cc_read_data_block_size(hdev, skb);
2809 case HCI_OP_READ_FLOW_CONTROL_MODE:
2810 hci_cc_read_flow_control_mode(hdev, skb);
2813 case HCI_OP_READ_LOCAL_AMP_INFO:
2814 hci_cc_read_local_amp_info(hdev, skb);
2817 case HCI_OP_READ_CLOCK:
2818 hci_cc_read_clock(hdev, skb);
2821 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2822 hci_cc_read_local_amp_assoc(hdev, skb);
2825 case HCI_OP_READ_INQ_RSP_TX_POWER:
2826 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2829 case HCI_OP_PIN_CODE_REPLY:
2830 hci_cc_pin_code_reply(hdev, skb);
2833 case HCI_OP_PIN_CODE_NEG_REPLY:
2834 hci_cc_pin_code_neg_reply(hdev, skb);
2837 case HCI_OP_READ_LOCAL_OOB_DATA:
2838 hci_cc_read_local_oob_data(hdev, skb);
2841 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2842 hci_cc_read_local_oob_ext_data(hdev, skb);
2845 case HCI_OP_LE_READ_BUFFER_SIZE:
2846 hci_cc_le_read_buffer_size(hdev, skb);
2849 case HCI_OP_LE_READ_LOCAL_FEATURES:
2850 hci_cc_le_read_local_features(hdev, skb);
2853 case HCI_OP_LE_READ_ADV_TX_POWER:
2854 hci_cc_le_read_adv_tx_power(hdev, skb);
2857 case HCI_OP_USER_CONFIRM_REPLY:
2858 hci_cc_user_confirm_reply(hdev, skb);
2861 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2862 hci_cc_user_confirm_neg_reply(hdev, skb);
2865 case HCI_OP_USER_PASSKEY_REPLY:
2866 hci_cc_user_passkey_reply(hdev, skb);
2869 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2870 hci_cc_user_passkey_neg_reply(hdev, skb);
2873 case HCI_OP_LE_SET_RANDOM_ADDR:
2874 hci_cc_le_set_random_addr(hdev, skb);
2877 case HCI_OP_LE_SET_ADV_ENABLE:
2878 hci_cc_le_set_adv_enable(hdev, skb);
2881 case HCI_OP_LE_SET_SCAN_PARAM:
2882 hci_cc_le_set_scan_param(hdev, skb);
2885 case HCI_OP_LE_SET_SCAN_ENABLE:
2886 hci_cc_le_set_scan_enable(hdev, skb);
2889 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2890 hci_cc_le_read_white_list_size(hdev, skb);
2893 case HCI_OP_LE_CLEAR_WHITE_LIST:
2894 hci_cc_le_clear_white_list(hdev, skb);
2897 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2898 hci_cc_le_add_to_white_list(hdev, skb);
2901 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2902 hci_cc_le_del_from_white_list(hdev, skb);
2905 case HCI_OP_LE_READ_SUPPORTED_STATES:
2906 hci_cc_le_read_supported_states(hdev, skb);
2909 case HCI_OP_LE_READ_DEF_DATA_LEN:
2910 hci_cc_le_read_def_data_len(hdev, skb);
2913 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
2914 hci_cc_le_write_def_data_len(hdev, skb);
2917 case HCI_OP_LE_READ_MAX_DATA_LEN:
2918 hci_cc_le_read_max_data_len(hdev, skb);
2921 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2922 hci_cc_write_le_host_supported(hdev, skb);
2925 case HCI_OP_LE_SET_ADV_PARAM:
2926 hci_cc_set_adv_param(hdev, skb);
2929 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2930 hci_cc_write_remote_amp_assoc(hdev, skb);
2933 case HCI_OP_READ_RSSI:
2934 hci_cc_read_rssi(hdev, skb);
2937 case HCI_OP_READ_TX_POWER:
2938 hci_cc_read_tx_power(hdev, skb);
/* Unknown opcodes are just logged. */
2942 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2946 if (opcode != HCI_OP_NOP)
2947 cancel_delayed_work(&hdev->cmd_timer);
2949 hci_req_cmd_complete(hdev, opcode, status);
/* Non-zero ncmd: controller can accept another command now. */
2951 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2952 atomic_set(&hdev->cmd_cnt, 1);
2953 if (!skb_queue_empty(&hdev->cmd_q))
2954 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Event: Command Status.
 * Dispatches to the per-opcode hci_cs_* handler, cancels the command
 * timeout, completes waiting requests that are not expecting a dedicated
 * follow-up event, and restarts the command queue when ncmd allows.
 */
2958 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2960 struct hci_ev_cmd_status *ev = (void *) skb->data;
2963 skb_pull(skb, sizeof(*ev));
2965 opcode = __le16_to_cpu(ev->opcode);
2968 case HCI_OP_INQUIRY:
2969 hci_cs_inquiry(hdev, ev->status);
2972 case HCI_OP_CREATE_CONN:
2973 hci_cs_create_conn(hdev, ev->status);
2976 case HCI_OP_DISCONNECT:
2977 hci_cs_disconnect(hdev, ev->status);
2980 case HCI_OP_ADD_SCO:
2981 hci_cs_add_sco(hdev, ev->status);
2984 case HCI_OP_AUTH_REQUESTED:
2985 hci_cs_auth_requested(hdev, ev->status);
2988 case HCI_OP_SET_CONN_ENCRYPT:
2989 hci_cs_set_conn_encrypt(hdev, ev->status);
2992 case HCI_OP_REMOTE_NAME_REQ:
2993 hci_cs_remote_name_req(hdev, ev->status);
2996 case HCI_OP_READ_REMOTE_FEATURES:
2997 hci_cs_read_remote_features(hdev, ev->status);
3000 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3001 hci_cs_read_remote_ext_features(hdev, ev->status);
3004 case HCI_OP_SETUP_SYNC_CONN:
3005 hci_cs_setup_sync_conn(hdev, ev->status);
3008 case HCI_OP_CREATE_PHY_LINK:
3009 hci_cs_create_phylink(hdev, ev->status);
3012 case HCI_OP_ACCEPT_PHY_LINK:
3013 hci_cs_accept_phylink(hdev, ev->status);
3016 case HCI_OP_SNIFF_MODE:
3017 hci_cs_sniff_mode(hdev, ev->status);
3020 case HCI_OP_EXIT_SNIFF_MODE:
3021 hci_cs_exit_sniff_mode(hdev, ev->status);
3024 case HCI_OP_SWITCH_ROLE:
3025 hci_cs_switch_role(hdev, ev->status);
3028 case HCI_OP_LE_CREATE_CONN:
3029 hci_cs_le_create_conn(hdev, ev->status);
3032 case HCI_OP_LE_START_ENC:
3033 hci_cs_le_start_enc(hdev, ev->status);
3037 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3041 if (opcode != HCI_OP_NOP)
3042 cancel_delayed_work(&hdev->cmd_timer);
/* Only complete the request here when no further event is expected
 * for the sent command (condition partially visible in this extract). */
3045 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
3046 hci_req_cmd_complete(hdev, opcode, ev->status);
3048 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
3049 atomic_set(&hdev->cmd_cnt, 1);
3050 if (!skb_queue_empty(&hdev->cmd_q))
3051 queue_work(hdev->workqueue, &hdev->cmd_work);
3055 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3057 struct hci_ev_hardware_error *ev = (void *) skb->data;
3059 BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
/* HCI Role Change event: a master/slave role switch finished (or
 * failed) on an ACL link.  Updates the stored role, clears the
 * pending-switch flag and notifies upper layers via
 * hci_role_switch_cfm().
 *
 * NOTE(review): hci_dev_lock() and status/NULL guards were elided in
 * this extract (the matching hci_dev_unlock() is visible below).
 */
3062 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3064 struct hci_ev_role_change *ev = (void *) skb->data;
3065 struct hci_conn *conn;
3067 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3071 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3074 conn->role = ev->role;
3076 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3078 hci_role_switch_cfm(conn, ev->status, ev->role);
3081 hci_dev_unlock(hdev);
/* HCI Number Of Completed Packets event (packet-based flow control).
 *
 * For each reported connection handle, credits back the number of
 * packets the controller finished transmitting: decrements the
 * per-connection in-flight counter and tops up the matching global
 * quota (ACL, LE or SCO), clamped to the controller-advertised
 * maximum.  Finally re-schedules tx_work so queued traffic can flow.
 *
 * NOTE(review): braces, break/return statements and some case labels
 * were elided in this extract; code kept byte-identical.
 */
3084 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3086 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow control mode. */
3089 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3090 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate that the skb really holds num_hndl handle entries. */
3094 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3095 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3096 BT_DBG("%s bad parameters", hdev->name);
3100 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3102 for (i = 0; i < ev->num_hndl; i++) {
3103 struct hci_comp_pkts_info *info = &ev->handles[i];
3104 struct hci_conn *conn;
3105 __u16 handle, count;
3107 handle = __le16_to_cpu(info->handle);
3108 count = __le16_to_cpu(info->count);
3110 conn = hci_conn_hash_lookup_handle(hdev, handle);
3114 conn->sent -= count;
3116 switch (conn->type) {
3118 hdev->acl_cnt += count;
/* Never exceed the controller's advertised buffer count. */
3119 if (hdev->acl_cnt > hdev->acl_pkts)
3120 hdev->acl_cnt = hdev->acl_pkts;
/* LE links use the LE quota when the controller has dedicated
 * LE buffers, otherwise they share the ACL quota.
 */
3124 if (hdev->le_pkts) {
3125 hdev->le_cnt += count;
3126 if (hdev->le_cnt > hdev->le_pkts)
3127 hdev->le_cnt = hdev->le_pkts;
3129 hdev->acl_cnt += count;
3130 if (hdev->acl_cnt > hdev->acl_pkts)
3131 hdev->acl_cnt = hdev->acl_pkts;
3136 hdev->sco_cnt += count;
3137 if (hdev->sco_cnt > hdev->sco_pkts)
3138 hdev->sco_cnt = hdev->sco_pkts;
3142 BT_ERR("Unknown type %d conn %p", conn->type, conn);
/* Credits returned: let the TX path try to send again. */
3147 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a connection handle to a hci_conn, taking the device type
 * into account: BR/EDR controllers look up the handle directly, AMP
 * controllers resolve it via the hci_chan table first.
 *
 * NOTE(review): case labels, the chan->conn return path and the
 * final return were elided in this extract.
 */
3150 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3153 struct hci_chan *chan;
3155 switch (hdev->dev_type) {
3157 return hci_conn_hash_lookup_handle(hdev, handle);
3159 chan = hci_chan_lookup_handle(hdev, handle);
3164 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* HCI Number Of Completed Data Blocks event (block-based flow
 * control, used by AMP controllers).  Mirrors
 * hci_num_comp_pkts_evt() but credits data blocks instead of
 * packets, then re-schedules tx_work.
 *
 * NOTE(review): braces, break statements and some case labels were
 * elided in this extract; code kept byte-identical.
 */
3171 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3173 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* Only meaningful when the controller uses block-based flow control. */
3176 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3177 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate that the skb really holds num_hndl handle entries. */
3181 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3182 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3183 BT_DBG("%s bad parameters", hdev->name);
3187 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3190 for (i = 0; i < ev->num_hndl; i++) {
3191 struct hci_comp_blocks_info *info = &ev->handles[i];
3192 struct hci_conn *conn = NULL;
3193 __u16 handle, block_count;
3195 handle = __le16_to_cpu(info->handle);
3196 block_count = __le16_to_cpu(info->blocks);
/* Device-type aware lookup (AMP handles resolve via hci_chan). */
3198 conn = __hci_conn_lookup_handle(hdev, handle);
3202 conn->sent -= block_count;
3204 switch (conn->type) {
3207 hdev->block_cnt += block_count;
/* Clamp to the controller's advertised block count. */
3208 if (hdev->block_cnt > hdev->num_blocks)
3209 hdev->block_cnt = hdev->num_blocks;
3213 BT_ERR("Unknown type %d conn %p", conn->type, conn);
/* Credits returned: let the TX path try to send again. */
3218 queue_work(hdev->workqueue, &hdev->tx_work);
/* HCI Mode Change event: a connection entered/left sniff or active
 * mode.  Records the new mode, maintains the POWER_SAVE flag when
 * the change was remotely initiated, and resumes any SCO setup that
 * was deferred waiting for the mode change.
 *
 * NOTE(review): hci_dev_lock() and the conn NULL guard were elided
 * in this extract (the matching hci_dev_unlock() is visible below).
 */
3221 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3223 struct hci_ev_mode_change *ev = (void *) skb->data;
3224 struct hci_conn *conn;
3226 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3230 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3232 conn->mode = ev->mode;
/* Only track POWER_SAVE when WE did not request this mode change. */
3234 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3236 if (conn->mode == HCI_CM_ACTIVE)
3237 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3239 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
/* SCO setup deferred until the ACL link was active again. */
3242 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3243 hci_sco_setup(conn, ev->status);
3246 hci_dev_unlock(hdev);
/* HCI PIN Code Request event (legacy pairing).
 *
 * Extends the disconnect timeout for an established link while
 * pairing is in progress, rejects the request outright when we are
 * non-bondable and did not initiate authentication, and otherwise
 * forwards the request to user space via mgmt (flagging whether a
 * 16-digit "secure" PIN is required).
 *
 * NOTE(review): hci_dev_lock(), NULL guards, the secure-PIN
 * assignment and closing braces were elided in this extract.
 */
3249 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3251 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3252 struct hci_conn *conn;
3254 BT_DBG("%s", hdev->name);
3258 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold/drop pair: only the extended disc_timeout should persist. */
3262 if (conn->state == BT_CONNECTED) {
3263 hci_conn_hold(conn);
3264 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3265 hci_conn_drop(conn);
/* Non-bondable and not the auth initiator: refuse the PIN request. */
3268 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
3269 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3270 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3271 sizeof(ev->bdaddr), &ev->bdaddr);
3272 } else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
/* High security with legacy pairing needs a 16-digit PIN. */
3275 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3280 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3284 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and
 * derive the pending security level from the key type: unit/debug
 * keys set no level, unauthenticated keys give MEDIUM, authenticated
 * P-192 keys HIGH, authenticated P-256 keys FIPS.  A "changed
 * combination" event carries no usable type, so it is ignored.
 *
 * NOTE(review): the switch header, break/return statements and the
 * condition selecting HIGH vs MEDIUM for plain combination keys were
 * elided in this extract; code kept byte-identical.
 */
3287 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3289 if (key_type == HCI_LK_CHANGED_COMBINATION)
3292 conn->pin_length = pin_len;
3293 conn->key_type = key_type;
3296 case HCI_LK_LOCAL_UNIT:
3297 case HCI_LK_REMOTE_UNIT:
3298 case HCI_LK_DEBUG_COMBINATION:
3300 case HCI_LK_COMBINATION:
3302 conn->pending_sec_level = BT_SECURITY_HIGH;
3304 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3306 case HCI_LK_UNAUTH_COMBINATION_P192:
3307 case HCI_LK_UNAUTH_COMBINATION_P256:
3308 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3310 case HCI_LK_AUTH_COMBINATION_P192:
3311 conn->pending_sec_level = BT_SECURITY_HIGH;
3313 case HCI_LK_AUTH_COMBINATION_P256:
3314 conn->pending_sec_level = BT_SECURITY_FIPS;
/* HCI Link Key Request event: the controller asks whether we have a
 * stored key for the peer.  Looks the key up, rejects keys whose
 * strength is insufficient for the pending security level, and
 * replies with either HCI_OP_LINK_KEY_REPLY or the negative reply.
 *
 * NOTE(review): hci_dev_lock(), goto labels and closing braces were
 * elided in this extract; code kept byte-identical.
 */
3319 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3321 struct hci_ev_link_key_req *ev = (void *) skb->data;
3322 struct hci_cp_link_key_reply cp;
3323 struct hci_conn *conn;
3324 struct link_key *key;
3326 BT_DBG("%s", hdev->name);
/* Without the mgmt interface the kernel stores no keys. */
3328 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3333 key = hci_find_link_key(hdev, &ev->bdaddr);
3335 BT_DBG("%s link key not found for %pMR", hdev->name,
3340 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3343 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3345 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Unauthenticated keys must not satisfy an MITM requirement. */
3347 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3348 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3349 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3350 BT_DBG("%s ignoring unauthenticated key", hdev->name)
3354 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3355 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3356 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3357 BT_DBG("%s ignoring key unauthenticated for high security",
/* Acceptable key: sync type/PIN length onto the connection. */
3362 conn_set_key(conn, key->type, key->pin_len);
3365 bacpy(&cp.bdaddr, &ev->bdaddr);
3366 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3368 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3370 hci_dev_unlock(hdev);
/* Negative-reply path (no/unsuitable key). */
3375 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3376 hci_dev_unlock(hdev);
/* HCI Link Key Notification event: pairing produced a new link key.
 *
 * Extends the disconnect timeout, stores the key (when mgmt is
 * active), notifies user space, discards debug keys unless
 * explicitly kept, and tracks whether the key should be flushed on
 * disconnect via HCI_CONN_FLUSH_KEY.
 *
 * NOTE(review): hci_dev_lock(), NULL guards, the pin_len/persistent
 * setup and goto labels were elided in this extract.
 */
3379 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3381 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3382 struct hci_conn *conn;
3383 struct link_key *key;
3387 BT_DBG("%s", hdev->name);
3391 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold/drop pair: only the extended disc_timeout should persist. */
3395 hci_conn_hold(conn);
3396 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3397 hci_conn_drop(conn);
3399 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3400 conn_set_key(conn, ev->key_type, conn->pin_length);
3402 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3405 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3406 ev->key_type, pin_len, &persistent);
3410 /* Update connection information since adding the key will have
3411 * fixed up the type in the case of changed combination keys.
3413 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3414 conn_set_key(conn, key->type, key->pin_len);
3416 mgmt_new_link_key(hdev, key, persistent);
3418 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3419 * is set. If it's not set simply remove the key from the kernel
3420 * list (we've still notified user space about it but with
3421 * store_hint being 0).
3423 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3424 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
3425 list_del_rcu(&key->list);
3426 kfree_rcu(key, rcu);
/* Persistent keys survive disconnect; others are flushed. */
3431 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3433 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3436 hci_dev_unlock(hdev);
/* HCI Read Clock Offset Complete event: on success, cache the peer's
 * clock offset in the inquiry cache entry (speeds up later paging)
 * and refresh its timestamp.
 *
 * NOTE(review): hci_dev_lock() and the ie NULL guard were elided in
 * this extract (the matching hci_dev_unlock() is visible below).
 */
3439 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3441 struct hci_ev_clock_offset *ev = (void *) skb->data;
3442 struct hci_conn *conn;
3444 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3448 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3449 if (conn && !ev->status) {
3450 struct inquiry_entry *ie;
3452 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3454 ie->data.clock_offset = ev->clock_offset;
3455 ie->timestamp = jiffies;
3459 hci_dev_unlock(hdev);
/* HCI Connection Packet Type Changed event: on success, record the
 * newly negotiated packet types on the connection.
 *
 * NOTE(review): hci_dev_lock() was elided in this extract (the
 * matching hci_dev_unlock() is visible below).
 */
3462 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3464 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3465 struct hci_conn *conn;
3467 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3471 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3472 if (conn && !ev->status)
3473 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3475 hci_dev_unlock(hdev);
/* HCI Page Scan Repetition Mode Change event: refresh the cached
 * page-scan repetition mode (and timestamp) for the peer's inquiry
 * cache entry so subsequent paging uses the right parameters.
 *
 * NOTE(review): hci_dev_lock() and the ie NULL guard were elided in
 * this extract (the matching hci_dev_unlock() is visible below).
 */
3478 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3480 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3481 struct inquiry_entry *ie;
3483 BT_DBG("%s", hdev->name);
3487 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3489 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3490 ie->timestamp = jiffies;
3493 hci_dev_unlock(hdev);
/* HCI Inquiry Result With RSSI event handler.
 *
 * Two wire formats exist for this event; they are distinguished by
 * dividing the payload size by the response count.  Each response is
 * copied into a struct inquiry_data, merged into the inquiry cache
 * and reported to user space via mgmt_device_found().
 *
 * NOTE(review): hci_dev_lock(), the num_rsp==0 guard and closing
 * braces were elided in this extract; code kept byte-identical.
 */
3496 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3497 struct sk_buff *skb)
3499 struct inquiry_data data;
3500 int num_rsp = *((__u8 *) skb->data);
3502 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results from periodic inquiry are not forwarded to mgmt. */
3507 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
/* Variant with an extra pscan_mode field per response. */
3512 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3513 struct inquiry_info_with_rssi_and_pscan_mode *info;
3514 info = (void *) (skb->data + 1);
3516 for (; num_rsp; num_rsp--, info++) {
3519 bacpy(&data.bdaddr, &info->bdaddr);
3520 data.pscan_rep_mode = info->pscan_rep_mode;
3521 data.pscan_period_mode = info->pscan_period_mode;
3522 data.pscan_mode = info->pscan_mode;
3523 memcpy(data.dev_class, info->dev_class, 3);
3524 data.clock_offset = info->clock_offset;
3525 data.rssi = info->rssi;
3526 data.ssp_mode = 0x00;
3528 flags = hci_inquiry_cache_update(hdev, &data, false);
3530 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3531 info->dev_class, info->rssi,
3532 flags, NULL, 0, NULL, 0);
/* Standard variant without the pscan_mode field. */
3535 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3537 for (; num_rsp; num_rsp--, info++) {
3540 bacpy(&data.bdaddr, &info->bdaddr);
3541 data.pscan_rep_mode = info->pscan_rep_mode;
3542 data.pscan_period_mode = info->pscan_period_mode;
3543 data.pscan_mode = 0x00;
3544 memcpy(data.dev_class, info->dev_class, 3);
3545 data.clock_offset = info->clock_offset;
3546 data.rssi = info->rssi;
3547 data.ssp_mode = 0x00;
3549 flags = hci_inquiry_cache_update(hdev, &data, false);
3551 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3552 info->dev_class, info->rssi,
3553 flags, NULL, 0, NULL, 0);
3557 hci_dev_unlock(hdev);
/* HCI Read Remote Extended Features Complete event handler.
 *
 * Caches the reported feature page on the connection.  For page 1
 * it derives the remote host's SSP/SC support and, while the
 * connection is still in BT_CONFIG, either requests the remote name
 * or completes connection setup (mgmt notification, auth check,
 * connect confirmation).
 *
 * NOTE(review): hci_dev_lock(), goto labels, NULL guards and closing
 * braces were elided in this extract; code kept byte-identical.
 */
3560 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3561 struct sk_buff *skb)
3563 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3564 struct hci_conn *conn;
3566 BT_DBG("%s", hdev->name);
3570 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3574 if (ev->page < HCI_MAX_PAGES)
3575 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote *host* feature bits (SSP/SC). */
3577 if (!ev->status && ev->page == 0x01) {
3578 struct inquiry_entry *ie;
3580 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3582 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3584 if (ev->features[0] & LMP_HOST_SSP) {
3585 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3587 /* It is mandatory by the Bluetooth specification that
3588 * Extended Inquiry Results are only used when Secure
3589 * Simple Pairing is enabled, but some devices violate
3592 * To make these devices work, the internal SSP
3593 * enabled flag needs to be cleared if the remote host
3594 * features do not indicate SSP support */
3595 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3598 if (ev->features[0] & LMP_HOST_SC)
3599 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Setup handling below only applies while still configuring. */
3602 if (conn->state != BT_CONFIG)
3605 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3606 struct hci_cp_remote_name_req cp;
3607 memset(&cp, 0, sizeof(cp));
3608 bacpy(&cp.bdaddr, &conn->dst);
3609 cp.pscan_rep_mode = 0x02;
3610 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3611 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3612 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3614 if (!hci_outgoing_auth_needed(hdev, conn)) {
3615 conn->state = BT_CONNECTED;
3616 hci_proto_connect_cfm(conn, ev->status);
3617 hci_conn_drop(conn);
3621 hci_dev_unlock(hdev);
/* HCI Synchronous Connection Complete event (SCO/eSCO).
 *
 * Matches the event to the pending SCO/eSCO hci_conn (falling back
 * from eSCO to SCO when the peer downgraded the link type), marks
 * the connection up on success, retries with a reduced packet-type
 * mask on a known set of negotiation failures, and otherwise closes
 * the connection.  Upper layers are notified via connect_cfm.
 *
 * NOTE(review): hci_dev_lock(), goto labels, break statements and
 * the retry/unlock scaffolding were elided in this extract.
 */
3624 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3625 struct sk_buff *skb)
3627 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3628 struct hci_conn *conn;
3630 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3634 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Peer may have answered an eSCO attempt with a plain SCO link. */
3636 if (ev->link_type == ESCO_LINK)
3639 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3643 conn->type = SCO_LINK;
3646 switch (ev->status) {
3648 conn->handle = __le16_to_cpu(ev->handle);
3649 conn->state = BT_CONNECTED;
3651 hci_debugfs_create_conn(conn);
3652 hci_conn_add_sysfs(conn);
/* Negotiation failures worth retrying with restricted pkt types. */
3655 case 0x10: /* Connection Accept Timeout */
3656 case 0x0d: /* Connection Rejected due to Limited Resources */
3657 case 0x11: /* Unsupported Feature or Parameter Value */
3658 case 0x1c: /* SCO interval rejected */
3659 case 0x1a: /* Unsupported Remote Feature */
3660 case 0x1f: /* Unspecified error */
3661 case 0x20: /* Unsupported LMP Parameter value */
3663 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3664 (hdev->esco_type & EDR_ESCO_MASK);
3665 if (hci_setup_sync(conn, conn->link->handle))
/* Any other failure: tear the connection down. */
3671 conn->state = BT_CLOSED;
3675 hci_proto_connect_cfm(conn, ev->status);
3680 hci_dev_unlock(hdev);
/* Compute the number of significant bytes in an EIR blob by walking
 * its length-prefixed fields (each field occupies field_len + 1
 * bytes including the length octet).
 *
 * NOTE(review): the declaration of 'parsed', the zero-length-field
 * termination check and the return statements were elided in this
 * extract; code kept byte-identical.
 */
3683 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3687 while (parsed < eir_len) {
3688 u8 field_len = eir[0];
3693 parsed += field_len + 1;
3694 eir += field_len + 1;
/* HCI Extended Inquiry Result event handler.
 *
 * For each response, fills a struct inquiry_data (ssp_mode forced to
 * 0x01 since EIR implies SSP), checks the EIR for a complete name to
 * decide the name_known hint, updates the inquiry cache and reports
 * the device to user space together with its trimmed EIR data.
 *
 * NOTE(review): hci_dev_lock(), the num_rsp==0 guard and closing
 * braces were elided in this extract; code kept byte-identical.
 */
3700 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3701 struct sk_buff *skb)
3703 struct inquiry_data data;
3704 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3705 int num_rsp = *((__u8 *) skb->data);
3708 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results from periodic inquiry are not forwarded to mgmt. */
3713 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3718 for (; num_rsp; num_rsp--, info++) {
3722 bacpy(&data.bdaddr, &info->bdaddr);
3723 data.pscan_rep_mode = info->pscan_rep_mode;
3724 data.pscan_period_mode = info->pscan_period_mode;
3725 data.pscan_mode = 0x00;
3726 memcpy(data.dev_class, info->dev_class, 3);
3727 data.clock_offset = info->clock_offset;
3728 data.rssi = info->rssi;
/* EIR is only sent when SSP is enabled on the remote. */
3729 data.ssp_mode = 0x01;
3731 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3732 name_known = eir_has_data_type(info->data,
3738 flags = hci_inquiry_cache_update(hdev, &data, name_known);
/* Trim trailing zero padding before passing the EIR upward. */
3740 eir_len = eir_get_length(info->data, sizeof(info->data));
3742 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3743 info->dev_class, info->rssi,
3744 flags, info->data, eir_len, NULL, 0);
3747 hci_dev_unlock(hdev);
/* HCI Encryption Key Refresh Complete event handler.
 *
 * Only acts on LE links (BR/EDR is handled via the auth-complete
 * path).  On success promotes the pending security level; on failure
 * of an established link the connection is disconnected.  While in
 * BT_CONFIG it completes connection setup, otherwise it confirms the
 * authentication request.
 *
 * NOTE(review): hci_dev_lock(), goto labels, NULL guards and closing
 * braces were elided in this extract; code kept byte-identical.
 */
3750 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3751 struct sk_buff *skb)
3753 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3754 struct hci_conn *conn;
3756 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3757 __le16_to_cpu(ev->handle));
3761 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3765 /* For BR/EDR the necessary steps are taken through the
3766 * auth_complete event.
3768 if (conn->type != LE_LINK)
3772 conn->sec_level = conn->pending_sec_level;
3774 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Refresh failed on a live link: force a disconnect. */
3776 if (ev->status && conn->state == BT_CONNECTED) {
3777 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3778 hci_conn_drop(conn);
3782 if (conn->state == BT_CONFIG) {
3784 conn->state = BT_CONNECTED;
3786 hci_proto_connect_cfm(conn, ev->status);
3787 hci_conn_drop(conn);
3789 hci_auth_cfm(conn, ev->status);
/* Hold/drop pair: only the refreshed disc_timeout should persist. */
3791 hci_conn_hold(conn);
3792 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3793 hci_conn_drop(conn);
3797 hci_dev_unlock(hdev);
3800 static u8 hci_get_auth_req(struct hci_conn *conn)
3802 /* If remote requests no-bonding follow that lead */
3803 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3804 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3805 return conn->remote_auth | (conn->auth_type & 0x01);
3807 /* If both remote and local have enough IO capabilities, require
3810 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3811 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3812 return conn->remote_auth | 0x01;
3814 /* No MITM protection possible so ignore remote requirement */
3815 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* HCI IO Capability Request event handler (SSP pairing).
 *
 * When pairing is permitted (bondable, we initiated auth, or the
 * remote asked for no-bonding) replies with our IO capability,
 * computed authentication requirement and OOB-data presence;
 * otherwise sends a negative reply with PAIRING_NOT_ALLOWED.
 *
 * NOTE(review): hci_dev_lock(), goto labels, the oob assignment and
 * closing braces were elided in this extract.
 */
3818 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3820 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3821 struct hci_conn *conn;
3823 BT_DBG("%s", hdev->name);
3827 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the connection alive for the duration of pairing. */
3831 hci_conn_hold(conn);
3833 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3836 /* Allow pairing if we're pairable, the initiators of the
3837 * pairing or if the remote is not requesting bonding.
3839 if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
3840 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
3841 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3842 struct hci_cp_io_capability_reply cp;
3844 bacpy(&cp.bdaddr, &ev->bdaddr);
3845 /* Change the IO capability from KeyboardDisplay
3846 * to DisplayYesNo as it is not supported by BT spec. */
3847 cp.capability = (conn->io_capability == 0x04) ?
3848 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3850 /* If we are initiators, there is no remote information yet */
3851 if (conn->remote_auth == 0xff) {
3852 /* Request MITM protection if our IO caps allow it
3853 * except for the no-bonding case.
3855 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3856 conn->auth_type != HCI_AT_NO_BONDING)
3857 conn->auth_type |= 0x01;
3859 conn->auth_type = hci_get_auth_req(conn);
3862 /* If we're not bondable, force one of the non-bondable
3863 * authentication requirement values.
3865 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
3866 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
3868 cp.authentication = conn->auth_type;
/* Advertise OOB data only when both sides can use it. */
3870 if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) &&
3871 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3876 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3879 struct hci_cp_io_capability_neg_reply cp;
3881 bacpy(&cp.bdaddr, &ev->bdaddr);
3882 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3884 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3889 hci_dev_unlock(hdev);
/* HCI IO Capability Response event: record the remote side's IO
 * capability and authentication requirement on the connection, and
 * note whether it claims to have OOB data.
 *
 * NOTE(review): hci_dev_lock(), the conn NULL guard and the
 * oob_data condition were elided in this extract.
 */
3892 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3894 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3895 struct hci_conn *conn;
3897 BT_DBG("%s", hdev->name);
3901 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3905 conn->remote_cap = ev->capability;
3906 conn->remote_auth = ev->authentication;
3908 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3911 hci_dev_unlock(hdev);
/* HCI User Confirmation Request event handler (SSP numeric
 * comparison).
 *
 * Rejects the request when we need MITM but the remote cannot
 * provide it; auto-accepts (possibly after a configurable delay)
 * when neither side requires MITM; otherwise forwards the request to
 * user space via mgmt, with confirm_hint signalling whether user
 * authorization rather than value comparison is needed.
 *
 * NOTE(review): hci_dev_lock(), goto labels, the confirm_hint
 * assignment and closing braces were elided in this extract.
 */
3914 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3915 struct sk_buff *skb)
3917 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3918 int loc_mitm, rem_mitm, confirm_hint = 0;
3919 struct hci_conn *conn;
3921 BT_DBG("%s", hdev->name);
3925 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3928 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM flag. */
3932 loc_mitm = (conn->auth_type & 0x01);
3933 rem_mitm = (conn->remote_auth & 0x01);
3935 /* If we require MITM but the remote device can't provide that
3936 * (it has NoInputNoOutput) then reject the confirmation
3937 * request. We check the security level here since it doesn't
3938 * necessarily match conn->auth_type.
3940 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
3941 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3942 BT_DBG("Rejecting request: remote device can't provide MITM");
3943 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3944 sizeof(ev->bdaddr), &ev->bdaddr);
3948 /* If no side requires MITM protection; auto-accept */
3949 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3950 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3952 /* If we're not the initiators request authorization to
3953 * proceed from user space (mgmt_user_confirm with
3954 * confirm_hint set to 1). The exception is if neither
3955 * side had MITM or if the local IO capability is
3956 * NoInputNoOutput, in which case we do auto-accept
3958 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
3959 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3960 (loc_mitm || rem_mitm)) {
3961 BT_DBG("Confirming auto-accept as acceptor");
3966 BT_DBG("Auto-accept of user confirmation with %ums delay",
3967 hdev->auto_accept_delay);
/* Delayed auto-accept runs from the per-connection work item. */
3969 if (hdev->auto_accept_delay > 0) {
3970 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3971 queue_delayed_work(conn->hdev->workqueue,
3972 &conn->auto_accept_work, delay);
3976 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3977 sizeof(ev->bdaddr), &ev->bdaddr);
3982 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
3983 le32_to_cpu(ev->passkey), confirm_hint);
3986 hci_dev_unlock(hdev);
3989 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3990 struct sk_buff *skb)
3992 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3994 BT_DBG("%s", hdev->name);
3996 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3997 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* HCI User Passkey Notification event: store the passkey to display
 * and reset the entered-digit counter, then (with mgmt active)
 * notify user space so it can show the passkey.
 *
 * NOTE(review): the conn NULL guard was elided in this extract.
 */
4000 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4001 struct sk_buff *skb)
4003 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4004 struct hci_conn *conn;
4006 BT_DBG("%s", hdev->name);
4008 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4012 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4013 conn->passkey_entered = 0;
4015 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4016 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4017 conn->dst_type, conn->passkey_notify,
4018 conn->passkey_entered);
/* HCI Keypress Notification event: track how many passkey digits
 * the remote user has entered so far and relay the progress to user
 * space via mgmt.
 *
 * NOTE(review): the switch header, break statements and the conn
 * NULL guard were elided in this extract.
 */
4021 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4023 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4024 struct hci_conn *conn;
4026 BT_DBG("%s", hdev->name);
4028 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4033 case HCI_KEYPRESS_STARTED:
4034 conn->passkey_entered = 0;
4037 case HCI_KEYPRESS_ENTERED:
4038 conn->passkey_entered++;
4041 case HCI_KEYPRESS_ERASED:
4042 conn->passkey_entered--;
4045 case HCI_KEYPRESS_CLEARED:
4046 conn->passkey_entered = 0;
4049 case HCI_KEYPRESS_COMPLETED:
4053 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4054 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4055 conn->dst_type, conn->passkey_notify,
4056 conn->passkey_entered);
/* HCI Simple Pairing Complete event: reset the remote auth
 * requirement, report a pairing failure to mgmt when we were not
 * the auth initiator (to avoid duplicate auth_failed events), and
 * release the reference taken at IO-capability-request time.
 *
 * NOTE(review): hci_dev_lock() and the conn NULL guard were elided
 * in this extract (the matching hci_dev_unlock() is visible below).
 */
4059 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4060 struct sk_buff *skb)
4062 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4063 struct hci_conn *conn;
4065 BT_DBG("%s", hdev->name);
4069 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4073 /* Reset the authentication requirement to unknown */
4074 conn->remote_auth = 0xff;
4076 /* To avoid duplicate auth_failed events to user space we check
4077 * the HCI_CONN_AUTH_PEND flag which will be set if we
4078 * initiated the authentication. A traditional auth_complete
4079 * event gets always produced as initiator and is also mapped to
4080 * the mgmt_auth_failed event */
4081 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4082 mgmt_auth_failed(conn, ev->status);
/* Drops the hold taken in hci_io_capa_request_evt(). */
4084 hci_conn_drop(conn);
4087 hci_dev_unlock(hdev);
/* HCI Remote Host Supported Features Notification event: cache the
 * remote host feature page (page 1) on any existing connection and
 * refresh the SSP indication in the inquiry cache entry.
 *
 * NOTE(review): hci_dev_lock() and the conn/ie NULL guards were
 * elided in this extract (the matching unlock is visible below).
 */
4090 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4091 struct sk_buff *skb)
4093 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4094 struct inquiry_entry *ie;
4095 struct hci_conn *conn;
4097 BT_DBG("%s", hdev->name);
4101 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4103 memcpy(conn->features[1], ev->features, 8);
4105 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4107 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4109 hci_dev_unlock(hdev);
/* HCI Remote OOB Data Request event handler.
 *
 * Looks up any locally stored OOB data for the peer: with data and
 * Secure Connections support, replies with both P-192 and P-256
 * hash/randomizer pairs; with data but no SC, only the P-192 pair;
 * without data, sends the negative reply.
 *
 * NOTE(review): hci_dev_lock(), the data NULL branch and closing
 * braces were elided in this extract; code kept byte-identical.
 */
4112 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4113 struct sk_buff *skb)
4115 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4116 struct oob_data *data;
4118 BT_DBG("%s", hdev->name);
4122 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4125 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* Secure Connections: reply with both P-192 and P-256 values. */
4127 if (bredr_sc_enabled(hdev)) {
4128 struct hci_cp_remote_oob_ext_data_reply cp;
4130 bacpy(&cp.bdaddr, &ev->bdaddr);
4131 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4132 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4133 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4134 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4136 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
/* Legacy SSP: only the P-192 values are usable. */
4139 struct hci_cp_remote_oob_data_reply cp;
4141 bacpy(&cp.bdaddr, &ev->bdaddr);
4142 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4143 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4145 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
/* No stored OOB data for this peer: negative reply. */
4149 struct hci_cp_remote_oob_data_neg_reply cp;
4151 bacpy(&cp.bdaddr, &ev->bdaddr);
4152 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4157 hci_dev_unlock(hdev);
/* HCI Physical Link Complete event (AMP): on success marks the
 * physical link connected, copies the peer address from the paired
 * BR/EDR connection, registers debugfs/sysfs entries and confirms
 * the physical link to the AMP manager.
 *
 * NOTE(review): hci_dev_lock(), NULL/amp_mgr guards and the
 * failure-status branch were elided in this extract.
 */
4160 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4161 struct sk_buff *skb)
4163 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4164 struct hci_conn *hcon, *bredr_hcon;
4166 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4171 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4173 hci_dev_unlock(hdev);
4179 hci_dev_unlock(hdev);
/* The AMP phy link is tied to the BR/EDR link that negotiated it. */
4183 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4185 hcon->state = BT_CONNECTED;
4186 bacpy(&hcon->dst, &bredr_hcon->dst);
/* Hold/drop pair: only the refreshed disc_timeout should persist. */
4188 hci_conn_hold(hcon);
4189 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4190 hci_conn_drop(hcon);
4192 hci_debugfs_create_conn(hcon);
4193 hci_conn_add_sysfs(hcon);
4195 amp_physical_cfm(bredr_hcon, hcon);
4197 hci_dev_unlock(hdev);
/* HCI Logical Link Complete event (AMP): create an hci_chan for the
 * new logical link, record its handle, and when an L2CAP channel is
 * waiting on the AMP manager, raise the bredr connection MTU to the
 * AMP block MTU and confirm the logical link to L2CAP.
 *
 * NOTE(review): hci_dev_lock()/unlock, NULL guards and closing
 * braces were elided in this extract; code kept byte-identical.
 */
4200 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4202 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4203 struct hci_conn *hcon;
4204 struct hci_chan *hchan;
4205 struct amp_mgr *mgr;
4207 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4208 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4211 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4215 /* Create AMP hchan */
4216 hchan = hci_chan_create(hcon);
4220 hchan->handle = le16_to_cpu(ev->handle);
4222 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4224 mgr = hcon->amp_mgr;
4225 if (mgr && mgr->bredr_chan) {
4226 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4228 l2cap_chan_lock(bredr_chan);
4230 bredr_chan->conn->mtu = hdev->block_mtu;
4231 l2cap_logical_cfm(bredr_chan, hchan, 0);
4232 hci_conn_hold(hcon);
4234 l2cap_chan_unlock(bredr_chan);
/* HCI Disconnection Logical Link Complete event (AMP): look up the
 * hci_chan by logical-link handle and tear it down with the reported
 * reason via amp_destroy_logical_link().
 *
 * NOTE(review): hci_dev_lock(), the status and hchan NULL guards
 * were elided in this extract.
 */
4238 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4239 struct sk_buff *skb)
4241 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4242 struct hci_chan *hchan;
4244 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4245 le16_to_cpu(ev->handle), ev->status);
4252 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4256 amp_destroy_logical_link(hchan, ev->reason);
4259 hci_dev_unlock(hdev);
/* HCI Disconnection Physical Link Complete event (AMP): mark the
 * physical link's hci_conn closed.
 *
 * NOTE(review): hci_dev_lock(), the status guard, the hcon NULL
 * guard and the connection teardown calls were elided in this
 * extract (the matching hci_dev_unlock() is visible below).
 */
4262 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4263 struct sk_buff *skb)
4265 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4266 struct hci_conn *hcon;
4268 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4275 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4277 hcon->state = BT_CLOSED;
4281 hci_dev_unlock(hdev);
/* HCI LE Connection Complete event handler.
 *
 * Resolves (or creates) the hci_conn for the new LE link, fills in
 * the initiator/responder addresses depending on our role, maps a
 * resolvable random address back to the peer's identity address via
 * the IRK store, rejects blacklisted peers, records the negotiated
 * connection parameters, notifies mgmt/L2CAP, clears any matching
 * pending-connection entry and restarts background scanning.
 *
 * NOTE(review): hci_dev_lock(), role checks, several guards, goto
 * labels and closing braces were elided in this extract; code kept
 * byte-identical.
 */
4284 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4286 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4287 struct hci_conn_params *params;
4288 struct hci_conn *conn;
4289 struct smp_irk *irk;
4292 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4296 /* All controllers implicitly stop advertising in the event of a
4297 * connection, so ensure that the state bit is cleared.
4299 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
4301 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
/* No pending connect attempt: allocate a fresh hci_conn. */
4303 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4305 BT_ERR("No memory for new connection");
4309 conn->dst_type = ev->bdaddr_type;
4311 /* If we didn't have a hci_conn object previously
4312 * but we're in master role this must be something
4313 * initiated using a white list. Since white list based
4314 * connections are not "first class citizens" we don't
4315 * have full tracking of them. Therefore, we go ahead
4316 * with a "best effort" approach of determining the
4317 * initiator address based on the HCI_PRIVACY flag.
/* Master role: we initiated, so peer is the responder. */
4320 conn->resp_addr_type = ev->bdaddr_type;
4321 bacpy(&conn->resp_addr, &ev->bdaddr);
4322 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
4323 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4324 bacpy(&conn->init_addr, &hdev->rpa);
4326 hci_copy_identity_address(hdev,
4328 &conn->init_addr_type);
/* Connect attempt completed before its timeout fired. */
4332 cancel_delayed_work(&conn->le_conn_timeout);
4336 /* Set the responder (our side) address type based on
4337 * the advertising address type.
4339 conn->resp_addr_type = hdev->adv_addr_type;
4340 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4341 bacpy(&conn->resp_addr, &hdev->random_addr);
4343 bacpy(&conn->resp_addr, &hdev->bdaddr);
4345 conn->init_addr_type = ev->bdaddr_type;
4346 bacpy(&conn->init_addr, &ev->bdaddr);
4348 /* For incoming connections, set the default minimum
4349 * and maximum connection interval. They will be used
4350 * to check if the parameters are in range and if not
4351 * trigger the connection update procedure.
4353 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4354 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4357 /* Lookup the identity address from the stored connection
4358 * address and address type.
4360 * When establishing connections to an identity address, the
4361 * connection procedure will store the resolvable random
4362 * address first. Now if it can be converted back into the
4363 * identity address, start using the identity address from
4366 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4368 bacpy(&conn->dst, &irk->bdaddr);
4369 conn->dst_type = irk->addr_type;
/* Controller reported a failure: run the LE failure path. */
4373 hci_le_conn_failed(conn, ev->status);
4377 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4378 addr_type = BDADDR_LE_PUBLIC;
4380 addr_type = BDADDR_LE_RANDOM;
4382 /* Drop the connection if the device is blocked */
4383 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4384 hci_conn_drop(conn);
4388 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4389 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4391 conn->sec_level = BT_SECURITY_LOW;
4392 conn->handle = __le16_to_cpu(ev->handle);
4393 conn->state = BT_CONNECTED;
4395 conn->le_conn_interval = le16_to_cpu(ev->interval);
4396 conn->le_conn_latency = le16_to_cpu(ev->latency);
4397 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4399 hci_debugfs_create_conn(conn);
4400 hci_conn_add_sysfs(conn);
4402 hci_proto_connect_cfm(conn, ev->status);
/* A pending auto-connect entry for this peer is now satisfied. */
4404 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4407 list_del_init(&params->action);
4409 hci_conn_drop(params->conn);
4410 hci_conn_put(params->conn);
4411 params->conn = NULL;
/* Scanning parameters may change now that a connection exists. */
4416 hci_update_background_scan(hdev);
4417 hci_dev_unlock(hdev);
/* Handle the HCI LE Connection Update Complete event: look up the
 * connection by its handle and refresh the cached LE connection
 * parameters (interval, latency, supervision timeout).
 *
 * NOTE(review): this chunk is a partial listing -- the event-status
 * check, the hci_dev_lock() call and the guard around the parameter
 * updates are not visible here; confirm against the full file.
 */
4420 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4421 struct sk_buff *skb)
4423 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4424 struct hci_conn *conn;
4426 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4433 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Cache the controller-reported values (little-endian on the wire) so
 * later parameter-range checks operate on current data.
 */
4435 conn->le_conn_interval = le16_to_cpu(ev->interval);
4436 conn->le_conn_latency = le16_to_cpu(ev->latency);
4437 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4440 hci_dev_unlock(hdev);
4443 /* This function requires the caller holds hdev->lock */
/* Decide, for an incoming advertising report, whether we should act on a
 * pending LE connection request for this address.  Returns the hci_conn
 * created by hci_connect_le() on success; otherwise bails out early
 * (non-connectable advertising, blacklisted device, existing slave-role
 * connection, or no matching pend_le_conns entry).
 *
 * NOTE(review): partial listing -- several return statements, braces and
 * the bdaddr_t *addr parameter line are not visible in this chunk.
 */
4444 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4446 u8 addr_type, u8 adv_type)
4448 struct hci_conn *conn;
4449 struct hci_conn_params *params;
4451 /* If the event is not connectable don't proceed further */
4452 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4455 /* Ignore if the device is blocked */
4456 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4459 /* Most controller will fail if we try to create new connections
4460 * while we have an existing one in slave role.
4462 if (hdev->conn_hash.le_num_slave > 0)
4465 /* If we're not connectable only connect devices that we have in
4466 * our pend_le_conns list.
4468 params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
/* Dispatch on the stored auto-connect policy for this peer. */
4473 switch (params->auto_connect) {
4474 case HCI_AUTO_CONN_DIRECT:
4475 /* Only devices advertising with ADV_DIRECT_IND are
4476 * triggering a connection attempt. This is allowing
4477 * incoming connections from slave devices.
4479 if (adv_type != LE_ADV_DIRECT_IND)
4482 case HCI_AUTO_CONN_ALWAYS:
4483 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4484 * are triggering a connection attempt. This means
4485 * that incoming connectioms from slave device are
4486 * accepted and also outgoing connections to slave
4487 * devices are established when found.
4494 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4495 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
4496 if (!IS_ERR(conn)) {
4497 /* Store the pointer since we don't really have any
4498 * other owner of the object besides the params that
4499 * triggered it. This way we can abort the connection if
4500 * the parameters get removed and keep the reference
4501 * count consistent once the connection is established.
4503 params->conn = hci_conn_get(conn);
/* hci_connect_le() failed: distinguish the benign -EBUSY case
 * from real errors.
 */
4507 switch (PTR_ERR(conn)) {
4509 /* If hci_connect() returns -EBUSY it means there is already
4510 * an LE connection attempt going on. Since controllers don't
4511 * support more than one connection attempt at the time, we
4512 * don't consider this an error case.
4516 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process one LE advertising report (from either the Advertising Report
 * or the Direct Advertising Report event, the latter indicated by a
 * non-NULL direct_addr):
 *  - validate directed-advertising RPAs against the local IRK,
 *  - map the advertiser address to its identity address when an IRK is
 *    known,
 *  - kick off any pending LE connection via check_pending_le_conn(),
 *  - emit (and, for scan responses, merge) mgmt Device Found events,
 *    using the pending-report cache in hdev->discovery.
 *
 * NOTE(review): partial listing -- returns, braces and some statements
 * between the visible lines are missing from this chunk.
 */
4523 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4524 u8 bdaddr_type, bdaddr_t *direct_addr,
4525 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4527 struct discovery_state *d = &hdev->discovery;
4528 struct smp_irk *irk;
4529 struct hci_conn *conn;
4533 /* If the direct address is present, then this report is from
4534 * a LE Direct Advertising Report event. In that case it is
4535 * important to see if the address is matching the local
4536 * controller address.
4539 /* Only resolvable random addresses are valid for these
4540 * kind of reports and others can be ignored.
4542 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4545 /* If the controller is not using resolvable random
4546 * addresses, then this report can be ignored.
4548 if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
4551 /* If the local IRK of the controller does not match
4552 * with the resolvable random address provided, then
4553 * this report can be ignored.
4555 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4559 /* Check if we need to convert to identity address */
4560 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4562 bdaddr = &irk->bdaddr;
4563 bdaddr_type = irk->addr_type;
4566 /* Check if we have been requested to connect to this device */
4567 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4568 if (conn && type == LE_ADV_IND) {
4569 /* Store report for later inclusion by
4570 * mgmt_device_connected
4572 memcpy(conn->le_adv_data, data, len);
4573 conn->le_adv_data_len = len;
4576 /* Passive scanning shouldn't trigger any device found events,
4577 * except for devices marked as CONN_REPORT for which we do send
4578 * device found events.
4580 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4581 if (type == LE_ADV_DIRECT_IND)
4584 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4585 bdaddr, bdaddr_type))
4588 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4589 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4592 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4593 rssi, flags, data, len, NULL, 0);
4597 /* When receiving non-connectable or scannable undirected
4598 * advertising reports, this means that the remote device is
4599 * not connectable and then clearly indicate this in the
4600 * device found event.
4602 * When receiving a scan response, then there is no way to
4603 * know if the remote device is connectable or not. However
4604 * since scan responses are merged with a previously seen
4605 * advertising report, the flags field from that report
4608 * In the really unlikely case that a controller get confused
4609 * and just sends a scan response event, then it is marked as
4610 * not connectable as well.
4612 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4613 type == LE_ADV_SCAN_RSP)
4614 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4618 /* If there's nothing pending either store the data from this
4619 * event or send an immediate device found event if the data
4620 * should not be stored for later.
4622 if (!has_pending_adv_report(hdev)) {
4623 /* If the report will trigger a SCAN_REQ store it for
4626 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4627 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4628 rssi, flags, data, len);
4632 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4633 rssi, flags, data, len, NULL, 0);
4637 /* Check if the pending report is for the same device as the new one */
4638 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4639 bdaddr_type == d->last_adv_addr_type);
4641 /* If the pending data doesn't match this report or this isn't a
4642 * scan response (e.g. we got a duplicate ADV_IND) then force
4643 * sending of the pending data.
4645 if (type != LE_ADV_SCAN_RSP || !match) {
4646 /* Send out whatever is in the cache, but skip duplicates */
4648 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4649 d->last_adv_addr_type, NULL,
4650 d->last_adv_rssi, d->last_adv_flags,
4652 d->last_adv_data_len, NULL, 0);
4654 /* If the new report will trigger a SCAN_REQ store it for
4657 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4658 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4659 rssi, flags, data, len);
4663 /* The advertising reports cannot be merged, so clear
4664 * the pending report and send out a device found event.
4666 clear_pending_adv_report(hdev);
4667 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4668 rssi, flags, data, len, NULL, 0);
4672 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4673 * the new event is a SCAN_RSP. We can therefore proceed with
4674 * sending a merged device found event.
4676 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4677 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4678 d->last_adv_data, d->last_adv_data_len, data, len);
4679 clear_pending_adv_report(hdev);
/* Handle the HCI LE Advertising Report event: the packet carries a
 * count byte followed by num_reports variable-length
 * hci_ev_le_advertising_info entries; each entry is followed by a
 * one-byte RSSI (read as ev->data[ev->length]).  Each report is handed
 * to process_adv_report() with no direct address.
 *
 * NOTE(review): partial listing -- the hci_dev_lock() call and closing
 * braces are not visible in this chunk.
 */
4682 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4684 u8 num_reports = skb->data[0];
4685 void *ptr = &skb->data[1];
4689 while (num_reports--) {
4690 struct hci_ev_le_advertising_info *ev = ptr;
/* RSSI trails the advertising data in the report layout. */
4693 rssi = ev->data[ev->length];
4694 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4695 ev->bdaddr_type, NULL, 0, rssi,
4696 ev->data, ev->length);
/* Advance past this report: fixed header + data + RSSI byte. */
4698 ptr += sizeof(*ev) + ev->length + 1;
4701 hci_dev_unlock(hdev);
/* Handle the HCI LE Long Term Key Request event: look up the stored LTK
 * for the connection and answer with LE_LTK_REPLY carrying the key, or
 * LE_LTK_NEG_REPLY when no suitable key exists or EDiv/Rand validation
 * fails.  STKs (SMP_STK) are single-use and are removed from the key
 * list after being handed to the controller.
 *
 * NOTE(review): partial listing -- the not-found/negative-reply label
 * and several braces and returns are not visible in this chunk.
 */
4704 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4706 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4707 struct hci_cp_le_ltk_reply cp;
4708 struct hci_cp_le_ltk_neg_reply neg;
4709 struct hci_conn *conn;
4710 struct smp_ltk *ltk;
4712 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4716 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4720 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
4724 if (smp_ltk_is_sc(ltk)) {
4725 /* With SC both EDiv and Rand are set to zero */
4726 if (ev->ediv || ev->rand)
4729 /* For non-SC keys check that EDiv and Rand match */
4730 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Key validated: send the positive reply with the stored LTK. */
4734 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4735 cp.handle = cpu_to_le16(conn->handle);
4737 conn->pending_sec_level = smp_ltk_sec_level(ltk);
4739 conn->enc_key_size = ltk->enc_size;
4741 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4743 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4744 * temporary key used to encrypt a connection following
4745 * pairing. It is used during the Encrypted Session Setup to
4746 * distribute the keys. Later, security can be re-established
4747 * using a distributed LTK.
4749 if (ltk->type == SMP_STK) {
4750 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
/* One-shot key: unlink under RCU and free after grace period. */
4751 list_del_rcu(&ltk->list);
4752 kfree_rcu(ltk, rcu);
4754 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4757 hci_dev_unlock(hdev);
/* Negative-reply path: echo the handle back (still little-endian). */
4762 neg.handle = ev->handle;
4763 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4764 hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply for the
 * given handle.
 *
 * NOTE(review): partial listing -- the reason parameter line and the
 * cp.reason assignment are not visible in this chunk.
 */
4767 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4770 struct hci_cp_le_conn_param_req_neg_reply cp;
4772 cp.handle = cpu_to_le16(handle);
4775 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle the HCI LE Remote Connection Parameter Request event: validate
 * the requested parameters, reject with a negative reply on an unknown
 * handle or out-of-range values, otherwise (as master) store the values
 * in the per-device connection parameters, notify userspace via mgmt,
 * and accept with a positive reply.
 *
 * NOTE(review): partial listing -- locking calls, the store_hint logic
 * and several braces are not visible in this chunk.
 */
4779 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4780 struct sk_buff *skb)
4782 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4783 struct hci_cp_le_conn_param_req_reply cp;
4784 struct hci_conn *hcon;
4785 u16 handle, min, max, latency, timeout;
/* Convert all event fields from wire (little-endian) order once. */
4787 handle = le16_to_cpu(ev->handle);
4788 min = le16_to_cpu(ev->interval_min);
4789 max = le16_to_cpu(ev->interval_max);
4790 latency = le16_to_cpu(ev->latency);
4791 timeout = le16_to_cpu(ev->timeout);
4793 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4794 if (!hcon || hcon->state != BT_CONNECTED)
4795 return send_conn_param_neg_reply(hdev, handle,
4796 HCI_ERROR_UNKNOWN_CONN_ID);
4798 if (hci_check_conn_params(min, max, latency, timeout))
4799 return send_conn_param_neg_reply(hdev, handle,
4800 HCI_ERROR_INVALID_LL_PARAMS);
4802 if (hcon->role == HCI_ROLE_MASTER) {
4803 struct hci_conn_params *params;
/* Persist the accepted parameters on the stored entry for this
 * peer so future reconnections use them.
 */
4808 params = hci_conn_params_lookup(hdev, &hcon->dst,
4811 params->conn_min_interval = min;
4812 params->conn_max_interval = max;
4813 params->conn_latency = latency;
4814 params->supervision_timeout = timeout;
4820 hci_dev_unlock(hdev);
4822 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4823 store_hint, min, max, latency, timeout);
/* Accept: echo the requested values back still in wire order. */
4826 cp.handle = ev->handle;
4827 cp.interval_min = ev->interval_min;
4828 cp.interval_max = ev->interval_max;
4829 cp.latency = ev->latency;
4830 cp.timeout = ev->timeout;
4834 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Handle the HCI LE Direct Advertising Report event: iterate the
 * num_reports fixed-size hci_ev_le_direct_adv_info entries and hand
 * each to process_adv_report() with the direct (initiator) address set
 * and no advertising data.
 *
 * NOTE(review): partial listing -- the hci_dev_lock() call and the
 * pointer-advance statement are not visible in this chunk.
 */
4837 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
4838 struct sk_buff *skb)
4840 u8 num_reports = skb->data[0];
4841 void *ptr = &skb->data[1];
4845 while (num_reports--) {
4846 struct hci_ev_le_direct_adv_info *ev = ptr;
4848 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4849 ev->bdaddr_type, &ev->direct_addr,
4850 ev->direct_addr_type, ev->rssi, NULL, 0);
4855 hci_dev_unlock(hdev);
/* Dispatch an HCI LE Meta event to the per-subevent handler.  The meta
 * header is pulled off the skb first, so each handler sees its own
 * subevent payload at skb->data.
 */
4858 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4860 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4862 skb_pull(skb, sizeof(*le_ev));
4864 switch (le_ev->subevent) {
4865 case HCI_EV_LE_CONN_COMPLETE:
4866 hci_le_conn_complete_evt(hdev, skb);
4869 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4870 hci_le_conn_update_complete_evt(hdev, skb);
4873 case HCI_EV_LE_ADVERTISING_REPORT:
4874 hci_le_adv_report_evt(hdev, skb);
4877 case HCI_EV_LE_LTK_REQ:
4878 hci_le_ltk_request_evt(hdev, skb);
4881 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4882 hci_le_remote_conn_param_req_evt(hdev, skb);
4885 case HCI_EV_LE_DIRECT_ADV_REPORT:
4886 hci_le_direct_adv_report_evt(hdev, skb);
/* Handle the HCI Channel Selected event (AMP): resolve the physical
 * link handle to a connection and kick off reading the final local AMP
 * association data.
 *
 * NOTE(review): partial listing -- the NULL check on hcon after lookup
 * is not visible in this chunk; verify against the full file.
 */
4894 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4896 struct hci_ev_channel_selected *ev = (void *) skb->data;
4897 struct hci_conn *hcon;
4899 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4901 skb_pull(skb, sizeof(*ev));
4903 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4907 amp_read_loc_assoc_final_data(hdev, hcon);
4910 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4912 struct hci_event_hdr *hdr = (void *) skb->data;
4913 __u8 event = hdr->evt;
4917 /* Received events are (currently) only needed when a request is
4918 * ongoing so avoid unnecessary memory allocation.
4920 if (hci_req_pending(hdev)) {
4921 kfree_skb(hdev->recv_evt);
4922 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4925 hci_dev_unlock(hdev);
4927 skb_pull(skb, HCI_EVENT_HDR_SIZE);
4929 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4930 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4931 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4933 hci_req_cmd_complete(hdev, opcode, 0);
4937 case HCI_EV_INQUIRY_COMPLETE:
4938 hci_inquiry_complete_evt(hdev, skb);
4941 case HCI_EV_INQUIRY_RESULT:
4942 hci_inquiry_result_evt(hdev, skb);
4945 case HCI_EV_CONN_COMPLETE:
4946 hci_conn_complete_evt(hdev, skb);
4949 case HCI_EV_CONN_REQUEST:
4950 hci_conn_request_evt(hdev, skb);
4953 case HCI_EV_DISCONN_COMPLETE:
4954 hci_disconn_complete_evt(hdev, skb);
4957 case HCI_EV_AUTH_COMPLETE:
4958 hci_auth_complete_evt(hdev, skb);
4961 case HCI_EV_REMOTE_NAME:
4962 hci_remote_name_evt(hdev, skb);
4965 case HCI_EV_ENCRYPT_CHANGE:
4966 hci_encrypt_change_evt(hdev, skb);
4969 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4970 hci_change_link_key_complete_evt(hdev, skb);
4973 case HCI_EV_REMOTE_FEATURES:
4974 hci_remote_features_evt(hdev, skb);
4977 case HCI_EV_CMD_COMPLETE:
4978 hci_cmd_complete_evt(hdev, skb);
4981 case HCI_EV_CMD_STATUS:
4982 hci_cmd_status_evt(hdev, skb);
4985 case HCI_EV_HARDWARE_ERROR:
4986 hci_hardware_error_evt(hdev, skb);
4989 case HCI_EV_ROLE_CHANGE:
4990 hci_role_change_evt(hdev, skb);
4993 case HCI_EV_NUM_COMP_PKTS:
4994 hci_num_comp_pkts_evt(hdev, skb);
4997 case HCI_EV_MODE_CHANGE:
4998 hci_mode_change_evt(hdev, skb);
5001 case HCI_EV_PIN_CODE_REQ:
5002 hci_pin_code_request_evt(hdev, skb);
5005 case HCI_EV_LINK_KEY_REQ:
5006 hci_link_key_request_evt(hdev, skb);
5009 case HCI_EV_LINK_KEY_NOTIFY:
5010 hci_link_key_notify_evt(hdev, skb);
5013 case HCI_EV_CLOCK_OFFSET:
5014 hci_clock_offset_evt(hdev, skb);
5017 case HCI_EV_PKT_TYPE_CHANGE:
5018 hci_pkt_type_change_evt(hdev, skb);
5021 case HCI_EV_PSCAN_REP_MODE:
5022 hci_pscan_rep_mode_evt(hdev, skb);
5025 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5026 hci_inquiry_result_with_rssi_evt(hdev, skb);
5029 case HCI_EV_REMOTE_EXT_FEATURES:
5030 hci_remote_ext_features_evt(hdev, skb);
5033 case HCI_EV_SYNC_CONN_COMPLETE:
5034 hci_sync_conn_complete_evt(hdev, skb);
5037 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5038 hci_extended_inquiry_result_evt(hdev, skb);
5041 case HCI_EV_KEY_REFRESH_COMPLETE:
5042 hci_key_refresh_complete_evt(hdev, skb);
5045 case HCI_EV_IO_CAPA_REQUEST:
5046 hci_io_capa_request_evt(hdev, skb);
5049 case HCI_EV_IO_CAPA_REPLY:
5050 hci_io_capa_reply_evt(hdev, skb);
5053 case HCI_EV_USER_CONFIRM_REQUEST:
5054 hci_user_confirm_request_evt(hdev, skb);
5057 case HCI_EV_USER_PASSKEY_REQUEST:
5058 hci_user_passkey_request_evt(hdev, skb);
5061 case HCI_EV_USER_PASSKEY_NOTIFY:
5062 hci_user_passkey_notify_evt(hdev, skb);
5065 case HCI_EV_KEYPRESS_NOTIFY:
5066 hci_keypress_notify_evt(hdev, skb);
5069 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5070 hci_simple_pair_complete_evt(hdev, skb);
5073 case HCI_EV_REMOTE_HOST_FEATURES:
5074 hci_remote_host_features_evt(hdev, skb);
5077 case HCI_EV_LE_META:
5078 hci_le_meta_evt(hdev, skb);
5081 case HCI_EV_CHANNEL_SELECTED:
5082 hci_chan_selected_evt(hdev, skb);
5085 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5086 hci_remote_oob_data_request_evt(hdev, skb);
5089 case HCI_EV_PHY_LINK_COMPLETE:
5090 hci_phy_link_complete_evt(hdev, skb);
5093 case HCI_EV_LOGICAL_LINK_COMPLETE:
5094 hci_loglink_complete_evt(hdev, skb);
5097 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5098 hci_disconn_loglink_complete_evt(hdev, skb);
5101 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5102 hci_disconn_phylink_complete_evt(hdev, skb);
5105 case HCI_EV_NUM_COMP_BLOCKS:
5106 hci_num_comp_blocks_evt(hdev, skb);
5110 BT_DBG("%s event 0x%2.2x", hdev->name, event);
5115 hdev->stat.evt_rx++;