2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
36 /* Handle HCI Event packets */
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
40 __u8 status = *((__u8 *) skb->data);
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
47 clear_bit(HCI_INQUIRY, &hdev->flags);
48 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
51 hci_conn_check_pending(hdev);
54 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
56 __u8 status = *((__u8 *) skb->data);
58 BT_DBG("%s status 0x%2.2x", hdev->name, status);
63 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
68 __u8 status = *((__u8 *) skb->data);
70 BT_DBG("%s status 0x%2.2x", hdev->name, status);
75 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
77 hci_conn_check_pending(hdev);
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
83 BT_DBG("%s", hdev->name);
86 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
88 struct hci_rp_role_discovery *rp = (void *) skb->data;
89 struct hci_conn *conn;
91 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
98 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
101 conn->link_mode &= ~HCI_LM_MASTER;
103 conn->link_mode |= HCI_LM_MASTER;
106 hci_dev_unlock(hdev);
109 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
111 struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 struct hci_conn *conn;
114 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
123 conn->link_policy = __le16_to_cpu(rp->policy);
125 hci_dev_unlock(hdev);
128 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
130 struct hci_rp_write_link_policy *rp = (void *) skb->data;
131 struct hci_conn *conn;
134 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
139 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
145 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
147 conn->link_policy = get_unaligned_le16(sent + 2);
149 hci_dev_unlock(hdev);
152 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
155 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
162 hdev->link_policy = __le16_to_cpu(rp->policy);
165 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
168 __u8 status = *((__u8 *) skb->data);
171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
173 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 hdev->link_policy = get_unaligned_le16(sent);
181 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
183 __u8 status = *((__u8 *) skb->data);
185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
187 clear_bit(HCI_RESET, &hdev->flags);
189 /* Reset all non-persistent flags */
190 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
192 hdev->discovery.state = DISCOVERY_STOPPED;
193 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
194 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
196 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197 hdev->adv_data_len = 0;
199 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
200 hdev->scan_rsp_data_len = 0;
202 hdev->le_scan_type = LE_SCAN_PASSIVE;
204 hdev->ssp_debug_mode = 0;
207 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
209 __u8 status = *((__u8 *) skb->data);
212 BT_DBG("%s status 0x%2.2x", hdev->name, status);
214 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
220 if (test_bit(HCI_MGMT, &hdev->dev_flags))
221 mgmt_set_local_name_complete(hdev, sent, status);
223 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
225 hci_dev_unlock(hdev);
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
243 __u8 status = *((__u8 *) skb->data);
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
253 __u8 param = *((__u8 *) sent);
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
258 clear_bit(HCI_AUTH, &hdev->flags);
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
267 __u8 status = *((__u8 *) skb->data);
270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
277 __u8 param = *((__u8 *) sent);
280 set_bit(HCI_ENCRYPT, &hdev->flags);
282 clear_bit(HCI_ENCRYPT, &hdev->flags);
286 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
288 __u8 param, status = *((__u8 *) skb->data);
289 int old_pscan, old_iscan;
292 BT_DBG("%s status 0x%2.2x", hdev->name, status);
294 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
298 param = *((__u8 *) sent);
303 mgmt_write_scan_failed(hdev, param, status);
304 hdev->discov_timeout = 0;
308 /* We need to ensure that we set this back on if someone changed
309 * the scan mode through a raw HCI socket.
311 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
313 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
314 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
316 if (param & SCAN_INQUIRY) {
317 set_bit(HCI_ISCAN, &hdev->flags);
319 mgmt_discoverable(hdev, 1);
320 } else if (old_iscan)
321 mgmt_discoverable(hdev, 0);
323 if (param & SCAN_PAGE) {
324 set_bit(HCI_PSCAN, &hdev->flags);
326 mgmt_connectable(hdev, 1);
327 } else if (old_pscan)
328 mgmt_connectable(hdev, 0);
331 hci_dev_unlock(hdev);
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
338 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
343 memcpy(hdev->dev_class, rp->dev_class, 3);
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
351 __u8 status = *((__u8 *) skb->data);
354 BT_DBG("%s status 0x%2.2x", hdev->name, status);
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
363 memcpy(hdev->dev_class, sent, 3);
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
368 hci_dev_unlock(hdev);
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
376 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
381 setting = __le16_to_cpu(rp->voice_setting);
383 if (hdev->voice_setting == setting)
386 hdev->voice_setting = setting;
388 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
397 __u8 status = *((__u8 *) skb->data);
401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
410 setting = get_unaligned_le16(sent);
412 if (hdev->voice_setting == setting)
415 hdev->voice_setting = setting;
417 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
423 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
426 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
428 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
433 hdev->num_iac = rp->num_iac;
435 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
438 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
440 __u8 status = *((__u8 *) skb->data);
441 struct hci_cp_write_ssp_mode *sent;
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
445 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
451 hdev->features[1][0] |= LMP_HOST_SSP;
453 hdev->features[1][0] &= ~LMP_HOST_SSP;
456 if (test_bit(HCI_MGMT, &hdev->dev_flags))
457 mgmt_ssp_enable_complete(hdev, sent->mode, status);
460 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
462 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
466 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
468 u8 status = *((u8 *) skb->data);
469 struct hci_cp_write_sc_support *sent;
471 BT_DBG("%s status 0x%2.2x", hdev->name, status);
473 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
479 hdev->features[1][0] |= LMP_HOST_SC;
481 hdev->features[1][0] &= ~LMP_HOST_SC;
484 if (test_bit(HCI_MGMT, &hdev->dev_flags))
485 mgmt_sc_enable_complete(hdev, sent->support, status);
488 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
490 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
494 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
496 struct hci_rp_read_local_version *rp = (void *) skb->data;
498 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
503 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
504 hdev->hci_ver = rp->hci_ver;
505 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
506 hdev->lmp_ver = rp->lmp_ver;
507 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
508 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
512 static void hci_cc_read_local_commands(struct hci_dev *hdev,
515 struct hci_rp_read_local_commands *rp = (void *) skb->data;
517 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
522 if (test_bit(HCI_SETUP, &hdev->dev_flags))
523 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
526 static void hci_cc_read_local_features(struct hci_dev *hdev,
529 struct hci_rp_read_local_features *rp = (void *) skb->data;
531 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
536 memcpy(hdev->features, rp->features, 8);
538 /* Adjust default settings according to features
539 * supported by device. */
541 if (hdev->features[0][0] & LMP_3SLOT)
542 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
544 if (hdev->features[0][0] & LMP_5SLOT)
545 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
547 if (hdev->features[0][1] & LMP_HV2) {
548 hdev->pkt_type |= (HCI_HV2);
549 hdev->esco_type |= (ESCO_HV2);
552 if (hdev->features[0][1] & LMP_HV3) {
553 hdev->pkt_type |= (HCI_HV3);
554 hdev->esco_type |= (ESCO_HV3);
557 if (lmp_esco_capable(hdev))
558 hdev->esco_type |= (ESCO_EV3);
560 if (hdev->features[0][4] & LMP_EV4)
561 hdev->esco_type |= (ESCO_EV4);
563 if (hdev->features[0][4] & LMP_EV5)
564 hdev->esco_type |= (ESCO_EV5);
566 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
567 hdev->esco_type |= (ESCO_2EV3);
569 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
570 hdev->esco_type |= (ESCO_3EV3);
572 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
573 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
576 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
579 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
581 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
586 if (hdev->max_page < rp->max_page)
587 hdev->max_page = rp->max_page;
589 if (rp->page < HCI_MAX_PAGES)
590 memcpy(hdev->features[rp->page], rp->features, 8);
593 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
596 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
598 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
601 hdev->flow_ctl_mode = rp->mode;
604 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
606 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
613 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
614 hdev->sco_mtu = rp->sco_mtu;
615 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
616 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
618 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
623 hdev->acl_cnt = hdev->acl_pkts;
624 hdev->sco_cnt = hdev->sco_pkts;
626 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
627 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
630 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
632 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
634 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
637 bacpy(&hdev->bdaddr, &rp->bdaddr);
640 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
643 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
645 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
647 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
648 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
649 hdev->page_scan_window = __le16_to_cpu(rp->window);
653 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
656 u8 status = *((u8 *) skb->data);
657 struct hci_cp_write_page_scan_activity *sent;
659 BT_DBG("%s status 0x%2.2x", hdev->name, status);
664 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
668 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
669 hdev->page_scan_window = __le16_to_cpu(sent->window);
672 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
675 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
677 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
679 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
680 hdev->page_scan_type = rp->type;
683 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
686 u8 status = *((u8 *) skb->data);
689 BT_DBG("%s status 0x%2.2x", hdev->name, status);
694 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
696 hdev->page_scan_type = *type;
699 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
702 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
704 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
709 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
710 hdev->block_len = __le16_to_cpu(rp->block_len);
711 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
713 hdev->block_cnt = hdev->num_blocks;
715 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
716 hdev->block_cnt, hdev->block_len);
719 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
722 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
724 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
729 hdev->amp_status = rp->amp_status;
730 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
731 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
732 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
733 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
734 hdev->amp_type = rp->amp_type;
735 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
736 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
737 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
738 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
741 a2mp_send_getinfo_rsp(hdev);
744 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
747 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
748 struct amp_assoc *assoc = &hdev->loc_assoc;
749 size_t rem_len, frag_len;
751 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
756 frag_len = skb->len - sizeof(*rp);
757 rem_len = __le16_to_cpu(rp->rem_len);
759 if (rem_len > frag_len) {
760 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
762 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
763 assoc->offset += frag_len;
765 /* Read other fragments */
766 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
771 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
772 assoc->len = assoc->offset + rem_len;
776 /* Send A2MP Rsp when all fragments are received */
777 a2mp_send_getampassoc_rsp(hdev, rp->status);
778 a2mp_send_create_phy_link_req(hdev, rp->status);
781 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
784 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
786 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
789 hdev->inq_tx_power = rp->tx_power;
792 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
794 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
795 struct hci_cp_pin_code_reply *cp;
796 struct hci_conn *conn;
798 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
802 if (test_bit(HCI_MGMT, &hdev->dev_flags))
803 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
808 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
812 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
814 conn->pin_length = cp->pin_len;
817 hci_dev_unlock(hdev);
820 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
822 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
824 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828 if (test_bit(HCI_MGMT, &hdev->dev_flags))
829 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
832 hci_dev_unlock(hdev);
835 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
838 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
840 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
845 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
846 hdev->le_pkts = rp->le_max_pkt;
848 hdev->le_cnt = hdev->le_pkts;
850 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
853 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
856 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
858 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
861 memcpy(hdev->le_features, rp->features, 8);
864 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
867 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
869 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
872 hdev->adv_tx_power = rp->tx_power;
875 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
877 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
879 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
883 if (test_bit(HCI_MGMT, &hdev->dev_flags))
884 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
887 hci_dev_unlock(hdev);
890 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
893 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
895 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
899 if (test_bit(HCI_MGMT, &hdev->dev_flags))
900 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
901 ACL_LINK, 0, rp->status);
903 hci_dev_unlock(hdev);
906 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
908 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
910 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
914 if (test_bit(HCI_MGMT, &hdev->dev_flags))
915 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
918 hci_dev_unlock(hdev);
921 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
924 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
926 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
930 if (test_bit(HCI_MGMT, &hdev->dev_flags))
931 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
932 ACL_LINK, 0, rp->status);
934 hci_dev_unlock(hdev);
937 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
940 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
942 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
945 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
946 NULL, NULL, rp->status);
947 hci_dev_unlock(hdev);
950 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
953 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
955 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
958 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
959 rp->hash256, rp->randomizer256,
961 hci_dev_unlock(hdev);
965 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
967 __u8 status = *((__u8 *) skb->data);
970 BT_DBG("%s status 0x%2.2x", hdev->name, status);
972 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
979 bacpy(&hdev->random_addr, sent);
981 hci_dev_unlock(hdev);
984 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
986 __u8 *sent, status = *((__u8 *) skb->data);
988 BT_DBG("%s status 0x%2.2x", hdev->name, status);
990 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
999 /* If we're doing connection initation as peripheral. Set a
1000 * timeout in case something goes wrong.
1003 struct hci_conn *conn;
1005 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1007 queue_delayed_work(hdev->workqueue,
1008 &conn->le_conn_timeout,
1009 HCI_LE_CONN_TIMEOUT);
1012 mgmt_advertising(hdev, *sent);
1014 hci_dev_unlock(hdev);
1017 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1019 struct hci_cp_le_set_scan_param *cp;
1020 __u8 status = *((__u8 *) skb->data);
1022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1024 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1031 hdev->le_scan_type = cp->type;
1033 hci_dev_unlock(hdev);
1036 static bool has_pending_adv_report(struct hci_dev *hdev)
1038 struct discovery_state *d = &hdev->discovery;
1040 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1043 static void clear_pending_adv_report(struct hci_dev *hdev)
1045 struct discovery_state *d = &hdev->discovery;
1047 bacpy(&d->last_adv_addr, BDADDR_ANY);
1048 d->last_adv_data_len = 0;
1051 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1052 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
1054 struct discovery_state *d = &hdev->discovery;
1056 bacpy(&d->last_adv_addr, bdaddr);
1057 d->last_adv_addr_type = bdaddr_type;
1058 d->last_adv_rssi = rssi;
1059 memcpy(d->last_adv_data, data, len);
1060 d->last_adv_data_len = len;
1063 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1064 struct sk_buff *skb)
1066 struct hci_cp_le_set_scan_enable *cp;
1067 __u8 status = *((__u8 *) skb->data);
1069 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1071 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1078 switch (cp->enable) {
1079 case LE_SCAN_ENABLE:
1080 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1081 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1082 clear_pending_adv_report(hdev);
1085 case LE_SCAN_DISABLE:
1086 /* We do this here instead of when setting DISCOVERY_STOPPED
1087 * since the latter would potentially require waiting for
1088 * inquiry to stop too.
1090 if (has_pending_adv_report(hdev)) {
1091 struct discovery_state *d = &hdev->discovery;
1093 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1094 d->last_adv_addr_type, NULL,
1095 d->last_adv_rssi, 0, 1,
1097 d->last_adv_data_len, NULL, 0);
1100 /* Cancel this timer so that we don't try to disable scanning
1101 * when it's already disabled.
1103 cancel_delayed_work(&hdev->le_scan_disable);
1105 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1106 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1107 * interrupted scanning due to a connect request. Mark
1108 * therefore discovery as stopped.
1110 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1112 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1116 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1121 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1122 struct sk_buff *skb)
1124 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1126 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1129 hdev->le_white_list_size = rp->size;
1132 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1133 struct sk_buff *skb)
1135 __u8 status = *((__u8 *) skb->data);
1137 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1140 hci_white_list_clear(hdev);
1143 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1144 struct sk_buff *skb)
1146 struct hci_cp_le_add_to_white_list *sent;
1147 __u8 status = *((__u8 *) skb->data);
1149 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1151 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1156 hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1159 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1160 struct sk_buff *skb)
1162 struct hci_cp_le_del_from_white_list *sent;
1163 __u8 status = *((__u8 *) skb->data);
1165 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1167 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1172 hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1175 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1176 struct sk_buff *skb)
1178 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1180 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1183 memcpy(hdev->le_states, rp->le_states, 8);
1186 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1187 struct sk_buff *skb)
1189 struct hci_cp_write_le_host_supported *sent;
1190 __u8 status = *((__u8 *) skb->data);
1192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1194 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1200 hdev->features[1][0] |= LMP_HOST_LE;
1201 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1203 hdev->features[1][0] &= ~LMP_HOST_LE;
1204 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1205 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1209 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1211 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1215 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1217 struct hci_cp_le_set_adv_param *cp;
1218 u8 status = *((u8 *) skb->data);
1220 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1225 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1230 hdev->adv_addr_type = cp->own_address_type;
1231 hci_dev_unlock(hdev);
1234 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1235 struct sk_buff *skb)
1237 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1239 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1240 hdev->name, rp->status, rp->phy_handle);
1245 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1248 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1250 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1253 hci_conn_check_pending(hdev);
1257 set_bit(HCI_INQUIRY, &hdev->flags);
1260 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1262 struct hci_cp_create_conn *cp;
1263 struct hci_conn *conn;
1265 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1267 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1273 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1275 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1278 if (conn && conn->state == BT_CONNECT) {
1279 if (status != 0x0c || conn->attempt > 2) {
1280 conn->state = BT_CLOSED;
1281 hci_proto_connect_cfm(conn, status);
1284 conn->state = BT_CONNECT2;
1288 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1291 conn->link_mode |= HCI_LM_MASTER;
1293 BT_ERR("No memory for new connection");
1297 hci_dev_unlock(hdev);
1300 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1302 struct hci_cp_add_sco *cp;
1303 struct hci_conn *acl, *sco;
1306 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1311 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1315 handle = __le16_to_cpu(cp->handle);
1317 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1321 acl = hci_conn_hash_lookup_handle(hdev, handle);
1325 sco->state = BT_CLOSED;
1327 hci_proto_connect_cfm(sco, status);
1332 hci_dev_unlock(hdev);
1335 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1337 struct hci_cp_auth_requested *cp;
1338 struct hci_conn *conn;
1340 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1345 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1351 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1353 if (conn->state == BT_CONFIG) {
1354 hci_proto_connect_cfm(conn, status);
1355 hci_conn_drop(conn);
1359 hci_dev_unlock(hdev);
1362 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1364 struct hci_cp_set_conn_encrypt *cp;
1365 struct hci_conn *conn;
1367 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1372 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1378 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1380 if (conn->state == BT_CONFIG) {
1381 hci_proto_connect_cfm(conn, status);
1382 hci_conn_drop(conn);
1386 hci_dev_unlock(hdev);
1389 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1390 struct hci_conn *conn)
1392 if (conn->state != BT_CONFIG || !conn->out)
1395 if (conn->pending_sec_level == BT_SECURITY_SDP)
1398 /* Only request authentication for SSP connections or non-SSP
1399 * devices with sec_level MEDIUM or HIGH or if MITM protection
1402 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1403 conn->pending_sec_level != BT_SECURITY_HIGH &&
1404 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1410 static int hci_resolve_name(struct hci_dev *hdev,
1411 struct inquiry_entry *e)
1413 struct hci_cp_remote_name_req cp;
1415 memset(&cp, 0, sizeof(cp));
1417 bacpy(&cp.bdaddr, &e->data.bdaddr);
1418 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1419 cp.pscan_mode = e->data.pscan_mode;
1420 cp.clock_offset = e->data.clock_offset;
1422 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1425 static bool hci_resolve_next_name(struct hci_dev *hdev)
1427 struct discovery_state *discov = &hdev->discovery;
1428 struct inquiry_entry *e;
1430 if (list_empty(&discov->resolve))
1433 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1437 if (hci_resolve_name(hdev, e) == 0) {
1438 e->name_state = NAME_PENDING;
1445 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1446 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1448 struct discovery_state *discov = &hdev->discovery;
1449 struct inquiry_entry *e;
1451 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1452 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1453 name_len, conn->dev_class);
1455 if (discov->state == DISCOVERY_STOPPED)
1458 if (discov->state == DISCOVERY_STOPPING)
1459 goto discov_complete;
1461 if (discov->state != DISCOVERY_RESOLVING)
1464 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1465 /* If the device was not found in a list of found devices names of which
1466 * are pending. there is no need to continue resolving a next name as it
1467 * will be done upon receiving another Remote Name Request Complete
1474 e->name_state = NAME_KNOWN;
1475 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1476 e->data.rssi, name, name_len);
1478 e->name_state = NAME_NOT_KNOWN;
1481 if (hci_resolve_next_name(hdev))
1485 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1488 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1490 struct hci_cp_remote_name_req *cp;
1491 struct hci_conn *conn;
1493 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1495 /* If successful wait for the name req complete event before
1496 * checking for the need to do authentication */
1500 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1506 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1508 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1509 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1514 if (!hci_outgoing_auth_needed(hdev, conn))
1517 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1518 struct hci_cp_auth_requested auth_cp;
1520 auth_cp.handle = __cpu_to_le16(conn->handle);
1521 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1522 sizeof(auth_cp), &auth_cp);
1526 hci_dev_unlock(hdev);
1529 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1531 struct hci_cp_read_remote_features *cp;
1532 struct hci_conn *conn;
1534 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1539 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1545 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1547 if (conn->state == BT_CONFIG) {
1548 hci_proto_connect_cfm(conn, status);
1549 hci_conn_drop(conn);
1553 hci_dev_unlock(hdev);
1556 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1558 struct hci_cp_read_remote_ext_features *cp;
1559 struct hci_conn *conn;
1561 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1566 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1572 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1574 if (conn->state == BT_CONFIG) {
1575 hci_proto_connect_cfm(conn, status);
1576 hci_conn_drop(conn);
1580 hci_dev_unlock(hdev);
1583 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1585 struct hci_cp_setup_sync_conn *cp;
1586 struct hci_conn *acl, *sco;
1589 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1594 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1598 handle = __le16_to_cpu(cp->handle);
1600 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1604 acl = hci_conn_hash_lookup_handle(hdev, handle);
1608 sco->state = BT_CLOSED;
1610 hci_proto_connect_cfm(sco, status);
1615 hci_dev_unlock(hdev);
1618 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1620 struct hci_cp_sniff_mode *cp;
1621 struct hci_conn *conn;
1623 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1628 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1634 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1636 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1638 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1639 hci_sco_setup(conn, status);
1642 hci_dev_unlock(hdev);
1645 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1647 struct hci_cp_exit_sniff_mode *cp;
1648 struct hci_conn *conn;
1650 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1655 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1661 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1663 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1665 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1666 hci_sco_setup(conn, status);
1669 hci_dev_unlock(hdev);
1672 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1674 struct hci_cp_disconnect *cp;
1675 struct hci_conn *conn;
1680 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1686 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1688 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1689 conn->dst_type, status);
1691 hci_dev_unlock(hdev);
1694 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1696 struct hci_cp_create_phy_link *cp;
1698 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1700 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1707 struct hci_conn *hcon;
1709 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1713 amp_write_remote_assoc(hdev, cp->phy_handle);
1716 hci_dev_unlock(hdev);
1719 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1721 struct hci_cp_accept_phy_link *cp;
1723 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1728 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1732 amp_write_remote_assoc(hdev, cp->phy_handle);
1735 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1737 struct hci_cp_le_create_conn *cp;
1738 struct hci_conn *conn;
1740 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1742 /* All connection failure handling is taken care of by the
1743 * hci_le_conn_failed function which is triggered by the HCI
1744 * request completion callbacks used for connecting.
1749 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1755 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1759 /* Store the initiator and responder address information which
1760 * is needed for SMP. These values will not change during the
1761 * lifetime of the connection.
1763 conn->init_addr_type = cp->own_address_type;
1764 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1765 bacpy(&conn->init_addr, &hdev->random_addr);
1767 bacpy(&conn->init_addr, &hdev->bdaddr);
1769 conn->resp_addr_type = cp->peer_addr_type;
1770 bacpy(&conn->resp_addr, &cp->peer_addr);
1772 /* We don't want the connection attempt to stick around
1773 * indefinitely since LE doesn't have a page timeout concept
1774 * like BR/EDR. Set a timer for any connection that doesn't use
1775 * the white list for connecting.
1777 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1778 queue_delayed_work(conn->hdev->workqueue,
1779 &conn->le_conn_timeout,
1780 HCI_LE_CONN_TIMEOUT);
1783 hci_dev_unlock(hdev);
1786 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1788 struct hci_cp_le_start_enc *cp;
1789 struct hci_conn *conn;
1791 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1798 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1802 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1806 if (conn->state != BT_CONNECTED)
1809 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1810 hci_conn_drop(conn);
1813 hci_dev_unlock(hdev);
1816 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1818 __u8 status = *((__u8 *) skb->data);
1819 struct discovery_state *discov = &hdev->discovery;
1820 struct inquiry_entry *e;
1822 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1824 hci_conn_check_pending(hdev);
1826 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1829 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1830 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1832 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1837 if (discov->state != DISCOVERY_FINDING)
1840 if (list_empty(&discov->resolve)) {
1841 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1845 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1846 if (e && hci_resolve_name(hdev, e) == 0) {
1847 e->name_state = NAME_PENDING;
1848 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1850 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1854 hci_dev_unlock(hdev);
1857 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1859 struct inquiry_data data;
1860 struct inquiry_info *info = (void *) (skb->data + 1);
1861 int num_rsp = *((__u8 *) skb->data);
1863 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1868 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1873 for (; num_rsp; num_rsp--, info++) {
1874 bool name_known, ssp;
1876 bacpy(&data.bdaddr, &info->bdaddr);
1877 data.pscan_rep_mode = info->pscan_rep_mode;
1878 data.pscan_period_mode = info->pscan_period_mode;
1879 data.pscan_mode = info->pscan_mode;
1880 memcpy(data.dev_class, info->dev_class, 3);
1881 data.clock_offset = info->clock_offset;
1883 data.ssp_mode = 0x00;
1885 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1886 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1887 info->dev_class, 0, !name_known, ssp, NULL,
1891 hci_dev_unlock(hdev);
1894 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1896 struct hci_ev_conn_complete *ev = (void *) skb->data;
1897 struct hci_conn *conn;
1899 BT_DBG("%s", hdev->name);
1903 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1905 if (ev->link_type != SCO_LINK)
1908 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1912 conn->type = SCO_LINK;
1916 conn->handle = __le16_to_cpu(ev->handle);
1918 if (conn->type == ACL_LINK) {
1919 conn->state = BT_CONFIG;
1920 hci_conn_hold(conn);
1922 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1923 !hci_find_link_key(hdev, &ev->bdaddr))
1924 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1926 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1928 conn->state = BT_CONNECTED;
1930 hci_conn_add_sysfs(conn);
1932 if (test_bit(HCI_AUTH, &hdev->flags))
1933 conn->link_mode |= HCI_LM_AUTH;
1935 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1936 conn->link_mode |= HCI_LM_ENCRYPT;
1938 /* Get remote features */
1939 if (conn->type == ACL_LINK) {
1940 struct hci_cp_read_remote_features cp;
1941 cp.handle = ev->handle;
1942 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1946 /* Set packet type for incoming connection */
1947 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1948 struct hci_cp_change_conn_ptype cp;
1949 cp.handle = ev->handle;
1950 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1951 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1955 conn->state = BT_CLOSED;
1956 if (conn->type == ACL_LINK)
1957 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1958 conn->dst_type, ev->status);
1961 if (conn->type == ACL_LINK)
1962 hci_sco_setup(conn, ev->status);
1965 hci_proto_connect_cfm(conn, ev->status);
1967 } else if (ev->link_type != ACL_LINK)
1968 hci_proto_connect_cfm(conn, ev->status);
1971 hci_dev_unlock(hdev);
1973 hci_conn_check_pending(hdev);
1976 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1978 struct hci_ev_conn_request *ev = (void *) skb->data;
1979 int mask = hdev->link_mode;
1982 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1985 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1988 if ((mask & HCI_LM_ACCEPT) &&
1989 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
1990 /* Connection accepted */
1991 struct inquiry_entry *ie;
1992 struct hci_conn *conn;
1996 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1998 memcpy(ie->data.dev_class, ev->dev_class, 3);
2000 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2003 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
2005 BT_ERR("No memory for new connection");
2006 hci_dev_unlock(hdev);
2011 memcpy(conn->dev_class, ev->dev_class, 3);
2013 hci_dev_unlock(hdev);
2015 if (ev->link_type == ACL_LINK ||
2016 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2017 struct hci_cp_accept_conn_req cp;
2018 conn->state = BT_CONNECT;
2020 bacpy(&cp.bdaddr, &ev->bdaddr);
2022 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2023 cp.role = 0x00; /* Become master */
2025 cp.role = 0x01; /* Remain slave */
2027 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
2029 } else if (!(flags & HCI_PROTO_DEFER)) {
2030 struct hci_cp_accept_sync_conn_req cp;
2031 conn->state = BT_CONNECT;
2033 bacpy(&cp.bdaddr, &ev->bdaddr);
2034 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2036 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2037 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2038 cp.max_latency = cpu_to_le16(0xffff);
2039 cp.content_format = cpu_to_le16(hdev->voice_setting);
2040 cp.retrans_effort = 0xff;
2042 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
2045 conn->state = BT_CONNECT2;
2046 hci_proto_connect_cfm(conn, 0);
2049 /* Connection rejected */
2050 struct hci_cp_reject_conn_req cp;
2052 bacpy(&cp.bdaddr, &ev->bdaddr);
2053 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2054 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2058 static u8 hci_to_mgmt_reason(u8 err)
2061 case HCI_ERROR_CONNECTION_TIMEOUT:
2062 return MGMT_DEV_DISCONN_TIMEOUT;
2063 case HCI_ERROR_REMOTE_USER_TERM:
2064 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2065 case HCI_ERROR_REMOTE_POWER_OFF:
2066 return MGMT_DEV_DISCONN_REMOTE;
2067 case HCI_ERROR_LOCAL_HOST_TERM:
2068 return MGMT_DEV_DISCONN_LOCAL_HOST;
2070 return MGMT_DEV_DISCONN_UNKNOWN;
2074 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2076 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2077 u8 reason = hci_to_mgmt_reason(ev->reason);
2078 struct hci_conn_params *params;
2079 struct hci_conn *conn;
2080 bool mgmt_connected;
2083 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2087 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2092 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2093 conn->dst_type, ev->status);
2097 conn->state = BT_CLOSED;
2099 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2100 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2101 reason, mgmt_connected);
2103 if (conn->type == ACL_LINK && conn->flush_key)
2104 hci_remove_link_key(hdev, &conn->dst);
2106 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2108 switch (params->auto_connect) {
2109 case HCI_AUTO_CONN_LINK_LOSS:
2110 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2114 case HCI_AUTO_CONN_ALWAYS:
2115 hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
2125 hci_proto_disconn_cfm(conn, ev->reason);
2128 /* Re-enable advertising if necessary, since it might
2129 * have been disabled by the connection. From the
2130 * HCI_LE_Set_Advertise_Enable command description in
2131 * the core specification (v4.0):
2132 * "The Controller shall continue advertising until the Host
2133 * issues an LE_Set_Advertise_Enable command with
2134 * Advertising_Enable set to 0x00 (Advertising is disabled)
2135 * or until a connection is created or until the Advertising
2136 * is timed out due to Directed Advertising."
2138 if (type == LE_LINK)
2139 mgmt_reenable_advertising(hdev);
2142 hci_dev_unlock(hdev);
2145 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2147 struct hci_ev_auth_complete *ev = (void *) skb->data;
2148 struct hci_conn *conn;
2150 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2154 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2159 if (!hci_conn_ssp_enabled(conn) &&
2160 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2161 BT_INFO("re-auth of legacy device is not possible.");
2163 conn->link_mode |= HCI_LM_AUTH;
2164 conn->sec_level = conn->pending_sec_level;
2167 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
2171 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2172 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2174 if (conn->state == BT_CONFIG) {
2175 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2176 struct hci_cp_set_conn_encrypt cp;
2177 cp.handle = ev->handle;
2179 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2182 conn->state = BT_CONNECTED;
2183 hci_proto_connect_cfm(conn, ev->status);
2184 hci_conn_drop(conn);
2187 hci_auth_cfm(conn, ev->status);
2189 hci_conn_hold(conn);
2190 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2191 hci_conn_drop(conn);
2194 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2196 struct hci_cp_set_conn_encrypt cp;
2197 cp.handle = ev->handle;
2199 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2202 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2203 hci_encrypt_cfm(conn, ev->status, 0x00);
2208 hci_dev_unlock(hdev);
2211 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2213 struct hci_ev_remote_name *ev = (void *) skb->data;
2214 struct hci_conn *conn;
2216 BT_DBG("%s", hdev->name);
2218 hci_conn_check_pending(hdev);
2222 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2224 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2227 if (ev->status == 0)
2228 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2229 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2231 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2237 if (!hci_outgoing_auth_needed(hdev, conn))
2240 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2241 struct hci_cp_auth_requested cp;
2242 cp.handle = __cpu_to_le16(conn->handle);
2243 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2247 hci_dev_unlock(hdev);
2250 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2252 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2253 struct hci_conn *conn;
2255 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2259 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2265 /* Encryption implies authentication */
2266 conn->link_mode |= HCI_LM_AUTH;
2267 conn->link_mode |= HCI_LM_ENCRYPT;
2268 conn->sec_level = conn->pending_sec_level;
2270 /* P-256 authentication key implies FIPS */
2271 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2272 conn->link_mode |= HCI_LM_FIPS;
2274 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2275 conn->type == LE_LINK)
2276 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2278 conn->link_mode &= ~HCI_LM_ENCRYPT;
2279 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2283 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2285 if (ev->status && conn->state == BT_CONNECTED) {
2286 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2287 hci_conn_drop(conn);
2291 if (conn->state == BT_CONFIG) {
2293 conn->state = BT_CONNECTED;
2295 /* In Secure Connections Only mode, do not allow any
2296 * connections that are not encrypted with AES-CCM
2297 * using a P-256 authenticated combination key.
2299 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2300 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2301 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2302 hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2303 hci_conn_drop(conn);
2307 hci_proto_connect_cfm(conn, ev->status);
2308 hci_conn_drop(conn);
2310 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2313 hci_dev_unlock(hdev);
2316 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2317 struct sk_buff *skb)
2319 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2320 struct hci_conn *conn;
2322 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2326 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2329 conn->link_mode |= HCI_LM_SECURE;
2331 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2333 hci_key_change_cfm(conn, ev->status);
2336 hci_dev_unlock(hdev);
2339 static void hci_remote_features_evt(struct hci_dev *hdev,
2340 struct sk_buff *skb)
2342 struct hci_ev_remote_features *ev = (void *) skb->data;
2343 struct hci_conn *conn;
2345 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2349 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2354 memcpy(conn->features[0], ev->features, 8);
2356 if (conn->state != BT_CONFIG)
2359 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2360 struct hci_cp_read_remote_ext_features cp;
2361 cp.handle = ev->handle;
2363 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2368 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2369 struct hci_cp_remote_name_req cp;
2370 memset(&cp, 0, sizeof(cp));
2371 bacpy(&cp.bdaddr, &conn->dst);
2372 cp.pscan_rep_mode = 0x02;
2373 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2374 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2375 mgmt_device_connected(hdev, &conn->dst, conn->type,
2376 conn->dst_type, 0, NULL, 0,
2379 if (!hci_outgoing_auth_needed(hdev, conn)) {
2380 conn->state = BT_CONNECTED;
2381 hci_proto_connect_cfm(conn, ev->status);
2382 hci_conn_drop(conn);
2386 hci_dev_unlock(hdev);
2389 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2391 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2392 u8 status = skb->data[sizeof(*ev)];
2395 skb_pull(skb, sizeof(*ev));
2397 opcode = __le16_to_cpu(ev->opcode);
2400 case HCI_OP_INQUIRY_CANCEL:
2401 hci_cc_inquiry_cancel(hdev, skb);
2404 case HCI_OP_PERIODIC_INQ:
2405 hci_cc_periodic_inq(hdev, skb);
2408 case HCI_OP_EXIT_PERIODIC_INQ:
2409 hci_cc_exit_periodic_inq(hdev, skb);
2412 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2413 hci_cc_remote_name_req_cancel(hdev, skb);
2416 case HCI_OP_ROLE_DISCOVERY:
2417 hci_cc_role_discovery(hdev, skb);
2420 case HCI_OP_READ_LINK_POLICY:
2421 hci_cc_read_link_policy(hdev, skb);
2424 case HCI_OP_WRITE_LINK_POLICY:
2425 hci_cc_write_link_policy(hdev, skb);
2428 case HCI_OP_READ_DEF_LINK_POLICY:
2429 hci_cc_read_def_link_policy(hdev, skb);
2432 case HCI_OP_WRITE_DEF_LINK_POLICY:
2433 hci_cc_write_def_link_policy(hdev, skb);
2437 hci_cc_reset(hdev, skb);
2440 case HCI_OP_WRITE_LOCAL_NAME:
2441 hci_cc_write_local_name(hdev, skb);
2444 case HCI_OP_READ_LOCAL_NAME:
2445 hci_cc_read_local_name(hdev, skb);
2448 case HCI_OP_WRITE_AUTH_ENABLE:
2449 hci_cc_write_auth_enable(hdev, skb);
2452 case HCI_OP_WRITE_ENCRYPT_MODE:
2453 hci_cc_write_encrypt_mode(hdev, skb);
2456 case HCI_OP_WRITE_SCAN_ENABLE:
2457 hci_cc_write_scan_enable(hdev, skb);
2460 case HCI_OP_READ_CLASS_OF_DEV:
2461 hci_cc_read_class_of_dev(hdev, skb);
2464 case HCI_OP_WRITE_CLASS_OF_DEV:
2465 hci_cc_write_class_of_dev(hdev, skb);
2468 case HCI_OP_READ_VOICE_SETTING:
2469 hci_cc_read_voice_setting(hdev, skb);
2472 case HCI_OP_WRITE_VOICE_SETTING:
2473 hci_cc_write_voice_setting(hdev, skb);
2476 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2477 hci_cc_read_num_supported_iac(hdev, skb);
2480 case HCI_OP_WRITE_SSP_MODE:
2481 hci_cc_write_ssp_mode(hdev, skb);
2484 case HCI_OP_WRITE_SC_SUPPORT:
2485 hci_cc_write_sc_support(hdev, skb);
2488 case HCI_OP_READ_LOCAL_VERSION:
2489 hci_cc_read_local_version(hdev, skb);
2492 case HCI_OP_READ_LOCAL_COMMANDS:
2493 hci_cc_read_local_commands(hdev, skb);
2496 case HCI_OP_READ_LOCAL_FEATURES:
2497 hci_cc_read_local_features(hdev, skb);
2500 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2501 hci_cc_read_local_ext_features(hdev, skb);
2504 case HCI_OP_READ_BUFFER_SIZE:
2505 hci_cc_read_buffer_size(hdev, skb);
2508 case HCI_OP_READ_BD_ADDR:
2509 hci_cc_read_bd_addr(hdev, skb);
2512 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2513 hci_cc_read_page_scan_activity(hdev, skb);
2516 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2517 hci_cc_write_page_scan_activity(hdev, skb);
2520 case HCI_OP_READ_PAGE_SCAN_TYPE:
2521 hci_cc_read_page_scan_type(hdev, skb);
2524 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2525 hci_cc_write_page_scan_type(hdev, skb);
2528 case HCI_OP_READ_DATA_BLOCK_SIZE:
2529 hci_cc_read_data_block_size(hdev, skb);
2532 case HCI_OP_READ_FLOW_CONTROL_MODE:
2533 hci_cc_read_flow_control_mode(hdev, skb);
2536 case HCI_OP_READ_LOCAL_AMP_INFO:
2537 hci_cc_read_local_amp_info(hdev, skb);
2540 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2541 hci_cc_read_local_amp_assoc(hdev, skb);
2544 case HCI_OP_READ_INQ_RSP_TX_POWER:
2545 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2548 case HCI_OP_PIN_CODE_REPLY:
2549 hci_cc_pin_code_reply(hdev, skb);
2552 case HCI_OP_PIN_CODE_NEG_REPLY:
2553 hci_cc_pin_code_neg_reply(hdev, skb);
2556 case HCI_OP_READ_LOCAL_OOB_DATA:
2557 hci_cc_read_local_oob_data(hdev, skb);
2560 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2561 hci_cc_read_local_oob_ext_data(hdev, skb);
2564 case HCI_OP_LE_READ_BUFFER_SIZE:
2565 hci_cc_le_read_buffer_size(hdev, skb);
2568 case HCI_OP_LE_READ_LOCAL_FEATURES:
2569 hci_cc_le_read_local_features(hdev, skb);
2572 case HCI_OP_LE_READ_ADV_TX_POWER:
2573 hci_cc_le_read_adv_tx_power(hdev, skb);
2576 case HCI_OP_USER_CONFIRM_REPLY:
2577 hci_cc_user_confirm_reply(hdev, skb);
2580 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2581 hci_cc_user_confirm_neg_reply(hdev, skb);
2584 case HCI_OP_USER_PASSKEY_REPLY:
2585 hci_cc_user_passkey_reply(hdev, skb);
2588 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2589 hci_cc_user_passkey_neg_reply(hdev, skb);
2592 case HCI_OP_LE_SET_RANDOM_ADDR:
2593 hci_cc_le_set_random_addr(hdev, skb);
2596 case HCI_OP_LE_SET_ADV_ENABLE:
2597 hci_cc_le_set_adv_enable(hdev, skb);
2600 case HCI_OP_LE_SET_SCAN_PARAM:
2601 hci_cc_le_set_scan_param(hdev, skb);
2604 case HCI_OP_LE_SET_SCAN_ENABLE:
2605 hci_cc_le_set_scan_enable(hdev, skb);
2608 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2609 hci_cc_le_read_white_list_size(hdev, skb);
2612 case HCI_OP_LE_CLEAR_WHITE_LIST:
2613 hci_cc_le_clear_white_list(hdev, skb);
2616 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2617 hci_cc_le_add_to_white_list(hdev, skb);
2620 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2621 hci_cc_le_del_from_white_list(hdev, skb);
2624 case HCI_OP_LE_READ_SUPPORTED_STATES:
2625 hci_cc_le_read_supported_states(hdev, skb);
2628 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2629 hci_cc_write_le_host_supported(hdev, skb);
2632 case HCI_OP_LE_SET_ADV_PARAM:
2633 hci_cc_set_adv_param(hdev, skb);
2636 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2637 hci_cc_write_remote_amp_assoc(hdev, skb);
2641 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2645 if (opcode != HCI_OP_NOP)
2646 del_timer(&hdev->cmd_timer);
2648 hci_req_cmd_complete(hdev, opcode, status);
2650 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2651 atomic_set(&hdev->cmd_cnt, 1);
2652 if (!skb_queue_empty(&hdev->cmd_q))
2653 queue_work(hdev->workqueue, &hdev->cmd_work);
2657 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2659 struct hci_ev_cmd_status *ev = (void *) skb->data;
2662 skb_pull(skb, sizeof(*ev));
2664 opcode = __le16_to_cpu(ev->opcode);
2667 case HCI_OP_INQUIRY:
2668 hci_cs_inquiry(hdev, ev->status);
2671 case HCI_OP_CREATE_CONN:
2672 hci_cs_create_conn(hdev, ev->status);
2675 case HCI_OP_ADD_SCO:
2676 hci_cs_add_sco(hdev, ev->status);
2679 case HCI_OP_AUTH_REQUESTED:
2680 hci_cs_auth_requested(hdev, ev->status);
2683 case HCI_OP_SET_CONN_ENCRYPT:
2684 hci_cs_set_conn_encrypt(hdev, ev->status);
2687 case HCI_OP_REMOTE_NAME_REQ:
2688 hci_cs_remote_name_req(hdev, ev->status);
2691 case HCI_OP_READ_REMOTE_FEATURES:
2692 hci_cs_read_remote_features(hdev, ev->status);
2695 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2696 hci_cs_read_remote_ext_features(hdev, ev->status);
2699 case HCI_OP_SETUP_SYNC_CONN:
2700 hci_cs_setup_sync_conn(hdev, ev->status);
2703 case HCI_OP_SNIFF_MODE:
2704 hci_cs_sniff_mode(hdev, ev->status);
2707 case HCI_OP_EXIT_SNIFF_MODE:
2708 hci_cs_exit_sniff_mode(hdev, ev->status);
2711 case HCI_OP_DISCONNECT:
2712 hci_cs_disconnect(hdev, ev->status);
2715 case HCI_OP_CREATE_PHY_LINK:
2716 hci_cs_create_phylink(hdev, ev->status);
2719 case HCI_OP_ACCEPT_PHY_LINK:
2720 hci_cs_accept_phylink(hdev, ev->status);
2723 case HCI_OP_LE_CREATE_CONN:
2724 hci_cs_le_create_conn(hdev, ev->status);
2727 case HCI_OP_LE_START_ENC:
2728 hci_cs_le_start_enc(hdev, ev->status);
2732 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2736 if (opcode != HCI_OP_NOP)
2737 del_timer(&hdev->cmd_timer);
2740 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2741 hci_req_cmd_complete(hdev, opcode, ev->status);
2743 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2744 atomic_set(&hdev->cmd_cnt, 1);
2745 if (!skb_queue_empty(&hdev->cmd_q))
2746 queue_work(hdev->workqueue, &hdev->cmd_work);
2750 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2752 struct hci_ev_role_change *ev = (void *) skb->data;
2753 struct hci_conn *conn;
2755 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2759 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2763 conn->link_mode &= ~HCI_LM_MASTER;
2765 conn->link_mode |= HCI_LM_MASTER;
2768 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2770 hci_role_switch_cfm(conn, ev->status, ev->role);
2773 hci_dev_unlock(hdev);
2776 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2778 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2781 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2782 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2786 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2787 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2788 BT_DBG("%s bad parameters", hdev->name);
2792 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2794 for (i = 0; i < ev->num_hndl; i++) {
2795 struct hci_comp_pkts_info *info = &ev->handles[i];
2796 struct hci_conn *conn;
2797 __u16 handle, count;
2799 handle = __le16_to_cpu(info->handle);
2800 count = __le16_to_cpu(info->count);
2802 conn = hci_conn_hash_lookup_handle(hdev, handle);
2806 conn->sent -= count;
2808 switch (conn->type) {
2810 hdev->acl_cnt += count;
2811 if (hdev->acl_cnt > hdev->acl_pkts)
2812 hdev->acl_cnt = hdev->acl_pkts;
2816 if (hdev->le_pkts) {
2817 hdev->le_cnt += count;
2818 if (hdev->le_cnt > hdev->le_pkts)
2819 hdev->le_cnt = hdev->le_pkts;
2821 hdev->acl_cnt += count;
2822 if (hdev->acl_cnt > hdev->acl_pkts)
2823 hdev->acl_cnt = hdev->acl_pkts;
2828 hdev->sco_cnt += count;
2829 if (hdev->sco_cnt > hdev->sco_pkts)
2830 hdev->sco_cnt = hdev->sco_pkts;
2834 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2839 queue_work(hdev->workqueue, &hdev->tx_work);
2842 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2845 struct hci_chan *chan;
2847 switch (hdev->dev_type) {
2849 return hci_conn_hash_lookup_handle(hdev, handle);
2851 chan = hci_chan_lookup_handle(hdev, handle);
2856 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2863 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2865 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2868 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2869 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2873 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2874 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2875 BT_DBG("%s bad parameters", hdev->name);
2879 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2882 for (i = 0; i < ev->num_hndl; i++) {
2883 struct hci_comp_blocks_info *info = &ev->handles[i];
2884 struct hci_conn *conn = NULL;
2885 __u16 handle, block_count;
2887 handle = __le16_to_cpu(info->handle);
2888 block_count = __le16_to_cpu(info->blocks);
2890 conn = __hci_conn_lookup_handle(hdev, handle);
2894 conn->sent -= block_count;
2896 switch (conn->type) {
2899 hdev->block_cnt += block_count;
2900 if (hdev->block_cnt > hdev->num_blocks)
2901 hdev->block_cnt = hdev->num_blocks;
2905 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2910 queue_work(hdev->workqueue, &hdev->tx_work);
2913 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2915 struct hci_ev_mode_change *ev = (void *) skb->data;
2916 struct hci_conn *conn;
2918 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2922 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2924 conn->mode = ev->mode;
2926 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2928 if (conn->mode == HCI_CM_ACTIVE)
2929 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2931 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2934 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2935 hci_sco_setup(conn, ev->status);
2938 hci_dev_unlock(hdev);
2941 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2943 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2944 struct hci_conn *conn;
2946 BT_DBG("%s", hdev->name);
2950 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2954 if (conn->state == BT_CONNECTED) {
2955 hci_conn_hold(conn);
2956 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2957 hci_conn_drop(conn);
2960 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2961 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2962 sizeof(ev->bdaddr), &ev->bdaddr);
2963 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2966 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2971 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2975 hci_dev_unlock(hdev);
2978 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2980 struct hci_ev_link_key_req *ev = (void *) skb->data;
2981 struct hci_cp_link_key_reply cp;
2982 struct hci_conn *conn;
2983 struct link_key *key;
2985 BT_DBG("%s", hdev->name);
2987 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2992 key = hci_find_link_key(hdev, &ev->bdaddr);
2994 BT_DBG("%s link key not found for %pMR", hdev->name,
2999 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3002 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
3003 key->type == HCI_LK_DEBUG_COMBINATION) {
3004 BT_DBG("%s ignoring debug key", hdev->name);
3008 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3010 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3011 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3012 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3013 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3017 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3018 conn->pending_sec_level == BT_SECURITY_HIGH) {
3019 BT_DBG("%s ignoring key unauthenticated for high security",
3024 conn->key_type = key->type;
3025 conn->pin_length = key->pin_len;
3028 bacpy(&cp.bdaddr, &ev->bdaddr);
3029 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3031 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3033 hci_dev_unlock(hdev);
3038 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3039 hci_dev_unlock(hdev);
3042 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3044 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3045 struct hci_conn *conn;
3048 BT_DBG("%s", hdev->name);
3052 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3054 hci_conn_hold(conn);
3055 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3056 pin_len = conn->pin_length;
3058 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
3059 conn->key_type = ev->key_type;
3061 hci_conn_drop(conn);
3064 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3065 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
3066 ev->key_type, pin_len);
3068 hci_dev_unlock(hdev);
3071 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3073 struct hci_ev_clock_offset *ev = (void *) skb->data;
3074 struct hci_conn *conn;
3076 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3080 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3081 if (conn && !ev->status) {
3082 struct inquiry_entry *ie;
3084 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3086 ie->data.clock_offset = ev->clock_offset;
3087 ie->timestamp = jiffies;
3091 hci_dev_unlock(hdev);
3094 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3096 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3097 struct hci_conn *conn;
3099 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3104 if (conn && !ev->status)
3105 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3107 hci_dev_unlock(hdev);
3110 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3112 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3113 struct inquiry_entry *ie;
3115 BT_DBG("%s", hdev->name);
3119 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3121 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3122 ie->timestamp = jiffies;
3125 hci_dev_unlock(hdev);
3128 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3129 struct sk_buff *skb)
3131 struct inquiry_data data;
3132 int num_rsp = *((__u8 *) skb->data);
3133 bool name_known, ssp;
3135 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3140 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3145 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3146 struct inquiry_info_with_rssi_and_pscan_mode *info;
3147 info = (void *) (skb->data + 1);
3149 for (; num_rsp; num_rsp--, info++) {
3150 bacpy(&data.bdaddr, &info->bdaddr);
3151 data.pscan_rep_mode = info->pscan_rep_mode;
3152 data.pscan_period_mode = info->pscan_period_mode;
3153 data.pscan_mode = info->pscan_mode;
3154 memcpy(data.dev_class, info->dev_class, 3);
3155 data.clock_offset = info->clock_offset;
3156 data.rssi = info->rssi;
3157 data.ssp_mode = 0x00;
3159 name_known = hci_inquiry_cache_update(hdev, &data,
3161 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3162 info->dev_class, info->rssi,
3163 !name_known, ssp, NULL, 0, NULL, 0);
3166 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3168 for (; num_rsp; num_rsp--, info++) {
3169 bacpy(&data.bdaddr, &info->bdaddr);
3170 data.pscan_rep_mode = info->pscan_rep_mode;
3171 data.pscan_period_mode = info->pscan_period_mode;
3172 data.pscan_mode = 0x00;
3173 memcpy(data.dev_class, info->dev_class, 3);
3174 data.clock_offset = info->clock_offset;
3175 data.rssi = info->rssi;
3176 data.ssp_mode = 0x00;
3177 name_known = hci_inquiry_cache_update(hdev, &data,
3179 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3180 info->dev_class, info->rssi,
3181 !name_known, ssp, NULL, 0, NULL, 0);
3185 hci_dev_unlock(hdev);
3188 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3189 struct sk_buff *skb)
3191 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3192 struct hci_conn *conn;
3194 BT_DBG("%s", hdev->name);
3198 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3202 if (ev->page < HCI_MAX_PAGES)
3203 memcpy(conn->features[ev->page], ev->features, 8);
3205 if (!ev->status && ev->page == 0x01) {
3206 struct inquiry_entry *ie;
3208 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3210 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3212 if (ev->features[0] & LMP_HOST_SSP) {
3213 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3215 /* It is mandatory by the Bluetooth specification that
3216 * Extended Inquiry Results are only used when Secure
3217 * Simple Pairing is enabled, but some devices violate
3220 * To make these devices work, the internal SSP
3221 * enabled flag needs to be cleared if the remote host
3222 * features do not indicate SSP support */
3223 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3226 if (ev->features[0] & LMP_HOST_SC)
3227 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3230 if (conn->state != BT_CONFIG)
3233 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3234 struct hci_cp_remote_name_req cp;
3235 memset(&cp, 0, sizeof(cp));
3236 bacpy(&cp.bdaddr, &conn->dst);
3237 cp.pscan_rep_mode = 0x02;
3238 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3239 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3240 mgmt_device_connected(hdev, &conn->dst, conn->type,
3241 conn->dst_type, 0, NULL, 0,
3244 if (!hci_outgoing_auth_needed(hdev, conn)) {
3245 conn->state = BT_CONNECTED;
3246 hci_proto_connect_cfm(conn, ev->status);
3247 hci_conn_drop(conn);
3251 hci_dev_unlock(hdev);
3254 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3255 struct sk_buff *skb)
3257 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3258 struct hci_conn *conn;
3260 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3264 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3266 if (ev->link_type == ESCO_LINK)
3269 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3273 conn->type = SCO_LINK;
3276 switch (ev->status) {
3278 conn->handle = __le16_to_cpu(ev->handle);
3279 conn->state = BT_CONNECTED;
3281 hci_conn_add_sysfs(conn);
3284 case 0x0d: /* Connection Rejected due to Limited Resources */
3285 case 0x11: /* Unsupported Feature or Parameter Value */
3286 case 0x1c: /* SCO interval rejected */
3287 case 0x1a: /* Unsupported Remote Feature */
3288 case 0x1f: /* Unspecified error */
3289 case 0x20: /* Unsupported LMP Parameter value */
3291 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3292 (hdev->esco_type & EDR_ESCO_MASK);
3293 if (hci_setup_sync(conn, conn->link->handle))
3299 conn->state = BT_CLOSED;
3303 hci_proto_connect_cfm(conn, ev->status);
3308 hci_dev_unlock(hdev);
3311 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3315 while (parsed < eir_len) {
3316 u8 field_len = eir[0];
3321 parsed += field_len + 1;
3322 eir += field_len + 1;
3328 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3329 struct sk_buff *skb)
3331 struct inquiry_data data;
3332 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3333 int num_rsp = *((__u8 *) skb->data);
3336 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3341 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3346 for (; num_rsp; num_rsp--, info++) {
3347 bool name_known, ssp;
3349 bacpy(&data.bdaddr, &info->bdaddr);
3350 data.pscan_rep_mode = info->pscan_rep_mode;
3351 data.pscan_period_mode = info->pscan_period_mode;
3352 data.pscan_mode = 0x00;
3353 memcpy(data.dev_class, info->dev_class, 3);
3354 data.clock_offset = info->clock_offset;
3355 data.rssi = info->rssi;
3356 data.ssp_mode = 0x01;
3358 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3359 name_known = eir_has_data_type(info->data,
3365 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3367 eir_len = eir_get_length(info->data, sizeof(info->data));
3368 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3369 info->dev_class, info->rssi, !name_known,
3370 ssp, info->data, eir_len, NULL, 0);
3373 hci_dev_unlock(hdev);
3376 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3377 struct sk_buff *skb)
3379 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3380 struct hci_conn *conn;
3382 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3383 __le16_to_cpu(ev->handle));
3387 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3391 /* For BR/EDR the necessary steps are taken through the
3392 * auth_complete event.
3394 if (conn->type != LE_LINK)
3398 conn->sec_level = conn->pending_sec_level;
3400 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3402 if (ev->status && conn->state == BT_CONNECTED) {
3403 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3404 hci_conn_drop(conn);
3408 if (conn->state == BT_CONFIG) {
3410 conn->state = BT_CONNECTED;
3412 hci_proto_connect_cfm(conn, ev->status);
3413 hci_conn_drop(conn);
3415 hci_auth_cfm(conn, ev->status);
3417 hci_conn_hold(conn);
3418 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3419 hci_conn_drop(conn);
3423 hci_dev_unlock(hdev);
3426 static u8 hci_get_auth_req(struct hci_conn *conn)
3428 /* If remote requests no-bonding follow that lead */
3429 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3430 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3431 return conn->remote_auth | (conn->auth_type & 0x01);
3433 /* If both remote and local have enough IO capabilities, require
3436 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3437 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3438 return conn->remote_auth | 0x01;
3440 /* No MITM protection possible so ignore remote requirement */
3441 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3444 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3446 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3447 struct hci_conn *conn;
3449 BT_DBG("%s", hdev->name);
3453 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3457 hci_conn_hold(conn);
3459 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3462 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3463 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3464 struct hci_cp_io_capability_reply cp;
3466 bacpy(&cp.bdaddr, &ev->bdaddr);
3467 /* Change the IO capability from KeyboardDisplay
3468 * to DisplayYesNo as it is not supported by BT spec. */
3469 cp.capability = (conn->io_capability == 0x04) ?
3470 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3472 /* If we are initiators, there is no remote information yet */
3473 if (conn->remote_auth == 0xff) {
3474 cp.authentication = conn->auth_type;
3476 /* Request MITM protection if our IO caps allow it
3477 * except for the no-bonding case
3479 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3480 cp.authentication != HCI_AT_NO_BONDING)
3481 cp.authentication |= 0x01;
3483 conn->auth_type = hci_get_auth_req(conn);
3484 cp.authentication = conn->auth_type;
3487 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3488 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3493 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3496 struct hci_cp_io_capability_neg_reply cp;
3498 bacpy(&cp.bdaddr, &ev->bdaddr);
3499 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3501 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3506 hci_dev_unlock(hdev);
3509 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3511 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3512 struct hci_conn *conn;
3514 BT_DBG("%s", hdev->name);
3518 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3522 conn->remote_cap = ev->capability;
3523 conn->remote_auth = ev->authentication;
3525 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3528 hci_dev_unlock(hdev);
3531 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3532 struct sk_buff *skb)
3534 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3535 int loc_mitm, rem_mitm, confirm_hint = 0;
3536 struct hci_conn *conn;
3538 BT_DBG("%s", hdev->name);
3542 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3545 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3549 loc_mitm = (conn->auth_type & 0x01);
3550 rem_mitm = (conn->remote_auth & 0x01);
3552 /* If we require MITM but the remote device can't provide that
3553 * (it has NoInputNoOutput) then reject the confirmation request
3555 if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3556 BT_DBG("Rejecting request: remote device can't provide MITM");
3557 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3558 sizeof(ev->bdaddr), &ev->bdaddr);
3562 /* If no side requires MITM protection; auto-accept */
3563 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3564 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3566 /* If we're not the initiators request authorization to
3567 * proceed from user space (mgmt_user_confirm with
3568 * confirm_hint set to 1). */
3569 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3570 BT_DBG("Confirming auto-accept as acceptor");
3575 BT_DBG("Auto-accept of user confirmation with %ums delay",
3576 hdev->auto_accept_delay);
3578 if (hdev->auto_accept_delay > 0) {
3579 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3580 queue_delayed_work(conn->hdev->workqueue,
3581 &conn->auto_accept_work, delay);
3585 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3586 sizeof(ev->bdaddr), &ev->bdaddr);
3591 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
3592 le32_to_cpu(ev->passkey), confirm_hint);
3595 hci_dev_unlock(hdev);
3598 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3599 struct sk_buff *skb)
3601 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3603 BT_DBG("%s", hdev->name);
3605 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3606 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3609 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3610 struct sk_buff *skb)
3612 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3613 struct hci_conn *conn;
3615 BT_DBG("%s", hdev->name);
3617 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3621 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3622 conn->passkey_entered = 0;
3624 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3625 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3626 conn->dst_type, conn->passkey_notify,
3627 conn->passkey_entered);
3630 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3632 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3633 struct hci_conn *conn;
3635 BT_DBG("%s", hdev->name);
3637 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3642 case HCI_KEYPRESS_STARTED:
3643 conn->passkey_entered = 0;
3646 case HCI_KEYPRESS_ENTERED:
3647 conn->passkey_entered++;
3650 case HCI_KEYPRESS_ERASED:
3651 conn->passkey_entered--;
3654 case HCI_KEYPRESS_CLEARED:
3655 conn->passkey_entered = 0;
3658 case HCI_KEYPRESS_COMPLETED:
3662 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3663 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3664 conn->dst_type, conn->passkey_notify,
3665 conn->passkey_entered);
3668 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3669 struct sk_buff *skb)
3671 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3672 struct hci_conn *conn;
3674 BT_DBG("%s", hdev->name);
3678 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3682 /* To avoid duplicate auth_failed events to user space we check
3683 * the HCI_CONN_AUTH_PEND flag which will be set if we
3684 * initiated the authentication. A traditional auth_complete
3685 * event gets always produced as initiator and is also mapped to
3686 * the mgmt_auth_failed event */
3687 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3688 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3691 hci_conn_drop(conn);
3694 hci_dev_unlock(hdev);
3697 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3698 struct sk_buff *skb)
3700 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3701 struct inquiry_entry *ie;
3702 struct hci_conn *conn;
3704 BT_DBG("%s", hdev->name);
3708 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3710 memcpy(conn->features[1], ev->features, 8);
3712 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3714 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3716 hci_dev_unlock(hdev);
3719 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3720 struct sk_buff *skb)
3722 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3723 struct oob_data *data;
3725 BT_DBG("%s", hdev->name);
3729 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3732 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3734 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3735 struct hci_cp_remote_oob_ext_data_reply cp;
3737 bacpy(&cp.bdaddr, &ev->bdaddr);
3738 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3739 memcpy(cp.randomizer192, data->randomizer192,
3740 sizeof(cp.randomizer192));
3741 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3742 memcpy(cp.randomizer256, data->randomizer256,
3743 sizeof(cp.randomizer256));
3745 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3748 struct hci_cp_remote_oob_data_reply cp;
3750 bacpy(&cp.bdaddr, &ev->bdaddr);
3751 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3752 memcpy(cp.randomizer, data->randomizer192,
3753 sizeof(cp.randomizer));
3755 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
3759 struct hci_cp_remote_oob_data_neg_reply cp;
3761 bacpy(&cp.bdaddr, &ev->bdaddr);
3762 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3767 hci_dev_unlock(hdev);
3770 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3771 struct sk_buff *skb)
3773 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3774 struct hci_conn *hcon, *bredr_hcon;
3776 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3781 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3783 hci_dev_unlock(hdev);
3789 hci_dev_unlock(hdev);
3793 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3795 hcon->state = BT_CONNECTED;
3796 bacpy(&hcon->dst, &bredr_hcon->dst);
3798 hci_conn_hold(hcon);
3799 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3800 hci_conn_drop(hcon);
3802 hci_conn_add_sysfs(hcon);
3804 amp_physical_cfm(bredr_hcon, hcon);
3806 hci_dev_unlock(hdev);
3809 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3811 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3812 struct hci_conn *hcon;
3813 struct hci_chan *hchan;
3814 struct amp_mgr *mgr;
3816 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3817 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3820 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3824 /* Create AMP hchan */
3825 hchan = hci_chan_create(hcon);
3829 hchan->handle = le16_to_cpu(ev->handle);
3831 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3833 mgr = hcon->amp_mgr;
3834 if (mgr && mgr->bredr_chan) {
3835 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3837 l2cap_chan_lock(bredr_chan);
3839 bredr_chan->conn->mtu = hdev->block_mtu;
3840 l2cap_logical_cfm(bredr_chan, hchan, 0);
3841 hci_conn_hold(hcon);
3843 l2cap_chan_unlock(bredr_chan);
3847 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3848 struct sk_buff *skb)
3850 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3851 struct hci_chan *hchan;
3853 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3854 le16_to_cpu(ev->handle), ev->status);
3861 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3865 amp_destroy_logical_link(hchan, ev->reason);
3868 hci_dev_unlock(hdev);
3871 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3872 struct sk_buff *skb)
3874 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3875 struct hci_conn *hcon;
3877 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3884 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3886 hcon->state = BT_CLOSED;
3890 hci_dev_unlock(hdev);
3893 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3895 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3896 struct hci_conn *conn;
3897 struct smp_irk *irk;
3899 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3903 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3905 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3907 BT_ERR("No memory for new connection");
3911 conn->dst_type = ev->bdaddr_type;
3913 if (ev->role == LE_CONN_ROLE_MASTER) {
3915 conn->link_mode |= HCI_LM_MASTER;
3918 /* If we didn't have a hci_conn object previously
3919 * but we're in master role this must be something
3920 * initiated using a white list. Since white list based
3921 * connections are not "first class citizens" we don't
3922 * have full tracking of them. Therefore, we go ahead
3923 * with a "best effort" approach of determining the
3924 * initiator address based on the HCI_PRIVACY flag.
3927 conn->resp_addr_type = ev->bdaddr_type;
3928 bacpy(&conn->resp_addr, &ev->bdaddr);
3929 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3930 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
3931 bacpy(&conn->init_addr, &hdev->rpa);
3933 hci_copy_identity_address(hdev,
3935 &conn->init_addr_type);
3939 cancel_delayed_work(&conn->le_conn_timeout);
3943 /* Set the responder (our side) address type based on
3944 * the advertising address type.
3946 conn->resp_addr_type = hdev->adv_addr_type;
3947 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
3948 bacpy(&conn->resp_addr, &hdev->random_addr);
3950 bacpy(&conn->resp_addr, &hdev->bdaddr);
3952 conn->init_addr_type = ev->bdaddr_type;
3953 bacpy(&conn->init_addr, &ev->bdaddr);
3956 /* Lookup the identity address from the stored connection
3957 * address and address type.
3959 * When establishing connections to an identity address, the
3960 * connection procedure will store the resolvable random
3961 * address first. Now if it can be converted back into the
3962 * identity address, start using the identity address from
3965 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
3967 bacpy(&conn->dst, &irk->bdaddr);
3968 conn->dst_type = irk->addr_type;
3972 hci_le_conn_failed(conn, ev->status);
3976 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3977 mgmt_device_connected(hdev, &conn->dst, conn->type,
3978 conn->dst_type, 0, NULL, 0, NULL);
3980 conn->sec_level = BT_SECURITY_LOW;
3981 conn->handle = __le16_to_cpu(ev->handle);
3982 conn->state = BT_CONNECTED;
3984 if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
3985 set_bit(HCI_CONN_6LOWPAN, &conn->flags);
3987 hci_conn_add_sysfs(conn);
3989 hci_proto_connect_cfm(conn, ev->status);
3991 hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);
3994 hci_dev_unlock(hdev);
3997 /* This function requires the caller holds hdev->lock */
3998 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4001 struct hci_conn *conn;
4002 struct smp_irk *irk;
4004 /* If this is a resolvable address, we should resolve it and then
4005 * update address and address type variables.
4007 irk = hci_get_irk(hdev, addr, addr_type);
4009 addr = &irk->bdaddr;
4010 addr_type = irk->addr_type;
4013 if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
4016 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4021 switch (PTR_ERR(conn)) {
4023 /* If hci_connect() returns -EBUSY it means there is already
4024 * an LE connection attempt going on. Since controllers don't
4025 * support more than one connection attempt at the time, we
4026 * don't consider this an error case.
4030 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
4034 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4035 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
4037 struct discovery_state *d = &hdev->discovery;
4040 /* Passive scanning shouldn't trigger any device found events */
4041 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4042 if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
4043 check_pending_le_conn(hdev, bdaddr, bdaddr_type);
4047 /* If there's nothing pending either store the data from this
4048 * event or send an immediate device found event if the data
4049 * should not be stored for later.
4051 if (!has_pending_adv_report(hdev)) {
4052 /* If the report will trigger a SCAN_REQ store it for
4055 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4056 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4061 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4062 rssi, 0, 1, data, len, NULL, 0);
4066 /* Check if the pending report is for the same device as the new one */
4067 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4068 bdaddr_type == d->last_adv_addr_type);
4070 /* If the pending data doesn't match this report or this isn't a
4071 * scan response (e.g. we got a duplicate ADV_IND) then force
4072 * sending of the pending data.
4074 if (type != LE_ADV_SCAN_RSP || !match) {
4075 /* Send out whatever is in the cache, but skip duplicates */
4077 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4078 d->last_adv_addr_type, NULL,
4079 d->last_adv_rssi, 0, 1,
4081 d->last_adv_data_len, NULL, 0);
4083 /* If the new report will trigger a SCAN_REQ store it for
4086 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4087 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4092 /* The advertising reports cannot be merged, so clear
4093 * the pending report and send out a device found event.
4095 clear_pending_adv_report(hdev);
4096 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4097 rssi, 0, 1, data, len, NULL, 0);
4101 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4102 * the new event is a SCAN_RSP. We can therefore proceed with
4103 * sending a merged device found event.
4105 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4106 d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
4107 d->last_adv_data, d->last_adv_data_len);
4108 clear_pending_adv_report(hdev);
4111 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4113 u8 num_reports = skb->data[0];
4114 void *ptr = &skb->data[1];
4118 while (num_reports--) {
4119 struct hci_ev_le_advertising_info *ev = ptr;
4122 rssi = ev->data[ev->length];
4123 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4124 ev->bdaddr_type, rssi, ev->data, ev->length);
4126 ptr += sizeof(*ev) + ev->length + 1;
4129 hci_dev_unlock(hdev);
4132 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4134 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4135 struct hci_cp_le_ltk_reply cp;
4136 struct hci_cp_le_ltk_neg_reply neg;
4137 struct hci_conn *conn;
4138 struct smp_ltk *ltk;
4140 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4144 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4148 ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
4152 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4153 cp.handle = cpu_to_le16(conn->handle);
4155 if (ltk->authenticated)
4156 conn->pending_sec_level = BT_SECURITY_HIGH;
4158 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4160 conn->enc_key_size = ltk->enc_size;
4162 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4164 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4165 * temporary key used to encrypt a connection following
4166 * pairing. It is used during the Encrypted Session Setup to
4167 * distribute the keys. Later, security can be re-established
4168 * using a distributed LTK.
4170 if (ltk->type == HCI_SMP_STK_SLAVE) {
4171 list_del(<k->list);
4175 hci_dev_unlock(hdev);
4180 neg.handle = ev->handle;
4181 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4182 hci_dev_unlock(hdev);
4185 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4187 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4189 skb_pull(skb, sizeof(*le_ev));
4191 switch (le_ev->subevent) {
4192 case HCI_EV_LE_CONN_COMPLETE:
4193 hci_le_conn_complete_evt(hdev, skb);
4196 case HCI_EV_LE_ADVERTISING_REPORT:
4197 hci_le_adv_report_evt(hdev, skb);
4200 case HCI_EV_LE_LTK_REQ:
4201 hci_le_ltk_request_evt(hdev, skb);
4209 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4211 struct hci_ev_channel_selected *ev = (void *) skb->data;
4212 struct hci_conn *hcon;
4214 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4216 skb_pull(skb, sizeof(*ev));
4218 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4222 amp_read_loc_assoc_final_data(hdev, hcon);
4225 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4227 struct hci_event_hdr *hdr = (void *) skb->data;
4228 __u8 event = hdr->evt;
4232 /* Received events are (currently) only needed when a request is
4233 * ongoing so avoid unnecessary memory allocation.
4235 if (hdev->req_status == HCI_REQ_PEND) {
4236 kfree_skb(hdev->recv_evt);
4237 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4240 hci_dev_unlock(hdev);
4242 skb_pull(skb, HCI_EVENT_HDR_SIZE);
4244 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4245 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4246 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4248 hci_req_cmd_complete(hdev, opcode, 0);
4252 case HCI_EV_INQUIRY_COMPLETE:
4253 hci_inquiry_complete_evt(hdev, skb);
4256 case HCI_EV_INQUIRY_RESULT:
4257 hci_inquiry_result_evt(hdev, skb);
4260 case HCI_EV_CONN_COMPLETE:
4261 hci_conn_complete_evt(hdev, skb);
4264 case HCI_EV_CONN_REQUEST:
4265 hci_conn_request_evt(hdev, skb);
4268 case HCI_EV_DISCONN_COMPLETE:
4269 hci_disconn_complete_evt(hdev, skb);
4272 case HCI_EV_AUTH_COMPLETE:
4273 hci_auth_complete_evt(hdev, skb);
4276 case HCI_EV_REMOTE_NAME:
4277 hci_remote_name_evt(hdev, skb);
4280 case HCI_EV_ENCRYPT_CHANGE:
4281 hci_encrypt_change_evt(hdev, skb);
4284 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4285 hci_change_link_key_complete_evt(hdev, skb);
4288 case HCI_EV_REMOTE_FEATURES:
4289 hci_remote_features_evt(hdev, skb);
4292 case HCI_EV_CMD_COMPLETE:
4293 hci_cmd_complete_evt(hdev, skb);
4296 case HCI_EV_CMD_STATUS:
4297 hci_cmd_status_evt(hdev, skb);
4300 case HCI_EV_ROLE_CHANGE:
4301 hci_role_change_evt(hdev, skb);
4304 case HCI_EV_NUM_COMP_PKTS:
4305 hci_num_comp_pkts_evt(hdev, skb);
4308 case HCI_EV_MODE_CHANGE:
4309 hci_mode_change_evt(hdev, skb);
4312 case HCI_EV_PIN_CODE_REQ:
4313 hci_pin_code_request_evt(hdev, skb);
4316 case HCI_EV_LINK_KEY_REQ:
4317 hci_link_key_request_evt(hdev, skb);
4320 case HCI_EV_LINK_KEY_NOTIFY:
4321 hci_link_key_notify_evt(hdev, skb);
4324 case HCI_EV_CLOCK_OFFSET:
4325 hci_clock_offset_evt(hdev, skb);
4328 case HCI_EV_PKT_TYPE_CHANGE:
4329 hci_pkt_type_change_evt(hdev, skb);
4332 case HCI_EV_PSCAN_REP_MODE:
4333 hci_pscan_rep_mode_evt(hdev, skb);
4336 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4337 hci_inquiry_result_with_rssi_evt(hdev, skb);
4340 case HCI_EV_REMOTE_EXT_FEATURES:
4341 hci_remote_ext_features_evt(hdev, skb);
4344 case HCI_EV_SYNC_CONN_COMPLETE:
4345 hci_sync_conn_complete_evt(hdev, skb);
4348 case HCI_EV_EXTENDED_INQUIRY_RESULT:
4349 hci_extended_inquiry_result_evt(hdev, skb);
4352 case HCI_EV_KEY_REFRESH_COMPLETE:
4353 hci_key_refresh_complete_evt(hdev, skb);
4356 case HCI_EV_IO_CAPA_REQUEST:
4357 hci_io_capa_request_evt(hdev, skb);
4360 case HCI_EV_IO_CAPA_REPLY:
4361 hci_io_capa_reply_evt(hdev, skb);
4364 case HCI_EV_USER_CONFIRM_REQUEST:
4365 hci_user_confirm_request_evt(hdev, skb);
4368 case HCI_EV_USER_PASSKEY_REQUEST:
4369 hci_user_passkey_request_evt(hdev, skb);
4372 case HCI_EV_USER_PASSKEY_NOTIFY:
4373 hci_user_passkey_notify_evt(hdev, skb);
4376 case HCI_EV_KEYPRESS_NOTIFY:
4377 hci_keypress_notify_evt(hdev, skb);
4380 case HCI_EV_SIMPLE_PAIR_COMPLETE:
4381 hci_simple_pair_complete_evt(hdev, skb);
4384 case HCI_EV_REMOTE_HOST_FEATURES:
4385 hci_remote_host_features_evt(hdev, skb);
4388 case HCI_EV_LE_META:
4389 hci_le_meta_evt(hdev, skb);
4392 case HCI_EV_CHANNEL_SELECTED:
4393 hci_chan_selected_evt(hdev, skb);
4396 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4397 hci_remote_oob_data_request_evt(hdev, skb);
4400 case HCI_EV_PHY_LINK_COMPLETE:
4401 hci_phy_link_complete_evt(hdev, skb);
4404 case HCI_EV_LOGICAL_LINK_COMPLETE:
4405 hci_loglink_complete_evt(hdev, skb);
4408 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4409 hci_disconn_loglink_complete_evt(hdev, skb);
4412 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4413 hci_disconn_phylink_complete_evt(hdev, skb);
4416 case HCI_EV_NUM_COMP_BLOCKS:
4417 hci_num_comp_blocks_evt(hdev, skb);
4421 BT_DBG("%s event 0x%2.2x", hdev->name, event);
4426 hdev->stat.evt_rx++;