2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
40 #include <linux/uaccess.h>
41 #include <asm/unaligned.h>
43 #include <net/bluetooth/bluetooth.h>
44 #include <net/bluetooth/hci_core.h>
46 /* Handle HCI Event packets */
/* Command Complete for HCI_OP_INQUIRY_CANCEL: on failure notify mgmt,
 * otherwise clear the HCI_INQUIRY flag and mark discovery stopped, then
 * complete the request and kick any pending connection attempts.
 * NOTE(review): braces/locking lines are missing from this excerpt —
 * verify against the full file before editing. */
48 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
50 __u8 status = *((__u8 *) skb->data);
52 BT_DBG("%s status 0x%x", hdev->name, status);
56 mgmt_stop_discovery_failed(hdev, status);
61 clear_bit(HCI_INQUIRY, &hdev->flags);
64 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
67 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
69 hci_conn_check_pending(hdev);
/* Command Complete for Exit Periodic Inquiry: log status and re-check
 * pending connections. */
72 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 __u8 status = *((__u8 *) skb->data);
76 BT_DBG("%s status 0x%x", hdev->name, status);
81 hci_conn_check_pending(hdev);
/* Command Complete for Remote Name Request Cancel: debug trace only. */
84 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
86 BT_DBG("%s", hdev->name);
/* Command Complete for Role Discovery: update conn->link_mode
 * (HCI_LM_MASTER) according to the reported role for the handle.
 * Presumably the missing lines take hci_dev_lock() and test rp->role —
 * confirm against the full source. */
89 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
91 struct hci_rp_role_discovery *rp = (void *) skb->data;
92 struct hci_conn *conn;
94 BT_DBG("%s status 0x%x", hdev->name, rp->status);
101 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 conn->link_mode &= ~HCI_LM_MASTER;
106 conn->link_mode |= HCI_LM_MASTER;
109 hci_dev_unlock(hdev);
/* Command Complete for Read Link Policy: cache the reported per-link
 * policy on the matching connection (under hci_dev lock). */
112 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
114 struct hci_rp_read_link_policy *rp = (void *) skb->data;
115 struct hci_conn *conn;
117 BT_DBG("%s status 0x%x", hdev->name, rp->status);
124 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
126 conn->link_policy = __le16_to_cpu(rp->policy);
128 hci_dev_unlock(hdev);
/* Command Complete for Write Link Policy: recover the policy value from
 * the sent command (offset 2 past the handle) and store it on the
 * connection. */
131 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
133 struct hci_rp_write_link_policy *rp = (void *) skb->data;
134 struct hci_conn *conn;
137 BT_DBG("%s status 0x%x", hdev->name, rp->status);
142 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
148 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
150 conn->link_policy = get_unaligned_le16(sent + 2);
152 hci_dev_unlock(hdev);
/* Command Complete for Read Default Link Policy: cache the controller's
 * default link policy in hdev->link_policy. */
155 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
157 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
159 BT_DBG("%s status 0x%x", hdev->name, rp->status);
164 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete for Write Default Link Policy: mirror the value we
 * sent into hdev->link_policy and complete the request. */
167 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
169 __u8 status = *((__u8 *) skb->data);
172 BT_DBG("%s status 0x%x", hdev->name, status);
174 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 hdev->link_policy = get_unaligned_le16(sent);
181 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
/* Command Complete for HCI Reset: clear HCI_RESET, complete the request
 * and wipe all non-persistent device flags / discovery state. */
184 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
186 __u8 status = *((__u8 *) skb->data);
188 BT_DBG("%s status 0x%x", hdev->name, status);
190 clear_bit(HCI_RESET, &hdev->flags);
192 hci_req_complete(hdev, HCI_OP_RESET, status);
194 /* Reset all non-persistent flags */
195 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
197 hdev->discovery.state = DISCOVERY_STOPPED;
/* Command Complete for Write Local Name: report the result to mgmt when
 * HCI_MGMT is set, and (presumably only on success — the guard is not
 * visible in this excerpt) copy the sent name into hdev->dev_name. */
200 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
202 __u8 status = *((__u8 *) skb->data);
205 BT_DBG("%s status 0x%x", hdev->name, status);
207 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213 if (test_bit(HCI_MGMT, &hdev->dev_flags))
214 mgmt_set_local_name_complete(hdev, sent, status);
216 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
218 hci_dev_unlock(hdev);
220 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
/* Command Complete for Read Local Name: during initial setup, cache the
 * controller-reported name in hdev->dev_name. */
223 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
225 struct hci_rp_read_local_name *rp = (void *) skb->data;
227 BT_DBG("%s status 0x%x", hdev->name, rp->status);
232 if (test_bit(HCI_SETUP, &hdev->dev_flags))
233 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete for Write Authentication Enable: sync the HCI_AUTH
 * flag with the parameter we sent, notify mgmt, complete the request. */
236 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
238 __u8 status = *((__u8 *) skb->data);
241 BT_DBG("%s status 0x%x", hdev->name, status);
243 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
248 __u8 param = *((__u8 *) sent);
250 if (param == AUTH_ENABLED)
251 set_bit(HCI_AUTH, &hdev->flags);
253 clear_bit(HCI_AUTH, &hdev->flags);
256 if (test_bit(HCI_MGMT, &hdev->dev_flags))
257 mgmt_auth_enable_complete(hdev, status);
259 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
/* Command Complete for Write Encryption Mode: sync the HCI_ENCRYPT flag
 * with the sent parameter (the if/else around set/clear is not visible
 * here) and complete the request. */
262 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264 __u8 status = *((__u8 *) skb->data);
267 BT_DBG("%s status 0x%x", hdev->name, status);
269 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
274 __u8 param = *((__u8 *) sent);
277 set_bit(HCI_ENCRYPT, &hdev->flags);
279 clear_bit(HCI_ENCRYPT, &hdev->flags);
282 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
/* Command Complete for Write Scan Enable: on failure report to mgmt and
 * reset the discoverable timeout; on success update HCI_ISCAN/HCI_PSCAN
 * flags from the sent param, emit mgmt discoverable/connectable change
 * events, and (re)arm the discoverable-off delayed work when a timeout
 * is configured. */
285 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
287 __u8 param, status = *((__u8 *) skb->data);
288 int old_pscan, old_iscan;
291 BT_DBG("%s status 0x%x", hdev->name, status);
293 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
297 param = *((__u8 *) sent);
302 mgmt_write_scan_failed(hdev, param, status);
303 hdev->discov_timeout = 0;
307 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
308 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
310 if (param & SCAN_INQUIRY) {
311 set_bit(HCI_ISCAN, &hdev->flags);
313 mgmt_discoverable(hdev, 1);
314 if (hdev->discov_timeout > 0) {
315 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
316 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
319 } else if (old_iscan)
320 mgmt_discoverable(hdev, 0);
322 if (param & SCAN_PAGE) {
323 set_bit(HCI_PSCAN, &hdev->flags);
325 mgmt_connectable(hdev, 1);
326 } else if (old_pscan)
327 mgmt_connectable(hdev, 0);
330 hci_dev_unlock(hdev);
331 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
/* Command Complete for Read Class of Device: cache the 3-byte CoD. */
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
338 BT_DBG("%s status 0x%x", hdev->name, rp->status);
343 memcpy(hdev->dev_class, rp->dev_class, 3);
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete for Write Class of Device: mirror the sent CoD into
 * hdev->dev_class and notify mgmt of the result. */
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
351 __u8 status = *((__u8 *) skb->data);
354 BT_DBG("%s status 0x%x", hdev->name, status);
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
363 memcpy(hdev->dev_class, sent, 3);
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
368 hci_dev_unlock(hdev);
/* Command Complete for Read Voice Setting: update the cached value only
 * when it changed, then notify the driver via hdev->notify (the NULL
 * check on hdev->notify is not visible in this excerpt). */
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
376 BT_DBG("%s status 0x%x", hdev->name, rp->status);
381 setting = __le16_to_cpu(rp->voice_setting);
383 if (hdev->voice_setting == setting)
386 hdev->voice_setting = setting;
388 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for Write Voice Setting: same update-and-notify flow
 * as the read handler, but the value comes from the sent command. */
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
396 __u8 status = *((__u8 *) skb->data);
400 BT_DBG("%s status 0x%x", hdev->name, status);
405 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
409 setting = get_unaligned_le16(sent);
411 if (hdev->voice_setting == setting)
414 hdev->voice_setting = setting;
416 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
419 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for Host Buffer Size: just complete the request. */
422 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
424 __u8 status = *((__u8 *) skb->data);
426 BT_DBG("%s status 0x%x", hdev->name, status);
428 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
/* Command Complete for Write Simple Pairing Mode: notify mgmt and sync
 * the HCI_SSP_ENABLED dev_flag with the mode we sent (the surrounding
 * if/else is not visible in this excerpt). */
431 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
433 __u8 status = *((__u8 *) skb->data);
436 BT_DBG("%s status 0x%x", hdev->name, status);
438 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
442 if (test_bit(HCI_MGMT, &hdev->dev_flags))
443 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
446 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
448 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* Pick the best inquiry mode the controller supports: extended inquiry,
 * RSSI inquiry, or standard.  The manufacturer/hci_rev/lmp_subver checks
 * are quirks for specific controllers whose feature bits lie (the
 * return values between the checks are not visible in this excerpt). */
452 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
454 if (hdev->features[6] & LMP_EXT_INQ)
457 if (hdev->features[3] & LMP_RSSI_INQ)
460 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
461 hdev->lmp_subver == 0x0757)
464 if (hdev->manufacturer == 15) {
465 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
467 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
469 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
473 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
474 hdev->lmp_subver == 0x1805)
/* Send Write Inquiry Mode with the best mode for this controller. */
480 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
484 mode = hci_get_inquiry_mode(hdev);
486 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
/* Build and send the Set Event Mask command.  Starts from a baseline of
 * pre-1.2 events, then enables optional events only when the matching
 * LMP feature bit is set, so the controller never signals events the
 * host cannot handle.  Skipped entirely for pre-1.2 controllers. */
489 static void hci_setup_event_mask(struct hci_dev *hdev)
491 /* The second byte is 0xff instead of 0x9f (two reserved bits
492 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
493 * command otherwise */
494 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
496 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
497 * any event mask for pre 1.2 devices */
498 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
501 events[4] |= 0x01; /* Flow Specification Complete */
502 events[4] |= 0x02; /* Inquiry Result with RSSI */
503 events[4] |= 0x04; /* Read Remote Extended Features Complete */
504 events[5] |= 0x08; /* Synchronous Connection Complete */
505 events[5] |= 0x10; /* Synchronous Connection Changed */
507 if (hdev->features[3] & LMP_RSSI_INQ)
508 events[4] |= 0x04; /* Inquiry Result with RSSI */
510 if (hdev->features[5] & LMP_SNIFF_SUBR)
511 events[5] |= 0x20; /* Sniff Subrating */
513 if (hdev->features[5] & LMP_PAUSE_ENC)
514 events[5] |= 0x80; /* Encryption Key Refresh Complete */
516 if (hdev->features[6] & LMP_EXT_INQ)
517 events[5] |= 0x40; /* Extended Inquiry Result */
519 if (hdev->features[6] & LMP_NO_FLUSH)
520 events[7] |= 0x01; /* Enhanced Flush Complete */
522 if (hdev->features[7] & LMP_LSTO)
523 events[6] |= 0x80; /* Link Supervision Timeout Changed */
525 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
526 events[6] |= 0x01; /* IO Capability Request */
527 events[6] |= 0x02; /* IO Capability Response */
528 events[6] |= 0x04; /* User Confirmation Request */
529 events[6] |= 0x08; /* User Passkey Request */
530 events[6] |= 0x10; /* Remote OOB Data Request */
531 events[6] |= 0x20; /* Simple Pairing Complete */
532 events[7] |= 0x04; /* User Passkey Notification */
533 events[7] |= 0x08; /* Keypress Notification */
534 events[7] |= 0x10; /* Remote Host Supported
535 * Features Notification */
538 if (hdev->features[4] & LMP_LE)
539 events[7] |= 0x20; /* LE Meta-Event */
541 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* BR/EDR controller bring-up sequence: set the event mask, read local
 * commands (1.2+), enable SSP or clear EIR depending on policy, pick
 * inquiry mode, read inquiry TX power, read extended features, and
 * enable authentication if link security is requested.  Only runs for
 * HCI_BREDR device types. */
544 static void hci_setup(struct hci_dev *hdev)
546 if (hdev->dev_type != HCI_BREDR)
549 hci_setup_event_mask(hdev);
551 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
552 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
554 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
555 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
557 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
558 sizeof(mode), &mode);
560 struct hci_cp_write_eir cp;
562 memset(hdev->eir, 0, sizeof(hdev->eir));
563 memset(&cp, 0, sizeof(cp));
565 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
569 if (hdev->features[3] & LMP_RSSI_INQ)
570 hci_setup_inquiry_mode(hdev);
572 if (hdev->features[7] & LMP_INQ_TX_PWR)
573 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
575 if (hdev->features[7] & LMP_EXTFEATURES) {
576 struct hci_cp_read_local_ext_features cp;
579 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
583 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
585 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Command Complete for Read Local Version: cache HCI/LMP versions and
 * manufacturer, then (during HCI_INIT — the call is elided here,
 * presumably hci_setup()) continue controller setup and complete the
 * request. */
590 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
592 struct hci_rp_read_local_version *rp = (void *) skb->data;
594 BT_DBG("%s status 0x%x", hdev->name, rp->status);
599 hdev->hci_ver = rp->hci_ver;
600 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
601 hdev->lmp_ver = rp->lmp_ver;
602 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
603 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
605 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
607 hdev->hci_ver, hdev->hci_rev);
609 if (test_bit(HCI_INIT, &hdev->flags))
613 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
/* Compose the default link policy from supported LMP features (role
 * switch, hold, sniff, park) and send Write Default Link Policy. */
616 static void hci_setup_link_policy(struct hci_dev *hdev)
620 if (hdev->features[0] & LMP_RSWITCH)
621 link_policy |= HCI_LP_RSWITCH;
622 if (hdev->features[0] & LMP_HOLD)
623 link_policy |= HCI_LP_HOLD;
624 if (hdev->features[0] & LMP_SNIFF)
625 link_policy |= HCI_LP_SNIFF;
626 if (hdev->features[1] & LMP_PARK)
627 link_policy |= HCI_LP_PARK;
629 link_policy = cpu_to_le16(link_policy);
630 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
/* Command Complete for Read Local Supported Commands: cache the command
 * bitmap; during init, if Write Default Link Policy is supported
 * (commands[5] bit 4), configure it. */
634 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
636 struct hci_rp_read_local_commands *rp = (void *) skb->data;
638 BT_DBG("%s status 0x%x", hdev->name, rp->status);
643 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
645 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
646 hci_setup_link_policy(hdev);
649 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
/* Command Complete for Read Local Supported Features: cache the 8-byte
 * feature mask and derive the ACL packet types and (e)SCO link types
 * this controller can use. */
652 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
654 struct hci_rp_read_local_features *rp = (void *) skb->data;
656 BT_DBG("%s status 0x%x", hdev->name, rp->status);
661 memcpy(hdev->features, rp->features, 8);
663 /* Adjust default settings according to features
664 * supported by device. */
666 if (hdev->features[0] & LMP_3SLOT)
667 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
669 if (hdev->features[0] & LMP_5SLOT)
670 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
672 if (hdev->features[1] & LMP_HV2) {
673 hdev->pkt_type |= (HCI_HV2);
674 hdev->esco_type |= (ESCO_HV2);
677 if (hdev->features[1] & LMP_HV3) {
678 hdev->pkt_type |= (HCI_HV3);
679 hdev->esco_type |= (ESCO_HV3);
682 if (hdev->features[3] & LMP_ESCO)
683 hdev->esco_type |= (ESCO_EV3);
685 if (hdev->features[4] & LMP_EV4)
686 hdev->esco_type |= (ESCO_EV4);
688 if (hdev->features[4] & LMP_EV5)
689 hdev->esco_type |= (ESCO_EV5);
691 if (hdev->features[5] & LMP_EDR_ESCO_2M)
692 hdev->esco_type |= (ESCO_2EV3);
694 if (hdev->features[5] & LMP_EDR_ESCO_3M)
695 hdev->esco_type |= (ESCO_3EV3);
697 if (hdev->features[5] & LMP_EDR_3S_ESCO)
698 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
700 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
701 hdev->features[0], hdev->features[1],
702 hdev->features[2], hdev->features[3],
703 hdev->features[4], hdev->features[5],
704 hdev->features[6], hdev->features[7]);
/* Send Write LE Host Supported only when the desired LE host state
 * differs from what host_features currently reports, to avoid a
 * redundant command. */
707 static void hci_set_le_support(struct hci_dev *hdev)
709 struct hci_cp_write_le_host_supported cp;
711 memset(&cp, 0, sizeof(cp));
713 if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
715 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
718 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
719 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Command Complete for Read Local Extended Features: cache page 0 into
 * features and page 1 into host_features (the page dispatch is elided
 * in this excerpt); during init, configure LE host support if the
 * controller has LE. */
723 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
726 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
728 BT_DBG("%s status 0x%x", hdev->name, rp->status);
735 memcpy(hdev->features, rp->features, 8);
738 memcpy(hdev->host_features, rp->features, 8);
742 if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
743 hci_set_le_support(hdev);
746 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
/* Command Complete for Read Flow Control Mode: cache the mode. */
749 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
752 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
754 BT_DBG("%s status 0x%x", hdev->name, rp->status);
759 hdev->flow_ctl_mode = rp->mode;
761 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
/* Command Complete for Read Buffer Size: cache ACL/SCO MTUs and packet
 * counts and seed the credit counters.  HCI_QUIRK_FIXUP_BUFFER_SIZE
 * adjusts broken controller-reported values (the fixups themselves are
 * not visible in this excerpt). */
764 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
766 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
768 BT_DBG("%s status 0x%x", hdev->name, rp->status);
773 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
774 hdev->sco_mtu = rp->sco_mtu;
775 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
776 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
778 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
783 hdev->acl_cnt = hdev->acl_pkts;
784 hdev->sco_cnt = hdev->sco_pkts;
786 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
787 hdev->acl_mtu, hdev->acl_pkts,
788 hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete for Read BD_ADDR: cache the controller address. */
791 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
793 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
795 BT_DBG("%s status 0x%x", hdev->name, rp->status);
798 bacpy(&hdev->bdaddr, &rp->bdaddr);
800 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
/* Command Complete for Read Data Block Size (block-based flow control):
 * cache block MTU/length/count and seed the block credit counter. */
803 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
806 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
808 BT_DBG("%s status 0x%x", hdev->name, rp->status);
813 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
814 hdev->block_len = __le16_to_cpu(rp->block_len);
815 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
817 hdev->block_cnt = hdev->num_blocks;
819 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
820 hdev->block_cnt, hdev->block_len);
822 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
/* Command Complete for Write CA Timeout: just complete the request. */
825 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
827 __u8 status = *((__u8 *) skb->data);
829 BT_DBG("%s status 0x%x", hdev->name, status);
831 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
/* Command Complete for Read Local AMP Info: cache all AMP controller
 * parameters (bandwidth, latency, PDU size, capabilities, flush
 * timeouts) on hdev. */
834 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
837 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
839 BT_DBG("%s status 0x%x", hdev->name, rp->status);
844 hdev->amp_status = rp->amp_status;
845 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
846 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
847 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
848 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
849 hdev->amp_type = rp->amp_type;
850 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
851 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
852 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
853 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
855 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
/* Command Complete for Delete Stored Link Key: complete the request. */
858 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
861 __u8 status = *((__u8 *) skb->data);
863 BT_DBG("%s status 0x%x", hdev->name, status);
865 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
/* Command Complete for Set Event Mask: complete the request. */
868 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
870 __u8 status = *((__u8 *) skb->data);
872 BT_DBG("%s status 0x%x", hdev->name, status);
874 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
/* Command Complete for Write Inquiry Mode: complete the request. */
877 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
880 __u8 status = *((__u8 *) skb->data);
882 BT_DBG("%s status 0x%x", hdev->name, status);
884 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
/* Command Complete for Read Inquiry Response TX Power: complete the
 * request (the TX power value itself is not stored in this excerpt). */
887 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
890 __u8 status = *((__u8 *) skb->data);
892 BT_DBG("%s status 0x%x", hdev->name, status);
894 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
/* Command Complete for Set Event Filter: complete the request. */
897 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
899 __u8 status = *((__u8 *) skb->data);
901 BT_DBG("%s status 0x%x", hdev->name, status);
903 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
/* Command Complete for PIN Code Reply: notify mgmt of the result; on
 * the sent-command path, record the PIN length on the matching ACL
 * connection (status/NULL guards are elided in this excerpt). */
906 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
908 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
909 struct hci_cp_pin_code_reply *cp;
910 struct hci_conn *conn;
912 BT_DBG("%s status 0x%x", hdev->name, rp->status);
916 if (test_bit(HCI_MGMT, &hdev->dev_flags))
917 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
922 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
926 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
928 conn->pin_length = cp->pin_len;
931 hci_dev_unlock(hdev);
/* Command Complete for PIN Code Negative Reply: forward the result to
 * mgmt under the hci_dev lock. */
934 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
936 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
938 BT_DBG("%s status 0x%x", hdev->name, rp->status);
942 if (test_bit(HCI_MGMT, &hdev->dev_flags))
943 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
946 hci_dev_unlock(hdev);
/* Command Complete for LE Read Buffer Size: cache LE ACL MTU and packet
 * count and seed the LE credit counter. */
949 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
952 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
954 BT_DBG("%s status 0x%x", hdev->name, rp->status);
959 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
960 hdev->le_pkts = rp->le_max_pkt;
962 hdev->le_cnt = hdev->le_pkts;
964 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
966 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
/* Command Complete for User Confirmation Reply: forward the result to
 * mgmt under the hci_dev lock. */
969 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
971 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
973 BT_DBG("%s status 0x%x", hdev->name, rp->status);
977 if (test_bit(HCI_MGMT, &hdev->dev_flags))
978 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
981 hci_dev_unlock(hdev);
/* Command Complete for User Confirmation Negative Reply: forward the
 * result to mgmt under the hci_dev lock. */
984 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
987 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
989 BT_DBG("%s status 0x%x", hdev->name, rp->status);
993 if (test_bit(HCI_MGMT, &hdev->dev_flags))
994 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
995 ACL_LINK, 0, rp->status);
997 hci_dev_unlock(hdev);
/* Command Complete for User Passkey Reply: forward the result to mgmt
 * under the hci_dev lock. */
1000 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1002 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1004 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1008 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1009 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1012 hci_dev_unlock(hdev);
/* Command Complete for User Passkey Negative Reply: forward the result
 * to mgmt under the hci_dev lock. */
1015 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1016 struct sk_buff *skb)
1018 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1020 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1024 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1025 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1026 ACL_LINK, 0, rp->status);
1028 hci_dev_unlock(hdev);
/* Command Complete for Read Local OOB Data: hand the hash/randomizer
 * pair to mgmt under the hci_dev lock. */
1031 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1032 struct sk_buff *skb)
1034 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1036 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1039 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1040 rp->randomizer, rp->status);
1041 hci_dev_unlock(hdev);
/* Command Complete for LE Set Scan Parameters: complete the request;
 * on failure (guard elided in this excerpt) report a failed discovery
 * start to mgmt. */
1044 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1046 __u8 status = *((__u8 *) skb->data);
1048 BT_DBG("%s status 0x%x", hdev->name, status);
1050 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1054 mgmt_start_discovery_failed(hdev, status);
1055 hci_dev_unlock(hdev);
/* Command Complete for LE Set Scan Enable.  Dispatches on the enable
 * value we sent: ENABLED -> set HCI_LE_SCAN, clear stale advertising
 * entries and move discovery to FINDING (reporting failure to mgmt on
 * error); DISABLED -> clear HCI_LE_SCAN, schedule the advertising-cache
 * expiry work, and either continue an interleaved discovery or stop.
 * Any other value is a reserved parameter and is logged as an error. */
1060 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1061 struct sk_buff *skb)
1063 struct hci_cp_le_set_scan_enable *cp;
1064 __u8 status = *((__u8 *) skb->data);
1066 BT_DBG("%s status 0x%x", hdev->name, status);
1068 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1072 switch (cp->enable) {
1073 case LE_SCANNING_ENABLED:
1074 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1078 mgmt_start_discovery_failed(hdev, status);
1079 hci_dev_unlock(hdev);
1083 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1085 cancel_delayed_work_sync(&hdev->adv_work);
1088 hci_adv_entries_clear(hdev);
1089 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1090 hci_dev_unlock(hdev);
1093 case LE_SCANNING_DISABLED:
1097 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1099 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1101 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1102 mgmt_interleaved_discovery(hdev);
1105 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1106 hci_dev_unlock(hdev);
1112 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
/* Command Complete for LE LTK Reply: complete the request. */
1117 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1119 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1121 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1126 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
/* Command Complete for LE LTK Negative Reply: complete the request. */
1129 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1131 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1133 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1138 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
/* Command Complete for Write LE Host Supported: sync LMP_HOST_LE in
 * host_features[0] with the value we sent, and notify mgmt of the LE
 * enable result — but not during HCI_INIT, when mgmt did not ask. */
1141 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1142 struct sk_buff *skb)
1144 struct hci_cp_write_le_host_supported *sent;
1145 __u8 status = *((__u8 *) skb->data);
1147 BT_DBG("%s status 0x%x", hdev->name, status);
1149 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1155 hdev->host_features[0] |= LMP_HOST_LE;
1157 hdev->host_features[0] &= ~LMP_HOST_LE;
1160 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1161 !test_bit(HCI_INIT, &hdev->flags))
1162 mgmt_le_enable_complete(hdev, sent->le, status);
1164 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
/* Command Status for HCI Inquiry: on error complete the request, kick
 * pending connections and report discovery failure to mgmt; on success
 * set HCI_INQUIRY and move discovery to FINDING (the success/failure
 * branching is elided in this excerpt). */
1167 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1169 BT_DBG("%s status 0x%x", hdev->name, status);
1172 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1173 hci_conn_check_pending(hdev);
1175 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1176 mgmt_start_discovery_failed(hdev, status);
1177 hci_dev_unlock(hdev);
1181 set_bit(HCI_INQUIRY, &hdev->flags);
1184 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1185 hci_dev_unlock(hdev);
/* Command Status for Create Connection.  On failure for a connection in
 * BT_CONNECT: close it and confirm failure to the protocol layer —
 * except status 0x0c (command disallowed) with fewer than 3 attempts,
 * where the connection is parked in BT_CONNECT2 for retry.  On success
 * with no tracked connection, add one as master. */
1188 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1190 struct hci_cp_create_conn *cp;
1191 struct hci_conn *conn;
1193 BT_DBG("%s status 0x%x", hdev->name, status);
1195 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1201 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1203 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1206 if (conn && conn->state == BT_CONNECT) {
1207 if (status != 0x0c || conn->attempt > 2) {
1208 conn->state = BT_CLOSED;
1209 hci_proto_connect_cfm(conn, status);
1212 conn->state = BT_CONNECT2;
1216 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1219 conn->link_mode |= HCI_LM_MASTER;
1221 BT_ERR("No memory for new connection");
1225 hci_dev_unlock(hdev);
/* Command Status for Add SCO Connection: on failure, look up the ACL by
 * handle, close its attached SCO link and confirm the failure to the
 * protocol layer (the acl->link dereference is elided here). */
1228 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1230 struct hci_cp_add_sco *cp;
1231 struct hci_conn *acl, *sco;
1234 BT_DBG("%s status 0x%x", hdev->name, status);
1239 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1243 handle = __le16_to_cpu(cp->handle);
1245 BT_DBG("%s handle %d", hdev->name, handle);
1249 acl = hci_conn_hash_lookup_handle(hdev, handle);
1253 sco->state = BT_CLOSED;
1255 hci_proto_connect_cfm(sco, status);
1260 hci_dev_unlock(hdev);
/* Command Status for Authentication Requested: on failure, if the
 * connection is still configuring, confirm failure to the protocol
 * layer (the drop of the hci_conn reference is elided here). */
1263 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1265 struct hci_cp_auth_requested *cp;
1266 struct hci_conn *conn;
1268 BT_DBG("%s status 0x%x", hdev->name, status);
1273 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1279 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1281 if (conn->state == BT_CONFIG) {
1282 hci_proto_connect_cfm(conn, status);
1287 hci_dev_unlock(hdev);
/* Command Status for Set Connection Encryption: on failure, if the
 * connection is still configuring, confirm failure to the protocol
 * layer — mirrors hci_cs_auth_requested. */
1290 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1292 struct hci_cp_set_conn_encrypt *cp;
1293 struct hci_conn *conn;
1295 BT_DBG("%s status 0x%x", hdev->name, status);
1300 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1306 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1308 if (conn->state == BT_CONFIG) {
1309 hci_proto_connect_cfm(conn, status);
1314 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection in BT_CONFIG still needs an
 * Authentication Request: not for SDP-only security, and for non-SSP
 * links only when HIGH security or MITM protection (auth_type bit 0)
 * was requested.  Returns 0/1 (the return statements are elided in
 * this excerpt). */
1317 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1318 struct hci_conn *conn)
1320 if (conn->state != BT_CONFIG || !conn->out)
1323 if (conn->pending_sec_level == BT_SECURITY_SDP)
1326 /* Only request authentication for SSP connections or non-SSP
1327 * devices with sec_level HIGH or if MITM protection is requested */
1328 if (!hci_conn_ssp_enabled(conn) &&
1329 conn->pending_sec_level != BT_SECURITY_HIGH &&
1330 !(conn->auth_type & 0x01))
/* Issue a Remote Name Request for an inquiry cache entry, reusing the
 * page-scan mode and clock offset captured during inquiry.  Returns the
 * hci_send_cmd() result (0 on success). */
1336 static inline int hci_resolve_name(struct hci_dev *hdev,
1337 struct inquiry_entry *e)
1339 struct hci_cp_remote_name_req cp;
1341 memset(&cp, 0, sizeof(cp));
1343 bacpy(&cp.bdaddr, &e->data.bdaddr);
1344 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1345 cp.pscan_mode = e->data.pscan_mode;
1346 cp.clock_offset = e->data.clock_offset;
1348 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Start resolving the next NAME_NEEDED entry on the discovery resolve
 * list; mark it NAME_PENDING when the request was sent.  Returns
 * true/false (return statements elided in this excerpt; presumably
 * false when the list is empty or the request fails). */
1351 static bool hci_resolve_next_name(struct hci_dev *hdev)
1353 struct discovery_state *discov = &hdev->discovery;
1354 struct inquiry_entry *e;
1356 if (list_empty(&discov->resolve))
1359 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1360 if (hci_resolve_name(hdev, e) == 0) {
1361 e->name_state = NAME_PENDING;
/* Handle a resolved (or failed) remote name during discovery: emit a
 * mgmt device_connected for a newly connected peer, then — when in the
 * RESOLVING state — mark the cache entry NAME_KNOWN, report the name to
 * mgmt, and either resolve the next pending name or declare discovery
 * stopped.  STOPPING goes straight to the stopped state. */
1368 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1369 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1371 struct discovery_state *discov = &hdev->discovery;
1372 struct inquiry_entry *e;
1374 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1375 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1376 name_len, conn->dev_class);
1378 if (discov->state == DISCOVERY_STOPPED)
1381 if (discov->state == DISCOVERY_STOPPING)
1382 goto discov_complete;
1384 if (discov->state != DISCOVERY_RESOLVING)
1387 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1389 e->name_state = NAME_KNOWN;
1392 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1393 e->data.rssi, name, name_len);
1396 if (hci_resolve_next_name(hdev))
1400 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status for Remote Name Request.  On success, wait for the
 * name-complete event.  On failure, tell the discovery machinery the
 * name is unresolved, and if the outgoing connection still needs
 * authentication, send an Authentication Request for it. */
1403 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1405 struct hci_cp_remote_name_req *cp;
1406 struct hci_conn *conn;
1408 BT_DBG("%s status 0x%x", hdev->name, status);
1410 /* If successful wait for the name req complete event before
1411 * checking for the need to do authentication */
1415 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1421 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1423 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1424 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0)
1429 if (!hci_outgoing_auth_needed(hdev, conn))
1432 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1433 struct hci_cp_auth_requested cp;
1434 cp.handle = __cpu_to_le16(conn->handle);
1435 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1439 hci_dev_unlock(hdev);
/*
 * Command-status handler for HCI_OP_READ_REMOTE_FEATURES.  On error,
 * a connection still in BT_CONFIG is reported to the protocol layer
 * via hci_proto_connect_cfm() so setup can fail cleanly.
 * NOTE(review): status/NULL guards, lock call and braces are elided.
 */
1442 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1444 struct hci_cp_read_remote_features *cp;
1445 struct hci_conn *conn;
1447 BT_DBG("%s status 0x%x", hdev->name, status);
1452 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1458 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1460 if (conn->state == BT_CONFIG) {
1461 hci_proto_connect_cfm(conn, status);
1466 hci_dev_unlock(hdev);
/*
 * Command-status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.  Mirrors
 * hci_cs_read_remote_features(): on error, fail connection setup for a
 * link still in BT_CONFIG.
 * NOTE(review): status/NULL guards, lock call and braces are elided.
 */
1469 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1471 struct hci_cp_read_remote_ext_features *cp;
1472 struct hci_conn *conn;
1474 BT_DBG("%s status 0x%x", hdev->name, status);
1479 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1485 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1487 if (conn->state == BT_CONFIG) {
1488 hci_proto_connect_cfm(conn, status);
1493 hci_dev_unlock(hdev);
/*
 * Command-status handler for HCI_OP_SETUP_SYNC_CONN.  On failure, the
 * SCO link hanging off the ACL (presumably acl->link -- the assignment
 * is elided here) is closed and the failure propagated to the protocol
 * layer.
 * NOTE(review): guards, the sco lookup and braces are elided.
 */
1496 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1498 struct hci_cp_setup_sync_conn *cp;
1499 struct hci_conn *acl, *sco;
1502 BT_DBG("%s status 0x%x", hdev->name, status);
1507 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
/* Handle in the command is the ACL the sync connection rides on. */
1511 handle = __le16_to_cpu(cp->handle);
1513 BT_DBG("%s handle %d", hdev->name, handle);
1517 acl = hci_conn_hash_lookup_handle(hdev, handle);
1521 sco->state = BT_CLOSED;
1523 hci_proto_connect_cfm(sco, status);
1528 hci_dev_unlock(hdev);
/*
 * Command-status handler for HCI_OP_SNIFF_MODE.  On error, clear the
 * pending mode-change flag and, if a SCO setup was waiting on the mode
 * change, run it now with the failure status.
 * NOTE(review): status/NULL guards, lock call and braces are elided.
 */
1531 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1533 struct hci_cp_sniff_mode *cp;
1534 struct hci_conn *conn;
1536 BT_DBG("%s status 0x%x", hdev->name, status);
1541 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1547 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1549 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1551 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1552 hci_sco_setup(conn, status);
1555 hci_dev_unlock(hdev);
/*
 * Command-status handler for HCI_OP_EXIT_SNIFF_MODE.  Identical shape
 * to hci_cs_sniff_mode(): clear the pending mode change and flush any
 * deferred SCO setup on error.
 * NOTE(review): status/NULL guards, lock call and braces are elided.
 */
1558 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1560 struct hci_cp_exit_sniff_mode *cp;
1561 struct hci_conn *conn;
1563 BT_DBG("%s status 0x%x", hdev->name, status);
1568 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1574 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1576 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1578 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1579 hci_sco_setup(conn, status);
1582 hci_dev_unlock(hdev);
/*
 * Command-status handler for HCI_OP_DISCONNECT.  On error, tell mgmt
 * that the disconnect attempt failed for this peer.
 * NOTE(review): status/NULL guards, lock call and braces are elided.
 */
1585 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1587 struct hci_cp_disconnect *cp;
1588 struct hci_conn *conn;
1593 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1599 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1601 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1602 conn->dst_type, status);
1604 hci_dev_unlock(hdev);
/*
 * Command-status handler for HCI_OP_LE_CREATE_CONN.  On error, close
 * the connection that was in BT_CONNECT and confirm the failure; on
 * success, create the LE hci_conn if it does not exist yet and record
 * the peer address type.
 * NOTE(review): status branches, NULL checks and braces are elided.
 */
1607 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1609 struct hci_cp_le_create_conn *cp;
1610 struct hci_conn *conn;
1612 BT_DBG("%s status 0x%x", hdev->name, status);
1614 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1620 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1622 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
/* Failure path: abort the pending outgoing LE connection. */
1626 if (conn && conn->state == BT_CONNECT) {
1627 conn->state = BT_CLOSED;
1628 hci_proto_connect_cfm(conn, status);
/* Success path: make sure an hci_conn object exists for the peer. */
1633 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1635 conn->dst_type = cp->peer_addr_type;
1638 BT_ERR("No memory for new connection");
1643 hci_dev_unlock(hdev);
/*
 * Command-status handler for HCI_OP_LE_START_ENC: debug logging only,
 * no state to update here.
 */
1646 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1648 BT_DBG("%s status 0x%x", hdev->name, status);
/*
 * HCI Inquiry Complete event.  Completes the pending HCI_OP_INQUIRY
 * request, clears HCI_INQUIRY, and -- when mgmt is active -- either
 * moves discovery into the name-resolving phase or marks it stopped.
 * NOTE(review): lock calls, gotos and braces are elided in this
 * extract.
 */
1651 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1653 __u8 status = *((__u8 *) skb->data);
1654 struct discovery_state *discov = &hdev->discovery;
1655 struct inquiry_entry *e;
1657 BT_DBG("%s status %d", hdev->name, status);
1659 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1661 hci_conn_check_pending(hdev);
/* Only act once per inquiry: bail if the flag was already clear. */
1663 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1666 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1671 if (discov->state != DISCOVERY_FINDING)
/* Nothing to resolve: discovery ends right away. */
1674 if (list_empty(&discov->resolve)) {
1675 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1679 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1680 if (e && hci_resolve_name(hdev, e) == 0) {
1681 e->name_state = NAME_PENDING;
1682 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1684 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1688 hci_dev_unlock(hdev);
/*
 * HCI Inquiry Result event.  The first byte of the payload is the
 * number of responses; each following inquiry_info is copied into the
 * inquiry cache and reported to mgmt as a found device (no RSSI or EIR
 * in this legacy event format).
 * NOTE(review): the num_rsp sanity check, lock call and braces are
 * elided in this extract.
 */
1691 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1693 struct inquiry_data data;
1694 struct inquiry_info *info = (void *) (skb->data + 1);
1695 int num_rsp = *((__u8 *) skb->data);
1697 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1704 for (; num_rsp; num_rsp--, info++) {
1705 bool name_known, ssp;
1707 bacpy(&data.bdaddr, &info->bdaddr);
1708 data.pscan_rep_mode = info->pscan_rep_mode;
1709 data.pscan_period_mode = info->pscan_period_mode;
1710 data.pscan_mode = info->pscan_mode;
1711 memcpy(data.dev_class, info->dev_class, 3);
1712 data.clock_offset = info->clock_offset;
/* Legacy inquiry results never carry SSP information. */
1714 data.ssp_mode = 0x00;
1716 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1717 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1718 info->dev_class, 0, !name_known, ssp, NULL,
1722 hci_dev_unlock(hdev);
/*
 * HCI Connection Complete event.  Finalises an ACL/SCO connection:
 * stores the handle, moves ACL links to BT_CONFIG (and starts remote
 * feature discovery) or others to BT_CONNECTED, applies link-mode bits,
 * and on failure reports a connect error to mgmt.  Finishes by checking
 * for further pending connections.
 * NOTE(review): the ev->status branch structure, several gotos and
 * braces are elided in this extract.
 */
1725 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1727 struct hci_ev_conn_complete *ev = (void *) skb->data;
1728 struct hci_conn *conn;
1730 BT_DBG("%s", hdev->name);
1734 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* A SCO completion may correspond to an attempted eSCO connection. */
1736 if (ev->link_type != SCO_LINK)
1739 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1743 conn->type = SCO_LINK;
1747 conn->handle = __le16_to_cpu(ev->handle);
1749 if (conn->type == ACL_LINK) {
1750 conn->state = BT_CONFIG;
1751 hci_conn_hold(conn);
1752 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1754 conn->state = BT_CONNECTED;
1756 hci_conn_hold_device(conn);
1757 hci_conn_add_sysfs(conn);
/* Mirror adapter-wide auth/encrypt settings onto the link. */
1759 if (test_bit(HCI_AUTH, &hdev->flags))
1760 conn->link_mode |= HCI_LM_AUTH;
1762 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1763 conn->link_mode |= HCI_LM_ENCRYPT;
1765 /* Get remote features */
1766 if (conn->type == ACL_LINK) {
1767 struct hci_cp_read_remote_features cp;
1768 cp.handle = ev->handle;
1769 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1773 /* Set packet type for incoming connection */
1774 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1775 struct hci_cp_change_conn_ptype cp;
1776 cp.handle = ev->handle;
1777 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1778 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Failure path: close the connection and notify mgmt for ACL links. */
1782 conn->state = BT_CLOSED;
1783 if (conn->type == ACL_LINK)
1784 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1785 conn->dst_type, ev->status);
1788 if (conn->type == ACL_LINK)
1789 hci_sco_setup(conn, ev->status);
1792 hci_proto_connect_cfm(conn, ev->status);
1794 } else if (ev->link_type != ACL_LINK)
1795 hci_proto_connect_cfm(conn, ev->status);
1798 hci_dev_unlock(hdev);
1800 hci_conn_check_pending(hdev);
/*
 * HCI Connection Request event (incoming connection).  If the link
 * policy and protocol layers accept it and the peer is not
 * blacklisted, accept the ACL or sync connection (choosing master/
 * slave role, or eSCO parameters); otherwise reject it.
 * NOTE(review): lock calls, some NULL checks and braces are elided.
 */
1803 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1805 struct hci_ev_conn_request *ev = (void *) skb->data;
1806 int mask = hdev->link_mode;
1808 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1809 batostr(&ev->bdaddr), ev->link_type);
/* Let L2CAP/SCO layers veto or adjust the accept mask. */
1811 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1813 if ((mask & HCI_LM_ACCEPT) &&
1814 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1815 /* Connection accepted */
1816 struct inquiry_entry *ie;
1817 struct hci_conn *conn;
/* Refresh the cached device class if we already know this peer. */
1821 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1823 memcpy(ie->data.dev_class, ev->dev_class, 3);
1825 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1827 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1829 BT_ERR("No memory for new connection");
1830 hci_dev_unlock(hdev);
1835 memcpy(conn->dev_class, ev->dev_class, 3);
1836 conn->state = BT_CONNECT;
1838 hci_dev_unlock(hdev);
1840 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1841 struct hci_cp_accept_conn_req cp;
1843 bacpy(&cp.bdaddr, &ev->bdaddr);
1845 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1846 cp.role = 0x00; /* Become master */
1848 cp.role = 0x01; /* Remain slave */
1850 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1853 struct hci_cp_accept_sync_conn_req cp;
1855 bacpy(&cp.bdaddr, &ev->bdaddr);
1856 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* 8000 Bps (64 kbit/s) each way: standard CVSD voice bandwidth. */
1858 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1859 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1860 cp.max_latency = cpu_to_le16(0xffff);
1861 cp.content_format = cpu_to_le16(hdev->voice_setting);
1862 cp.retrans_effort = 0xff;
1864 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1868 /* Connection rejected */
1869 struct hci_cp_reject_conn_req cp;
1871 bacpy(&cp.bdaddr, &ev->bdaddr);
1872 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1873 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/*
 * HCI Disconnection Complete event.  Marks the connection closed on
 * success, reports the disconnect (or disconnect failure) to mgmt for
 * ACL/LE links, and confirms the disconnect to the protocol layer.
 * NOTE(review): NULL checks, the conn_del/put path and braces are
 * elided in this extract.
 */
1877 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1879 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1880 struct hci_conn *conn;
1882 BT_DBG("%s status %d", hdev->name, ev->status);
1886 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1890 if (ev->status == 0)
1891 conn->state = BT_CLOSED;
/* Only notify mgmt once, and only for link types it tracks. */
1893 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1894 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1895 if (ev->status != 0)
1896 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1897 conn->dst_type, ev->status);
1899 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1903 if (ev->status == 0) {
1904 hci_proto_disconn_cfm(conn, ev->reason);
1909 hci_dev_unlock(hdev);
/*
 * HCI Authentication Complete event.  On success, records HCI_LM_AUTH
 * and promotes the security level; on failure, informs mgmt.  For a
 * link in BT_CONFIG with SSP it continues setup by enabling
 * encryption; it also flushes any pending encryption request.
 * NOTE(review): status branches, gotos and braces are elided in this
 * extract (note the re-auth special case for legacy devices).
 */
1912 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1914 struct hci_ev_auth_complete *ev = (void *) skb->data;
1915 struct hci_conn *conn;
1917 BT_DBG("%s status %d", hdev->name, ev->status);
1921 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Legacy (non-SSP) devices cannot re-authenticate an existing link. */
1926 if (!hci_conn_ssp_enabled(conn) &&
1927 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1928 BT_INFO("re-auth of legacy device is not possible.");
1930 conn->link_mode |= HCI_LM_AUTH;
1931 conn->sec_level = conn->pending_sec_level;
1934 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1938 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1939 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1941 if (conn->state == BT_CONFIG) {
1942 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1943 struct hci_cp_set_conn_encrypt cp;
1944 cp.handle = ev->handle;
1946 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1949 conn->state = BT_CONNECTED;
1950 hci_proto_connect_cfm(conn, ev->status);
1954 hci_auth_cfm(conn, ev->status);
1956 hci_conn_hold(conn);
1957 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
/* An encryption change was queued behind this authentication. */
1961 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1963 struct hci_cp_set_conn_encrypt cp;
1964 cp.handle = ev->handle;
1966 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1969 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1970 hci_encrypt_cfm(conn, ev->status, 0x00);
1975 hci_dev_unlock(hdev);
/*
 * HCI Remote Name Request Complete event.  Feeds the (possibly failed)
 * name lookup into hci_check_pending_name() for mgmt book-keeping, then
 * starts authentication on the ACL if an outgoing connection needs it.
 * NOTE(review): lock calls, gotos and braces are elided in this
 * extract.
 */
1978 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1980 struct hci_ev_remote_name *ev = (void *) skb->data;
1981 struct hci_conn *conn;
1983 BT_DBG("%s", hdev->name);
1985 hci_conn_check_pending(hdev);
1989 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1991 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1994 if (ev->status == 0)
/* ev->name may not be NUL-terminated; bound the length explicitly. */
1995 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1996 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1998 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2004 if (!hci_outgoing_auth_needed(hdev, conn))
2007 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2008 struct hci_cp_auth_requested cp;
2009 cp.handle = __cpu_to_le16(conn->handle);
2010 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2014 hci_dev_unlock(hdev);
/*
 * HCI Encryption Change event.  Updates the link-mode encryption (and
 * implied authentication) bits, completes connection setup for links
 * in BT_CONFIG, and confirms the change to the protocol layer.
 * NOTE(review): the ev->encrypt/status branch structure and braces are
 * elided in this extract.
 */
2017 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2019 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2020 struct hci_conn *conn;
2022 BT_DBG("%s status %d", hdev->name, ev->status);
2026 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2030 /* Encryption implies authentication */
2031 conn->link_mode |= HCI_LM_AUTH;
2032 conn->link_mode |= HCI_LM_ENCRYPT;
2033 conn->sec_level = conn->pending_sec_level;
/* Presumably the "encryption disabled" branch -- elided guard above. */
2035 conn->link_mode &= ~HCI_LM_ENCRYPT;
2038 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2040 if (conn->state == BT_CONFIG) {
2042 conn->state = BT_CONNECTED;
2044 hci_proto_connect_cfm(conn, ev->status);
2047 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2050 hci_dev_unlock(hdev);
/*
 * HCI Change Connection Link Key Complete event.  On success the link
 * is marked secure; the pending-auth flag is cleared and the key change
 * confirmed to the protocol layer.
 * NOTE(review): NULL/status guards and braces are elided.
 */
2053 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2055 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2056 struct hci_conn *conn;
2058 BT_DBG("%s status %d", hdev->name, ev->status);
2062 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2065 conn->link_mode |= HCI_LM_SECURE;
2067 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2069 hci_key_change_cfm(conn, ev->status);
2072 hci_dev_unlock(hdev);
/*
 * HCI Read Remote Supported Features Complete event.  Caches the
 * feature page, then continues connection setup: read extended
 * features when both sides support SSP, otherwise request the remote
 * name (or notify mgmt directly) and finish the connect when no
 * authentication is required.
 * NOTE(review): guards, gotos and braces are elided in this extract.
 */
2075 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2077 struct hci_ev_remote_features *ev = (void *) skb->data;
2078 struct hci_conn *conn;
2080 BT_DBG("%s status %d", hdev->name, ev->status);
2084 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2089 memcpy(conn->features, ev->features, 8);
2091 if (conn->state != BT_CONFIG)
/* SSP on both ends: host features live on extended feature page 1. */
2094 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2095 struct hci_cp_read_remote_ext_features cp;
2096 cp.handle = ev->handle;
2098 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2104 struct hci_cp_remote_name_req cp;
2105 memset(&cp, 0, sizeof(cp));
2106 bacpy(&cp.bdaddr, &conn->dst);
2107 cp.pscan_rep_mode = 0x02;
2108 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2109 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2110 mgmt_device_connected(hdev, &conn->dst, conn->type,
2111 conn->dst_type, 0, NULL, 0,
2114 if (!hci_outgoing_auth_needed(hdev, conn)) {
2115 conn->state = BT_CONNECTED;
2116 hci_proto_connect_cfm(conn, ev->status);
2121 hci_dev_unlock(hdev);
/* HCI Remote Version Information event: debug logging only. */
2124 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2126 BT_DBG("%s", hdev->name);
/* HCI QoS Setup Complete event: debug logging only, no state change. */
2129 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2131 BT_DBG("%s", hdev->name);
/*
 * HCI Command Complete event dispatcher.  Strips the event header,
 * then routes the remaining parameter payload to the per-opcode
 * hci_cc_* handler.  Afterwards it cancels the command timeout (except
 * for HCI_OP_NOP) and, if the controller granted command credits,
 * restarts the command queue worker.
 * NOTE(review): the __u16 opcode declaration, switch braces and the
 * ev->ncmd credit check are elided in this extract.
 */
2134 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2136 struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* Handlers below see only the command's return parameters. */
2139 skb_pull(skb, sizeof(*ev));
2141 opcode = __le16_to_cpu(ev->opcode);
2144 case HCI_OP_INQUIRY_CANCEL:
2145 hci_cc_inquiry_cancel(hdev, skb);
2148 case HCI_OP_EXIT_PERIODIC_INQ:
2149 hci_cc_exit_periodic_inq(hdev, skb);
2152 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2153 hci_cc_remote_name_req_cancel(hdev, skb);
2156 case HCI_OP_ROLE_DISCOVERY:
2157 hci_cc_role_discovery(hdev, skb);
2160 case HCI_OP_READ_LINK_POLICY:
2161 hci_cc_read_link_policy(hdev, skb);
2164 case HCI_OP_WRITE_LINK_POLICY:
2165 hci_cc_write_link_policy(hdev, skb);
2168 case HCI_OP_READ_DEF_LINK_POLICY:
2169 hci_cc_read_def_link_policy(hdev, skb);
2172 case HCI_OP_WRITE_DEF_LINK_POLICY:
2173 hci_cc_write_def_link_policy(hdev, skb);
2177 hci_cc_reset(hdev, skb);
2180 case HCI_OP_WRITE_LOCAL_NAME:
2181 hci_cc_write_local_name(hdev, skb);
2184 case HCI_OP_READ_LOCAL_NAME:
2185 hci_cc_read_local_name(hdev, skb);
2188 case HCI_OP_WRITE_AUTH_ENABLE:
2189 hci_cc_write_auth_enable(hdev, skb);
2192 case HCI_OP_WRITE_ENCRYPT_MODE:
2193 hci_cc_write_encrypt_mode(hdev, skb);
2196 case HCI_OP_WRITE_SCAN_ENABLE:
2197 hci_cc_write_scan_enable(hdev, skb);
2200 case HCI_OP_READ_CLASS_OF_DEV:
2201 hci_cc_read_class_of_dev(hdev, skb);
2204 case HCI_OP_WRITE_CLASS_OF_DEV:
2205 hci_cc_write_class_of_dev(hdev, skb);
2208 case HCI_OP_READ_VOICE_SETTING:
2209 hci_cc_read_voice_setting(hdev, skb);
2212 case HCI_OP_WRITE_VOICE_SETTING:
2213 hci_cc_write_voice_setting(hdev, skb);
2216 case HCI_OP_HOST_BUFFER_SIZE:
2217 hci_cc_host_buffer_size(hdev, skb);
2220 case HCI_OP_WRITE_SSP_MODE:
2221 hci_cc_write_ssp_mode(hdev, skb);
2224 case HCI_OP_READ_LOCAL_VERSION:
2225 hci_cc_read_local_version(hdev, skb);
2228 case HCI_OP_READ_LOCAL_COMMANDS:
2229 hci_cc_read_local_commands(hdev, skb);
2232 case HCI_OP_READ_LOCAL_FEATURES:
2233 hci_cc_read_local_features(hdev, skb);
2236 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2237 hci_cc_read_local_ext_features(hdev, skb);
2240 case HCI_OP_READ_BUFFER_SIZE:
2241 hci_cc_read_buffer_size(hdev, skb);
2244 case HCI_OP_READ_BD_ADDR:
2245 hci_cc_read_bd_addr(hdev, skb);
2248 case HCI_OP_READ_DATA_BLOCK_SIZE:
2249 hci_cc_read_data_block_size(hdev, skb);
2252 case HCI_OP_WRITE_CA_TIMEOUT:
2253 hci_cc_write_ca_timeout(hdev, skb);
2256 case HCI_OP_READ_FLOW_CONTROL_MODE:
2257 hci_cc_read_flow_control_mode(hdev, skb);
2260 case HCI_OP_READ_LOCAL_AMP_INFO:
2261 hci_cc_read_local_amp_info(hdev, skb);
2264 case HCI_OP_DELETE_STORED_LINK_KEY:
2265 hci_cc_delete_stored_link_key(hdev, skb);
2268 case HCI_OP_SET_EVENT_MASK:
2269 hci_cc_set_event_mask(hdev, skb);
2272 case HCI_OP_WRITE_INQUIRY_MODE:
2273 hci_cc_write_inquiry_mode(hdev, skb);
2276 case HCI_OP_READ_INQ_RSP_TX_POWER:
2277 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2280 case HCI_OP_SET_EVENT_FLT:
2281 hci_cc_set_event_flt(hdev, skb);
2284 case HCI_OP_PIN_CODE_REPLY:
2285 hci_cc_pin_code_reply(hdev, skb);
2288 case HCI_OP_PIN_CODE_NEG_REPLY:
2289 hci_cc_pin_code_neg_reply(hdev, skb);
2292 case HCI_OP_READ_LOCAL_OOB_DATA:
2293 hci_cc_read_local_oob_data_reply(hdev, skb);
2296 case HCI_OP_LE_READ_BUFFER_SIZE:
2297 hci_cc_le_read_buffer_size(hdev, skb);
2300 case HCI_OP_USER_CONFIRM_REPLY:
2301 hci_cc_user_confirm_reply(hdev, skb);
2304 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2305 hci_cc_user_confirm_neg_reply(hdev, skb);
2308 case HCI_OP_USER_PASSKEY_REPLY:
2309 hci_cc_user_passkey_reply(hdev, skb);
2312 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2313 hci_cc_user_passkey_neg_reply(hdev, skb);
2315 case HCI_OP_LE_SET_SCAN_PARAM:
2316 hci_cc_le_set_scan_param(hdev, skb);
2319 case HCI_OP_LE_SET_SCAN_ENABLE:
2320 hci_cc_le_set_scan_enable(hdev, skb);
2323 case HCI_OP_LE_LTK_REPLY:
2324 hci_cc_le_ltk_reply(hdev, skb);
2327 case HCI_OP_LE_LTK_NEG_REPLY:
2328 hci_cc_le_ltk_neg_reply(hdev, skb);
2331 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2332 hci_cc_write_le_host_supported(hdev, skb);
/* Unhandled opcodes are only logged, never an error. */
2336 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
/* HCI_OP_NOP completions must not cancel a real command's timer. */
2340 if (ev->opcode != HCI_OP_NOP)
2341 del_timer(&hdev->cmd_timer);
2344 atomic_set(&hdev->cmd_cnt, 1);
2345 if (!skb_queue_empty(&hdev->cmd_q))
2346 queue_work(hdev->workqueue, &hdev->cmd_work);
/*
 * HCI Command Status event dispatcher.  Like hci_cmd_complete_evt but
 * for commands that report progress via a status byte: routes to the
 * per-opcode hci_cs_* handler, cancels the command timer (except for
 * HCI_OP_NOP), and refills the command credit when ev->ncmd allows and
 * no reset is in flight.
 * NOTE(review): the __u16 opcode declaration and switch braces are
 * elided in this extract.
 */
2350 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2352 struct hci_ev_cmd_status *ev = (void *) skb->data;
2355 skb_pull(skb, sizeof(*ev));
2357 opcode = __le16_to_cpu(ev->opcode);
2360 case HCI_OP_INQUIRY:
2361 hci_cs_inquiry(hdev, ev->status);
2364 case HCI_OP_CREATE_CONN:
2365 hci_cs_create_conn(hdev, ev->status);
2368 case HCI_OP_ADD_SCO:
2369 hci_cs_add_sco(hdev, ev->status);
2372 case HCI_OP_AUTH_REQUESTED:
2373 hci_cs_auth_requested(hdev, ev->status);
2376 case HCI_OP_SET_CONN_ENCRYPT:
2377 hci_cs_set_conn_encrypt(hdev, ev->status);
2380 case HCI_OP_REMOTE_NAME_REQ:
2381 hci_cs_remote_name_req(hdev, ev->status);
2384 case HCI_OP_READ_REMOTE_FEATURES:
2385 hci_cs_read_remote_features(hdev, ev->status);
2388 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2389 hci_cs_read_remote_ext_features(hdev, ev->status);
2392 case HCI_OP_SETUP_SYNC_CONN:
2393 hci_cs_setup_sync_conn(hdev, ev->status);
2396 case HCI_OP_SNIFF_MODE:
2397 hci_cs_sniff_mode(hdev, ev->status);
2400 case HCI_OP_EXIT_SNIFF_MODE:
2401 hci_cs_exit_sniff_mode(hdev, ev->status);
2404 case HCI_OP_DISCONNECT:
2405 hci_cs_disconnect(hdev, ev->status);
2408 case HCI_OP_LE_CREATE_CONN:
2409 hci_cs_le_create_conn(hdev, ev->status);
2412 case HCI_OP_LE_START_ENC:
2413 hci_cs_le_start_enc(hdev, ev->status);
2417 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2421 if (ev->opcode != HCI_OP_NOP)
2422 del_timer(&hdev->cmd_timer);
/* Only resume the command queue if credits were granted and no reset
 * is pending. */
2424 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2425 atomic_set(&hdev->cmd_cnt, 1);
2426 if (!skb_queue_empty(&hdev->cmd_q))
2427 queue_work(hdev->workqueue, &hdev->cmd_work);
/*
 * HCI Role Change event.  Updates HCI_LM_MASTER according to the new
 * role, clears the pending role-switch flag, and confirms the switch
 * to the protocol layer.
 * NOTE(review): NULL/status guards and the role branch are elided.
 */
2431 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2433 struct hci_ev_role_change *ev = (void *) skb->data;
2434 struct hci_conn *conn;
2436 BT_DBG("%s status %d", hdev->name, ev->status);
2440 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2444 conn->link_mode &= ~HCI_LM_MASTER;
2446 conn->link_mode |= HCI_LM_MASTER;
2449 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2451 hci_role_switch_cfm(conn, ev->status, ev->role);
2454 hci_dev_unlock(hdev);
/*
 * HCI Number of Completed Packets event (packet-based flow control).
 * For each handle, credits the completed packet count back to the
 * connection and to the matching per-type controller budget (ACL, LE
 * falling back to ACL when no dedicated LE buffers exist, SCO/eSCO),
 * clamped at the advertised maximum, then reschedules TX.
 * NOTE(review): the loop index declaration, case labels and braces are
 * elided in this extract.
 */
2457 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2459 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow-control mode. */
2462 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2463 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the length before trusting num_hndl. */
2467 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2468 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2469 BT_DBG("%s bad parameters", hdev->name);
2473 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2475 for (i = 0; i < ev->num_hndl; i++) {
2476 struct hci_comp_pkts_info *info = &ev->handles[i];
2477 struct hci_conn *conn;
2478 __u16 handle, count;
2480 handle = __le16_to_cpu(info->handle);
2481 count = __le16_to_cpu(info->count);
2483 conn = hci_conn_hash_lookup_handle(hdev, handle);
2487 conn->sent -= count;
2489 switch (conn->type) {
2491 hdev->acl_cnt += count;
2492 if (hdev->acl_cnt > hdev->acl_pkts)
2493 hdev->acl_cnt = hdev->acl_pkts;
/* LE shares the ACL buffer pool when le_pkts is zero. */
2497 if (hdev->le_pkts) {
2498 hdev->le_cnt += count;
2499 if (hdev->le_cnt > hdev->le_pkts)
2500 hdev->le_cnt = hdev->le_pkts;
2502 hdev->acl_cnt += count;
2503 if (hdev->acl_cnt > hdev->acl_pkts)
2504 hdev->acl_cnt = hdev->acl_pkts;
2509 hdev->sco_cnt += count;
2510 if (hdev->sco_cnt > hdev->sco_pkts)
2511 hdev->sco_cnt = hdev->sco_pkts;
2515 BT_ERR("Unknown type %d conn %p", conn->type, conn);
/* Freed credits may unblock queued traffic. */
2520 queue_work(hdev->workqueue, &hdev->tx_work);
/*
 * HCI Number of Completed Data Blocks event (block-based flow
 * control).  Block-based analogue of hci_num_comp_pkts_evt(): credits
 * completed blocks back to each connection and to the shared block
 * budget, clamped at num_blocks, then reschedules TX.
 * NOTE(review): the loop index declaration, case labels and braces are
 * elided in this extract.
 */
2523 static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2524 struct sk_buff *skb)
2526 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2529 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2530 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2534 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2535 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2536 BT_DBG("%s bad parameters", hdev->name);
2540 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2543 for (i = 0; i < ev->num_hndl; i++) {
2544 struct hci_comp_blocks_info *info = &ev->handles[i];
2545 struct hci_conn *conn;
2546 __u16 handle, block_count;
2548 handle = __le16_to_cpu(info->handle);
2549 block_count = __le16_to_cpu(info->blocks);
2551 conn = hci_conn_hash_lookup_handle(hdev, handle);
2555 conn->sent -= block_count;
2557 switch (conn->type) {
2559 hdev->block_cnt += block_count;
2560 if (hdev->block_cnt > hdev->num_blocks)
2561 hdev->block_cnt = hdev->num_blocks;
2565 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2570 queue_work(hdev->workqueue, &hdev->tx_work);
/*
 * HCI Mode Change event (active/sniff).  Records the new mode and
 * interval; if this change was not initiated locally, tracks power-
 * save state, and flushes any SCO setup deferred behind the mode
 * change.
 * NOTE(review): NULL/status guards and braces are elided.
 */
2573 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2575 struct hci_ev_mode_change *ev = (void *) skb->data;
2576 struct hci_conn *conn;
2578 BT_DBG("%s status %d", hdev->name, ev->status);
2582 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2584 conn->mode = ev->mode;
2585 conn->interval = __le16_to_cpu(ev->interval);
/* Remote-initiated change: mirror the mode into POWER_SAVE. */
2587 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2588 if (conn->mode == HCI_CM_ACTIVE)
2589 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2591 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2594 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2595 hci_sco_setup(conn, ev->status);
2598 hci_dev_unlock(hdev);
/*
 * HCI PIN Code Request event.  Extends the disconnect timeout for the
 * pairing, auto-rejects when the adapter is not pairable, and otherwise
 * forwards the request to mgmt (flagging whether a 16-digit secure PIN
 * is required for high security).
 * NOTE(review): NULL guards, the `secure` local and braces are elided.
 */
2601 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2603 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2604 struct hci_conn *conn;
2606 BT_DBG("%s", hdev->name);
2610 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2614 if (conn->state == BT_CONNECTED) {
2615 hci_conn_hold(conn);
/* Pairing may take a while; keep the link alive longer. */
2616 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2620 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2621 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2622 sizeof(ev->bdaddr), &ev->bdaddr);
2623 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2626 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2631 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2635 hci_dev_unlock(hdev);
/*
 * HCI Link Key Request event.  Looks up a stored key for the peer and
 * replies with it, unless policy forbids: debug keys when debug keys
 * are disabled, unauthenticated keys for links requiring MITM
 * protection, or combination keys with short PINs for high security.
 * Any refusal (or missing key) results in a negative reply.
 * NOTE(review): gotos, the not_found label and braces are elided.
 */
2638 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2640 struct hci_ev_link_key_req *ev = (void *) skb->data;
2641 struct hci_cp_link_key_reply cp;
2642 struct hci_conn *conn;
2643 struct link_key *key;
2645 BT_DBG("%s", hdev->name);
2647 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2652 key = hci_find_link_key(hdev, &ev->bdaddr);
2654 BT_DBG("%s link key not found for %s", hdev->name,
2655 batostr(&ev->bdaddr));
2659 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2660 batostr(&ev->bdaddr));
/* Debug keys are only usable when explicitly enabled. */
2662 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2663 key->type == HCI_LK_DEBUG_COMBINATION) {
2664 BT_DBG("%s ignoring debug key", hdev->name);
2668 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* auth_type bit 0 set means MITM protection was requested. */
2670 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2671 conn->auth_type != 0xff &&
2672 (conn->auth_type & 0x01)) {
2673 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2677 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2678 conn->pending_sec_level == BT_SECURITY_HIGH) {
2679 BT_DBG("%s ignoring key unauthenticated for high \
2680 security", hdev->name);
2684 conn->key_type = key->type;
2685 conn->pin_length = key->pin_len;
2688 bacpy(&cp.bdaddr, &ev->bdaddr);
2689 memcpy(cp.link_key, key->val, 16);
2691 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2693 hci_dev_unlock(hdev);
/* No usable key: tell the controller so pairing can start instead. */
2698 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2699 hci_dev_unlock(hdev);
/*
 * HCI Link Key Notification event (new key from pairing).  Refreshes
 * the connection's key type and disconnect timeout, then stores the
 * key when link-key persistence is enabled.
 * NOTE(review): the pin_len local declaration, NULL checks and braces
 * are elided in this extract.
 */
2702 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2704 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2705 struct hci_conn *conn;
2708 BT_DBG("%s", hdev->name);
2712 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2714 hci_conn_hold(conn);
2715 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2716 pin_len = conn->pin_length;
/* A changed-combination key keeps the original key's type. */
2718 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2719 conn->key_type = ev->key_type;
2724 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2725 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2726 ev->key_type, pin_len);
2728 hci_dev_unlock(hdev);
/*
 * HCI Read Clock Offset Complete event.  On success, caches the peer's
 * clock offset in its inquiry-cache entry (used to speed up later
 * paging) and refreshes the entry timestamp.
 */
2731 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2733 struct hci_ev_clock_offset *ev = (void *) skb->data;
2734 struct hci_conn *conn;
2736 BT_DBG("%s status %d", hdev->name, ev->status);
2740 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2741 if (conn && !ev->status) {
2742 struct inquiry_entry *ie;
2744 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2746 ie->data.clock_offset = ev->clock_offset;
2747 ie->timestamp = jiffies;
2751 hci_dev_unlock(hdev);
/*
 * HCI Connection Packet Type Changed event.  On success, records the
 * new packet type on the connection.
 */
2754 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2756 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2757 struct hci_conn *conn;
2759 BT_DBG("%s status %d", hdev->name, ev->status);
2763 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2764 if (conn && !ev->status)
2765 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2767 hci_dev_unlock(hdev);
/*
 * HCI Page Scan Repetition Mode Change event.  Updates the cached page
 * scan repetition mode for the peer and refreshes its inquiry-cache
 * timestamp.
 */
2770 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2772 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2773 struct inquiry_entry *ie;
2775 BT_DBG("%s", hdev->name);
2779 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2781 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2782 ie->timestamp = jiffies;
2785 hci_dev_unlock(hdev);
/*
 * HCI Inquiry Result with RSSI event.  Two wire formats exist; the
 * payload size per response tells them apart: the variant with an
 * extra pscan_mode field, or the plain with-RSSI variant.  Each
 * response is fed into the inquiry cache and reported to mgmt with its
 * RSSI.
 * NOTE(review): the num_rsp guard, if/else braces and the cache-update
 * trailing arguments are elided in this extract.
 */
2788 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2790 struct inquiry_data data;
2791 int num_rsp = *((__u8 *) skb->data);
2792 bool name_known, ssp;
2794 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Distinguish the two response layouts by per-entry size. */
2801 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2802 struct inquiry_info_with_rssi_and_pscan_mode *info;
2803 info = (void *) (skb->data + 1);
2805 for (; num_rsp; num_rsp--, info++) {
2806 bacpy(&data.bdaddr, &info->bdaddr);
2807 data.pscan_rep_mode = info->pscan_rep_mode;
2808 data.pscan_period_mode = info->pscan_period_mode;
2809 data.pscan_mode = info->pscan_mode;
2810 memcpy(data.dev_class, info->dev_class, 3);
2811 data.clock_offset = info->clock_offset;
2812 data.rssi = info->rssi;
2813 data.ssp_mode = 0x00;
2815 name_known = hci_inquiry_cache_update(hdev, &data,
2817 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2818 info->dev_class, info->rssi,
2819 !name_known, ssp, NULL, 0);
2822 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2824 for (; num_rsp; num_rsp--, info++) {
2825 bacpy(&data.bdaddr, &info->bdaddr);
2826 data.pscan_rep_mode = info->pscan_rep_mode;
2827 data.pscan_period_mode = info->pscan_period_mode;
2828 data.pscan_mode = 0x00;
2829 memcpy(data.dev_class, info->dev_class, 3);
2830 data.clock_offset = info->clock_offset;
2831 data.rssi = info->rssi;
2832 data.ssp_mode = 0x00;
2833 name_known = hci_inquiry_cache_update(hdev, &data,
2835 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2836 info->dev_class, info->rssi,
2837 !name_known, ssp, NULL, 0);
2841 hci_dev_unlock(hdev);
/*
 * HCI Read Remote Extended Features Complete event.  For feature page
 * 1, records the remote host's SSP support (in the inquiry cache and
 * on the connection), then continues connection setup just like
 * hci_remote_features_evt(): name request or mgmt notification, and
 * completion when no authentication is needed.
 * NOTE(review): guards, gotos and braces are elided in this extract.
 */
2844 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2846 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2847 struct hci_conn *conn;
2849 BT_DBG("%s", hdev->name);
2853 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Page 1 carries the remote host feature bits (SSP among them). */
2857 if (!ev->status && ev->page == 0x01) {
2858 struct inquiry_entry *ie;
2860 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2862 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2864 if (ev->features[0] & LMP_HOST_SSP)
2865 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2868 if (conn->state != BT_CONFIG)
2872 struct hci_cp_remote_name_req cp;
2873 memset(&cp, 0, sizeof(cp));
2874 bacpy(&cp.bdaddr, &conn->dst);
2875 cp.pscan_rep_mode = 0x02;
2876 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2877 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2878 mgmt_device_connected(hdev, &conn->dst, conn->type,
2879 conn->dst_type, 0, NULL, 0,
2882 if (!hci_outgoing_auth_needed(hdev, conn)) {
2883 conn->state = BT_CONNECTED;
2884 hci_proto_connect_cfm(conn, ev->status);
2889 hci_dev_unlock(hdev);
/*
 * HCI Synchronous Connection Complete event (SCO/eSCO).  On success,
 * records the handle and marks the link connected.  For a set of
 * known eSCO failure codes, retries once as plain SCO by restricting
 * the packet-type mask; any other failure closes the link.  The result
 * is always confirmed to the protocol layer.
 * NOTE(review): NULL checks, case labels, gotos and braces are elided
 * in this extract.
 */
2892 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2894 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2895 struct hci_conn *conn;
2897 BT_DBG("%s status %d", hdev->name, ev->status);
2901 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* The completion may be eSCO for a connection we attempted as SCO. */
2903 if (ev->link_type == ESCO_LINK)
2906 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2910 conn->type = SCO_LINK;
2913 switch (ev->status) {
2915 conn->handle = __le16_to_cpu(ev->handle);
2916 conn->state = BT_CONNECTED;
2918 hci_conn_hold_device(conn);
2919 hci_conn_add_sysfs(conn);
2922 case 0x11: /* Unsupported Feature or Parameter Value */
2923 case 0x1c: /* SCO interval rejected */
2924 case 0x1a: /* Unsupported Remote Feature */
2925 case 0x1f: /* Unspecified error */
/* First failure on an outgoing link: retry with SCO packet types. */
2926 if (conn->out && conn->attempt < 2) {
2927 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2928 (hdev->esco_type & EDR_ESCO_MASK);
2929 hci_setup_sync(conn, conn->link->handle);
2935 conn->state = BT_CLOSED;
2939 hci_proto_connect_cfm(conn, ev->status);
2944 hci_dev_unlock(hdev);
2947 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2949 BT_DBG("%s", hdev->name);
2952 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2954 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2956 BT_DBG("%s status %d", hdev->name, ev->status);
2959 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2961 struct inquiry_data data;
2962 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2963 int num_rsp = *((__u8 *) skb->data);
2965 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2972 for (; num_rsp; num_rsp--, info++) {
2973 bool name_known, ssp;
2975 bacpy(&data.bdaddr, &info->bdaddr);
2976 data.pscan_rep_mode = info->pscan_rep_mode;
2977 data.pscan_period_mode = info->pscan_period_mode;
2978 data.pscan_mode = 0x00;
2979 memcpy(data.dev_class, info->dev_class, 3);
2980 data.clock_offset = info->clock_offset;
2981 data.rssi = info->rssi;
2982 data.ssp_mode = 0x01;
2984 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2985 name_known = eir_has_data_type(info->data,
2991 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2993 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2994 info->dev_class, info->rssi, !name_known,
2995 ssp, info->data, sizeof(info->data));
2998 hci_dev_unlock(hdev);
3001 static inline u8 hci_get_auth_req(struct hci_conn *conn)
3003 /* If remote requests dedicated bonding follow that lead */
3004 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3005 /* If both remote and local IO capabilities allow MITM
3006 * protection then require it, otherwise don't */
3007 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3013 /* If remote requests no-bonding follow that lead */
3014 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3015 return conn->remote_auth | (conn->auth_type & 0x01);
3017 return conn->auth_type;
3020 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3022 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3023 struct hci_conn *conn;
3025 BT_DBG("%s", hdev->name);
3029 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3033 hci_conn_hold(conn);
3035 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3038 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3039 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3040 struct hci_cp_io_capability_reply cp;
3042 bacpy(&cp.bdaddr, &ev->bdaddr);
3043 /* Change the IO capability from KeyboardDisplay
3044 * to DisplayYesNo as it is not supported by BT spec. */
3045 cp.capability = (conn->io_capability == 0x04) ?
3046 0x01 : conn->io_capability;
3047 conn->auth_type = hci_get_auth_req(conn);
3048 cp.authentication = conn->auth_type;
3050 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3051 hci_find_remote_oob_data(hdev, &conn->dst))
3056 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3059 struct hci_cp_io_capability_neg_reply cp;
3061 bacpy(&cp.bdaddr, &ev->bdaddr);
3062 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3064 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3069 hci_dev_unlock(hdev);
3072 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3074 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3075 struct hci_conn *conn;
3077 BT_DBG("%s", hdev->name);
3081 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3085 conn->remote_cap = ev->capability;
3086 conn->remote_auth = ev->authentication;
3088 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3091 hci_dev_unlock(hdev);
3094 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3095 struct sk_buff *skb)
3097 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3098 int loc_mitm, rem_mitm, confirm_hint = 0;
3099 struct hci_conn *conn;
3101 BT_DBG("%s", hdev->name);
3105 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3108 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3112 loc_mitm = (conn->auth_type & 0x01);
3113 rem_mitm = (conn->remote_auth & 0x01);
3115 /* If we require MITM but the remote device can't provide that
3116 * (it has NoInputNoOutput) then reject the confirmation
3117 * request. The only exception is when we're dedicated bonding
3118 * initiators (connect_cfm_cb set) since then we always have the MITM
3120 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3121 BT_DBG("Rejecting request: remote device can't provide MITM");
3122 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3123 sizeof(ev->bdaddr), &ev->bdaddr);
3127 /* If no side requires MITM protection; auto-accept */
3128 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3129 (!rem_mitm || conn->io_capability == 0x03)) {
3131 /* If we're not the initiators request authorization to
3132 * proceed from user space (mgmt_user_confirm with
3133 * confirm_hint set to 1). */
3134 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3135 BT_DBG("Confirming auto-accept as acceptor");
3140 BT_DBG("Auto-accept of user confirmation with %ums delay",
3141 hdev->auto_accept_delay);
3143 if (hdev->auto_accept_delay > 0) {
3144 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3145 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3149 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3150 sizeof(ev->bdaddr), &ev->bdaddr);
3155 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3159 hci_dev_unlock(hdev);
3162 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3163 struct sk_buff *skb)
3165 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3167 BT_DBG("%s", hdev->name);
3171 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3172 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3174 hci_dev_unlock(hdev);
3177 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3179 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3180 struct hci_conn *conn;
3182 BT_DBG("%s", hdev->name);
3186 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3190 /* To avoid duplicate auth_failed events to user space we check
3191 * the HCI_CONN_AUTH_PEND flag which will be set if we
3192 * initiated the authentication. A traditional auth_complete
3193 * event gets always produced as initiator and is also mapped to
3194 * the mgmt_auth_failed event */
3195 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3196 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3202 hci_dev_unlock(hdev);
3205 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3207 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3208 struct inquiry_entry *ie;
3210 BT_DBG("%s", hdev->name);
3214 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3216 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3218 hci_dev_unlock(hdev);
3221 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3222 struct sk_buff *skb)
3224 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3225 struct oob_data *data;
3227 BT_DBG("%s", hdev->name);
3231 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3234 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3236 struct hci_cp_remote_oob_data_reply cp;
3238 bacpy(&cp.bdaddr, &ev->bdaddr);
3239 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3240 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3242 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3245 struct hci_cp_remote_oob_data_neg_reply cp;
3247 bacpy(&cp.bdaddr, &ev->bdaddr);
3248 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3253 hci_dev_unlock(hdev);
3256 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3258 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3259 struct hci_conn *conn;
3261 BT_DBG("%s status %d", hdev->name, ev->status);
3265 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3267 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3269 BT_ERR("No memory for new connection");
3270 hci_dev_unlock(hdev);
3274 conn->dst_type = ev->bdaddr_type;
3278 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3279 conn->dst_type, ev->status);
3280 hci_proto_connect_cfm(conn, ev->status);
3281 conn->state = BT_CLOSED;
3286 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3287 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3288 conn->dst_type, 0, NULL, 0, NULL);
3290 conn->sec_level = BT_SECURITY_LOW;
3291 conn->handle = __le16_to_cpu(ev->handle);
3292 conn->state = BT_CONNECTED;
3294 hci_conn_hold_device(conn);
3295 hci_conn_add_sysfs(conn);
3297 hci_proto_connect_cfm(conn, ev->status);
3300 hci_dev_unlock(hdev);
3303 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3304 struct sk_buff *skb)
3306 u8 num_reports = skb->data[0];
3307 void *ptr = &skb->data[1];
3312 while (num_reports--) {
3313 struct hci_ev_le_advertising_info *ev = ptr;
3315 hci_add_adv_entry(hdev, ev);
3317 rssi = ev->data[ev->length];
3318 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3319 NULL, rssi, 0, 1, ev->data, ev->length);
3321 ptr += sizeof(*ev) + ev->length + 1;
3324 hci_dev_unlock(hdev);
3327 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3328 struct sk_buff *skb)
3330 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3331 struct hci_cp_le_ltk_reply cp;
3332 struct hci_cp_le_ltk_neg_reply neg;
3333 struct hci_conn *conn;
3334 struct smp_ltk *ltk;
3336 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
3340 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3344 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3348 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3349 cp.handle = cpu_to_le16(conn->handle);
3351 if (ltk->authenticated)
3352 conn->sec_level = BT_SECURITY_HIGH;
3354 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3356 if (ltk->type & HCI_SMP_STK) {
3357 list_del(<k->list);
3361 hci_dev_unlock(hdev);
3366 neg.handle = ev->handle;
3367 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3368 hci_dev_unlock(hdev);
3371 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3373 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3375 skb_pull(skb, sizeof(*le_ev));
3377 switch (le_ev->subevent) {
3378 case HCI_EV_LE_CONN_COMPLETE:
3379 hci_le_conn_complete_evt(hdev, skb);
3382 case HCI_EV_LE_ADVERTISING_REPORT:
3383 hci_le_adv_report_evt(hdev, skb);
3386 case HCI_EV_LE_LTK_REQ:
3387 hci_le_ltk_request_evt(hdev, skb);
3395 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3397 struct hci_event_hdr *hdr = (void *) skb->data;
3398 __u8 event = hdr->evt;
3400 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3403 case HCI_EV_INQUIRY_COMPLETE:
3404 hci_inquiry_complete_evt(hdev, skb);
3407 case HCI_EV_INQUIRY_RESULT:
3408 hci_inquiry_result_evt(hdev, skb);
3411 case HCI_EV_CONN_COMPLETE:
3412 hci_conn_complete_evt(hdev, skb);
3415 case HCI_EV_CONN_REQUEST:
3416 hci_conn_request_evt(hdev, skb);
3419 case HCI_EV_DISCONN_COMPLETE:
3420 hci_disconn_complete_evt(hdev, skb);
3423 case HCI_EV_AUTH_COMPLETE:
3424 hci_auth_complete_evt(hdev, skb);
3427 case HCI_EV_REMOTE_NAME:
3428 hci_remote_name_evt(hdev, skb);
3431 case HCI_EV_ENCRYPT_CHANGE:
3432 hci_encrypt_change_evt(hdev, skb);
3435 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3436 hci_change_link_key_complete_evt(hdev, skb);
3439 case HCI_EV_REMOTE_FEATURES:
3440 hci_remote_features_evt(hdev, skb);
3443 case HCI_EV_REMOTE_VERSION:
3444 hci_remote_version_evt(hdev, skb);
3447 case HCI_EV_QOS_SETUP_COMPLETE:
3448 hci_qos_setup_complete_evt(hdev, skb);
3451 case HCI_EV_CMD_COMPLETE:
3452 hci_cmd_complete_evt(hdev, skb);
3455 case HCI_EV_CMD_STATUS:
3456 hci_cmd_status_evt(hdev, skb);
3459 case HCI_EV_ROLE_CHANGE:
3460 hci_role_change_evt(hdev, skb);
3463 case HCI_EV_NUM_COMP_PKTS:
3464 hci_num_comp_pkts_evt(hdev, skb);
3467 case HCI_EV_MODE_CHANGE:
3468 hci_mode_change_evt(hdev, skb);
3471 case HCI_EV_PIN_CODE_REQ:
3472 hci_pin_code_request_evt(hdev, skb);
3475 case HCI_EV_LINK_KEY_REQ:
3476 hci_link_key_request_evt(hdev, skb);
3479 case HCI_EV_LINK_KEY_NOTIFY:
3480 hci_link_key_notify_evt(hdev, skb);
3483 case HCI_EV_CLOCK_OFFSET:
3484 hci_clock_offset_evt(hdev, skb);
3487 case HCI_EV_PKT_TYPE_CHANGE:
3488 hci_pkt_type_change_evt(hdev, skb);
3491 case HCI_EV_PSCAN_REP_MODE:
3492 hci_pscan_rep_mode_evt(hdev, skb);
3495 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3496 hci_inquiry_result_with_rssi_evt(hdev, skb);
3499 case HCI_EV_REMOTE_EXT_FEATURES:
3500 hci_remote_ext_features_evt(hdev, skb);
3503 case HCI_EV_SYNC_CONN_COMPLETE:
3504 hci_sync_conn_complete_evt(hdev, skb);
3507 case HCI_EV_SYNC_CONN_CHANGED:
3508 hci_sync_conn_changed_evt(hdev, skb);
3511 case HCI_EV_SNIFF_SUBRATE:
3512 hci_sniff_subrate_evt(hdev, skb);
3515 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3516 hci_extended_inquiry_result_evt(hdev, skb);
3519 case HCI_EV_IO_CAPA_REQUEST:
3520 hci_io_capa_request_evt(hdev, skb);
3523 case HCI_EV_IO_CAPA_REPLY:
3524 hci_io_capa_reply_evt(hdev, skb);
3527 case HCI_EV_USER_CONFIRM_REQUEST:
3528 hci_user_confirm_request_evt(hdev, skb);
3531 case HCI_EV_USER_PASSKEY_REQUEST:
3532 hci_user_passkey_request_evt(hdev, skb);
3535 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3536 hci_simple_pair_complete_evt(hdev, skb);
3539 case HCI_EV_REMOTE_HOST_FEATURES:
3540 hci_remote_host_features_evt(hdev, skb);
3543 case HCI_EV_LE_META:
3544 hci_le_meta_evt(hdev, skb);
3547 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3548 hci_remote_oob_data_request_evt(hdev, skb);
3551 case HCI_EV_NUM_COMP_BLOCKS:
3552 hci_num_comp_blocks_evt(hdev, skb);
3556 BT_DBG("%s event 0x%x", hdev->name, event);
3561 hdev->stat.evt_rx++;