/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
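/*
 * Example (illustrative sketch, not part of the original file): a caller
 * builds a request callback that queues commands with hci_req_add() and
 * runs it synchronously through hci_req_sync(). The function name below
 * is a placeholder; the opcode and constants are used elsewhere in this
 * file.
 *
 *	static void example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 mode = (__u8) opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &mode);
 *	}
 *
 *	err = hci_req_sync(hdev, example_req, SCAN_PAGE, HCI_INIT_TIMEOUT);
 */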
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	hci_req_init(&init_req, hdev);

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}

	skb_queue_purge(&hdev->driver_init);

	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
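/*
 * Example (illustrative): the event mask is a 64-bit little-endian
 * bitfield, so byte/bit positions map directly to spec event codes.
 * For instance, events[4] |= 0x04 above sets global bit 4 * 8 + 2 = 34,
 * which per the core specification's Set Event Mask layout enables the
 * event with code 0x23 (Read Remote Extended Features Complete).
 */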
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}
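/*
 * Example (illustrative sketch, not part of the original file): every
 * successful hci_dev_get() must be balanced with hci_dev_put() (from
 * hci_core.h) once the caller is done with the device.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("%s", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */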
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
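/*
 * Example (illustrative, userspace side; assumes the BlueZ headers
 * <bluetooth/bluetooth.h> and <bluetooth/hci.h>): the HCIINQUIRY ioctl
 * takes a struct hci_inquiry_req followed by room for the responses.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { { 0 } };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;	// inquiry length, in 1.28 s units
 *	buf.ir.num_rsp = 8;
 *	buf.ir.lap[0]  = 0x33;	// General Inquiry Access Code 0x9e8b33
 *	buf.ir.lap[1]  = 0x8b;
 *	buf.ir.lap[2]  = 0x9e;
 *
 *	ioctl(hci_socket_fd, HCIINQUIRY, &buf);
 */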
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
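/*
 * Example (illustrative): each advertising-data entry written above is a
 * length/type/value triplet. A TX power entry for -12 dBm occupies three
 * bytes:
 *
 *	0x02		length (type byte plus one byte of data)
 *	EIR_TX_POWER	type
 *	0xf4		(u8) -12
 *
 * and a shortened name "bt" would be { 0x03, EIR_NAME_SHORT, 'b', 't' }.
 */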
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non-BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
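/*
 * Example (illustrative): for HCISETACLMTU/HCISETSCOMTU the 32-bit
 * dev_opt packs the packet count in its low 16 bits and the MTU in its
 * high 16 bits, so on a little-endian host a caller would set up
 *
 *	dr.dev_opt = (310 << 16) | 10;	// acl_mtu = 310, acl_pkts = 10
 */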
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
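/*
 * Example (illustrative): an unauthenticated combination key (type 0x04)
 * from a connection where both sides required general bonding
 * (auth_type 0x04, remote_auth 0x04) passes the "> 0x01" check above and
 * is stored persistently; the same key type from a no-bonding pairing
 * (0x00/0x01 on both sides) falls through every check and is dropped.
 */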
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
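/*
 * Example (illustrative sketch of the driver side): a transport driver
 * hands a complete received packet to the core by tagging its type and
 * owner before calling hci_recv_frame(). The variables are placeholders.
 *
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(skb);
 */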
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
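/*
 * Example (illustrative): a driver receiving an event packet in two
 * pieces can push each piece as it arrives; hci_recv_fragment() keeps
 * the partial frame in hdev->reassembly[] until scb->expect drops to
 * zero, then feeds it to hci_recv_frame(). The buffer is a placeholder.
 *
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, 2);		// header
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf + 2, plen);	// payload
 */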
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
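/*
 * Example (illustrative sketch, not part of the original file): batching
 * two commands into one request and running it asynchronously. The
 * completion callback argument may be NULL, as in hci_init1_req() above.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);
 *	err = hci_req_run(&req, NULL);
 */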
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	skb_queue_tail(&req->cmd_q, skb);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
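/*
 * Example (illustrative): with hdev->acl_cnt == 8 free ACL slots and
 * num == 3 eligible ACL connections, the least-busy connection gets a
 * quote of 8 / 3 = 2 packets this round; when the division yields 0,
 * the quote degrades to 1 so a connection is never starved outright.
 */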
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
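/*
 * Example (illustrative): with hdev->block_len == 16, a 100-byte ACL
 * frame (96 payload bytes after the 4-byte ACL header) occupies
 * DIV_ROUND_UP(96, 16) = 6 data blocks of the controller's buffer.
 */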
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
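/* Contrast with hci_sched_acl_pkt() above: packet-based flow control
 * spends exactly one acl_cnt credit per frame, while this block-based
 * path charges __get_blocks() data blocks per frame against both
 * block_cnt and the channel quote, so a single large skb may consume
 * several credits in one loop iteration.
 */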
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
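/* Note the dual accounting: a controller that reports no dedicated LE
 * buffers (hdev->le_pkts == 0) shares the ACL pool, so the loop works on
 * a local copy of either le_cnt or acl_cnt and writes the remainder back
 * to whichever counter it borrowed from.
 */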
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
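/* The 16-bit handle field of the ACL header packs a 12-bit connection
 * handle together with the packet-boundary and broadcast flags:
 * hci_handle() masks with 0x0fff and hci_flags() shifts down by 12.
 * For example, a raw value of 0x2042 splits into handle 0x042 and
 * flags 0x2 (ACL_START, the first fragment of an L2CAP PDU).
 */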
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
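/* Commands are queued in batches ("requests"); only the first command of
 * a batch has bt_cb(skb)->req.start set. So if cmd_q is empty, or its
 * head starts a new batch, every command of the current request has
 * already been sent, hence "complete".
 */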
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	if (status) {
		hci_req_cmd_complete(hdev, opcode, status);
		return;
	}

	/* No need to handle success status if there are more commands */
	if (!hci_req_is_complete(hdev))
		return;

	if (hdev->sent_cmd)
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

	/* If the request doesn't have a complete callback or there
	 * are other commands/requests in the hdev queue we consider
	 * this request as completed.
	 */
	if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
		hci_req_cmd_complete(hdev, opcode, status);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
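/* The three bytes above are the General Inquiry Access Code's LAP,
 * 0x9e8b33, stored in the little-endian byte order the Inquiry command
 * parameter expects: 0x33, 0x8b, 0x9e.
 */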
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}