2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
48 #include <linux/uaccess.h>
49 #include <asm/unaligned.h>
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
/* Auto power-off delay (ms) after an automatic power-on */
#define AUTO_OFF_TIMEOUT 2000

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
/* ---- HCI notifications ---- */

/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75 /* ---- HCI requests ---- */
77 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
79 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81 /* If this is the init phase check if the completed command matches
82 * the last init command, and if not just return.
84 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
88 /* Some CSR based controllers generate a spontaneous
89 * reset complete event during init and any pending
90 * command will never be completed. In such a case we
91 * need to resend whatever was the last sent
95 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
98 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 skb_queue_head(&hdev->cmd_q, skb);
101 queue_work(hdev->workqueue, &hdev->cmd_work);
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
125 /* Execute request and wait for completion. */
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127 unsigned long opt, __u32 timeout)
129 DECLARE_WAITQUEUE(wait, current);
132 BT_DBG("%s start", hdev->name);
134 hdev->req_status = HCI_REQ_PEND;
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
140 schedule_timeout(timeout);
142 remove_wait_queue(&hdev->req_wait_q, &wait);
144 if (signal_pending(current))
147 switch (hdev->req_status) {
149 err = -bt_to_errno(hdev->req_result);
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
161 hdev->req_status = hdev->req_result = 0;
163 BT_DBG("%s end: err %d", hdev->name, err);
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 unsigned long opt, __u32 timeout)
173 if (!test_bit(HCI_UP, &hdev->flags))
176 /* Serialize all requests */
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186 BT_DBG("%s %ld", hdev->name, opt);
189 set_bit(HCI_RESET, &hdev->flags);
190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
193 static void bredr_init(struct hci_dev *hdev)
195 struct hci_cp_delete_stored_link_key cp;
199 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201 /* Mandatory initialization */
204 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
205 set_bit(HCI_RESET, &hdev->flags);
206 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
209 /* Read Local Supported Features */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
212 /* Read Local Version */
213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
216 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
218 /* Read BD Address */
219 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221 /* Read Class of Device */
222 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224 /* Read Local Name */
225 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
227 /* Read Voice Setting */
228 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
230 /* Optional initialization */
232 /* Clear Event Filters */
233 flt_type = HCI_FLT_CLEAR_ALL;
234 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
236 /* Connection accept timeout ~20 secs */
237 param = cpu_to_le16(0x7d00);
238 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
240 bacpy(&cp.bdaddr, BDADDR_ANY);
242 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
245 static void amp_init(struct hci_dev *hdev)
247 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
250 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252 /* Read Local Version */
253 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
256 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
260 BT_DBG("%s %ld", hdev->name, opt);
262 /* Driver initialization */
264 /* Special commands */
265 while ((skb = skb_dequeue(&hdev->driver_init))) {
266 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
267 skb->dev = (void *) hdev;
269 skb_queue_tail(&hdev->cmd_q, skb);
270 queue_work(hdev->workqueue, &hdev->cmd_work);
272 skb_queue_purge(&hdev->driver_init);
274 switch (hdev->dev_type) {
284 BT_ERR("Unknown device type %d", hdev->dev_type);
290 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
292 BT_DBG("%s", hdev->name);
294 /* Read LE buffer size */
295 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
298 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
302 BT_DBG("%s %x", hdev->name, scan);
304 /* Inquiry and Page scans */
305 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
308 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
312 BT_DBG("%s %x", hdev->name, auth);
315 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
318 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
322 BT_DBG("%s %x", hdev->name, encrypt);
325 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
328 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330 __le16 policy = cpu_to_le16(opt);
332 BT_DBG("%s %x", hdev->name, policy);
334 /* Default link policy */
335 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
338 /* Get HCI device by index.
339 * Device is held on return. */
340 struct hci_dev *hci_dev_get(int index)
342 struct hci_dev *hdev = NULL, *d;
349 read_lock(&hci_dev_list_lock);
350 list_for_each_entry(d, &hci_dev_list, list) {
351 if (d->id == index) {
352 hdev = hci_dev_hold(d);
356 read_unlock(&hci_dev_list_lock);
360 /* ---- Inquiry support ---- */
362 bool hci_discovery_active(struct hci_dev *hdev)
364 struct discovery_state *discov = &hdev->discovery;
366 switch (discov->state) {
367 case DISCOVERY_FINDING:
368 case DISCOVERY_RESOLVING:
376 void hci_discovery_set_state(struct hci_dev *hdev, int state)
378 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
380 if (hdev->discovery.state == state)
384 case DISCOVERY_STOPPED:
385 if (hdev->discovery.state != DISCOVERY_STARTING)
386 mgmt_discovering(hdev, 0);
387 hdev->discovery.type = 0;
389 case DISCOVERY_STARTING:
391 case DISCOVERY_FINDING:
392 mgmt_discovering(hdev, 1);
394 case DISCOVERY_RESOLVING:
396 case DISCOVERY_STOPPING:
400 hdev->discovery.state = state;
403 static void inquiry_cache_flush(struct hci_dev *hdev)
405 struct discovery_state *cache = &hdev->discovery;
406 struct inquiry_entry *p, *n;
408 list_for_each_entry_safe(p, n, &cache->all, all) {
413 INIT_LIST_HEAD(&cache->unknown);
414 INIT_LIST_HEAD(&cache->resolve);
417 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
419 struct discovery_state *cache = &hdev->discovery;
420 struct inquiry_entry *e;
422 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424 list_for_each_entry(e, &cache->all, all) {
425 if (!bacmp(&e->data.bdaddr, bdaddr))
432 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
435 struct discovery_state *cache = &hdev->discovery;
436 struct inquiry_entry *e;
438 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
440 list_for_each_entry(e, &cache->unknown, list) {
441 if (!bacmp(&e->data.bdaddr, bdaddr))
448 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
452 struct discovery_state *cache = &hdev->discovery;
453 struct inquiry_entry *e;
455 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
457 list_for_each_entry(e, &cache->resolve, list) {
458 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
460 if (!bacmp(&e->data.bdaddr, bdaddr))
467 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
468 struct inquiry_entry *ie)
470 struct discovery_state *cache = &hdev->discovery;
471 struct list_head *pos = &cache->resolve;
472 struct inquiry_entry *p;
476 list_for_each_entry(p, &cache->resolve, list) {
477 if (p->name_state != NAME_PENDING &&
478 abs(p->data.rssi) >= abs(ie->data.rssi))
483 list_add(&ie->list, pos);
486 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
487 bool name_known, bool *ssp)
489 struct discovery_state *cache = &hdev->discovery;
490 struct inquiry_entry *ie;
492 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
495 *ssp = data->ssp_mode;
497 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
499 if (ie->data.ssp_mode && ssp)
502 if (ie->name_state == NAME_NEEDED &&
503 data->rssi != ie->data.rssi) {
504 ie->data.rssi = data->rssi;
505 hci_inquiry_cache_update_resolve(hdev, ie);
511 /* Entry not in the cache. Add new one. */
512 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
516 list_add(&ie->all, &cache->all);
519 ie->name_state = NAME_KNOWN;
521 ie->name_state = NAME_NOT_KNOWN;
522 list_add(&ie->list, &cache->unknown);
526 if (name_known && ie->name_state != NAME_KNOWN &&
527 ie->name_state != NAME_PENDING) {
528 ie->name_state = NAME_KNOWN;
532 memcpy(&ie->data, data, sizeof(*data));
533 ie->timestamp = jiffies;
534 cache->timestamp = jiffies;
536 if (ie->name_state == NAME_NOT_KNOWN)
542 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
544 struct discovery_state *cache = &hdev->discovery;
545 struct inquiry_info *info = (struct inquiry_info *) buf;
546 struct inquiry_entry *e;
549 list_for_each_entry(e, &cache->all, all) {
550 struct inquiry_data *data = &e->data;
555 bacpy(&info->bdaddr, &data->bdaddr);
556 info->pscan_rep_mode = data->pscan_rep_mode;
557 info->pscan_period_mode = data->pscan_period_mode;
558 info->pscan_mode = data->pscan_mode;
559 memcpy(info->dev_class, data->dev_class, 3);
560 info->clock_offset = data->clock_offset;
566 BT_DBG("cache %p, copied %d", cache, copied);
570 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
572 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
573 struct hci_cp_inquiry cp;
575 BT_DBG("%s", hdev->name);
577 if (test_bit(HCI_INQUIRY, &hdev->flags))
581 memcpy(&cp.lap, &ir->lap, 3);
582 cp.length = ir->length;
583 cp.num_rsp = ir->num_rsp;
584 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
587 int hci_inquiry(void __user *arg)
589 __u8 __user *ptr = arg;
590 struct hci_inquiry_req ir;
591 struct hci_dev *hdev;
592 int err = 0, do_inquiry = 0, max_rsp;
596 if (copy_from_user(&ir, ptr, sizeof(ir)))
599 hdev = hci_dev_get(ir.dev_id);
604 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
605 inquiry_cache_empty(hdev) ||
606 ir.flags & IREQ_CACHE_FLUSH) {
607 inquiry_cache_flush(hdev);
610 hci_dev_unlock(hdev);
612 timeo = ir.length * msecs_to_jiffies(2000);
615 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
620 /* for unlimited number of responses we will use buffer with 255 entries */
621 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
623 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
624 * copy it to the user space.
626 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
633 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
634 hci_dev_unlock(hdev);
636 BT_DBG("num_rsp %d", ir.num_rsp);
638 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
640 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
653 /* ---- HCI ioctl helpers ---- */
655 int hci_dev_open(__u16 dev)
657 struct hci_dev *hdev;
660 hdev = hci_dev_get(dev);
664 BT_DBG("%s %p", hdev->name, hdev);
668 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
673 if (test_bit(HCI_UP, &hdev->flags)) {
678 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
679 set_bit(HCI_RAW, &hdev->flags);
681 /* Treat all non BR/EDR controllers as raw devices if
682 enable_hs is not set */
683 if (hdev->dev_type != HCI_BREDR && !enable_hs)
684 set_bit(HCI_RAW, &hdev->flags);
686 if (hdev->open(hdev)) {
691 if (!test_bit(HCI_RAW, &hdev->flags)) {
692 atomic_set(&hdev->cmd_cnt, 1);
693 set_bit(HCI_INIT, &hdev->flags);
694 hdev->init_last_cmd = 0;
696 ret = __hci_request(hdev, hci_init_req, 0,
697 msecs_to_jiffies(HCI_INIT_TIMEOUT));
699 if (lmp_host_le_capable(hdev))
700 ret = __hci_request(hdev, hci_le_init_req, 0,
701 msecs_to_jiffies(HCI_INIT_TIMEOUT));
703 clear_bit(HCI_INIT, &hdev->flags);
708 set_bit(HCI_UP, &hdev->flags);
709 hci_notify(hdev, HCI_DEV_UP);
710 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
712 mgmt_powered(hdev, 1);
713 hci_dev_unlock(hdev);
716 /* Init failed, cleanup */
717 flush_work(&hdev->tx_work);
718 flush_work(&hdev->cmd_work);
719 flush_work(&hdev->rx_work);
721 skb_queue_purge(&hdev->cmd_q);
722 skb_queue_purge(&hdev->rx_q);
727 if (hdev->sent_cmd) {
728 kfree_skb(hdev->sent_cmd);
729 hdev->sent_cmd = NULL;
737 hci_req_unlock(hdev);
742 static int hci_dev_do_close(struct hci_dev *hdev)
744 BT_DBG("%s %p", hdev->name, hdev);
746 cancel_work_sync(&hdev->le_scan);
748 hci_req_cancel(hdev, ENODEV);
751 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
752 del_timer_sync(&hdev->cmd_timer);
753 hci_req_unlock(hdev);
757 /* Flush RX and TX works */
758 flush_work(&hdev->tx_work);
759 flush_work(&hdev->rx_work);
761 if (hdev->discov_timeout > 0) {
762 cancel_delayed_work(&hdev->discov_off);
763 hdev->discov_timeout = 0;
764 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
767 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
768 cancel_delayed_work(&hdev->service_cache);
770 cancel_delayed_work_sync(&hdev->le_scan_disable);
773 inquiry_cache_flush(hdev);
774 hci_conn_hash_flush(hdev);
775 hci_dev_unlock(hdev);
777 hci_notify(hdev, HCI_DEV_DOWN);
783 skb_queue_purge(&hdev->cmd_q);
784 atomic_set(&hdev->cmd_cnt, 1);
785 if (!test_bit(HCI_RAW, &hdev->flags) &&
786 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
787 set_bit(HCI_INIT, &hdev->flags);
788 __hci_request(hdev, hci_reset_req, 0,
789 msecs_to_jiffies(250));
790 clear_bit(HCI_INIT, &hdev->flags);
794 flush_work(&hdev->cmd_work);
797 skb_queue_purge(&hdev->rx_q);
798 skb_queue_purge(&hdev->cmd_q);
799 skb_queue_purge(&hdev->raw_q);
801 /* Drop last sent command */
802 if (hdev->sent_cmd) {
803 del_timer_sync(&hdev->cmd_timer);
804 kfree_skb(hdev->sent_cmd);
805 hdev->sent_cmd = NULL;
808 /* After this point our queues are empty
809 * and no tasks are scheduled. */
812 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
814 mgmt_powered(hdev, 0);
815 hci_dev_unlock(hdev);
821 memset(hdev->eir, 0, sizeof(hdev->eir));
822 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
824 hci_req_unlock(hdev);
830 int hci_dev_close(__u16 dev)
832 struct hci_dev *hdev;
835 hdev = hci_dev_get(dev);
839 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
840 cancel_delayed_work(&hdev->power_off);
842 err = hci_dev_do_close(hdev);
848 int hci_dev_reset(__u16 dev)
850 struct hci_dev *hdev;
853 hdev = hci_dev_get(dev);
859 if (!test_bit(HCI_UP, &hdev->flags))
863 skb_queue_purge(&hdev->rx_q);
864 skb_queue_purge(&hdev->cmd_q);
867 inquiry_cache_flush(hdev);
868 hci_conn_hash_flush(hdev);
869 hci_dev_unlock(hdev);
874 atomic_set(&hdev->cmd_cnt, 1);
875 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
877 if (!test_bit(HCI_RAW, &hdev->flags))
878 ret = __hci_request(hdev, hci_reset_req, 0,
879 msecs_to_jiffies(HCI_INIT_TIMEOUT));
882 hci_req_unlock(hdev);
887 int hci_dev_reset_stat(__u16 dev)
889 struct hci_dev *hdev;
892 hdev = hci_dev_get(dev);
896 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
903 int hci_dev_cmd(unsigned int cmd, void __user *arg)
905 struct hci_dev *hdev;
906 struct hci_dev_req dr;
909 if (copy_from_user(&dr, arg, sizeof(dr)))
912 hdev = hci_dev_get(dr.dev_id);
918 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
919 msecs_to_jiffies(HCI_INIT_TIMEOUT));
923 if (!lmp_encrypt_capable(hdev)) {
928 if (!test_bit(HCI_AUTH, &hdev->flags)) {
929 /* Auth must be enabled first */
930 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
931 msecs_to_jiffies(HCI_INIT_TIMEOUT));
936 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
937 msecs_to_jiffies(HCI_INIT_TIMEOUT));
941 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
942 msecs_to_jiffies(HCI_INIT_TIMEOUT));
946 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
947 msecs_to_jiffies(HCI_INIT_TIMEOUT));
951 hdev->link_mode = ((__u16) dr.dev_opt) &
952 (HCI_LM_MASTER | HCI_LM_ACCEPT);
956 hdev->pkt_type = (__u16) dr.dev_opt;
960 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
961 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
965 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
966 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
978 int hci_get_dev_list(void __user *arg)
980 struct hci_dev *hdev;
981 struct hci_dev_list_req *dl;
982 struct hci_dev_req *dr;
983 int n = 0, size, err;
986 if (get_user(dev_num, (__u16 __user *) arg))
989 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
992 size = sizeof(*dl) + dev_num * sizeof(*dr);
994 dl = kzalloc(size, GFP_KERNEL);
1000 read_lock(&hci_dev_list_lock);
1001 list_for_each_entry(hdev, &hci_dev_list, list) {
1002 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1003 cancel_delayed_work(&hdev->power_off);
1005 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1006 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1008 (dr + n)->dev_id = hdev->id;
1009 (dr + n)->dev_opt = hdev->flags;
1014 read_unlock(&hci_dev_list_lock);
1017 size = sizeof(*dl) + n * sizeof(*dr);
1019 err = copy_to_user(arg, dl, size);
1022 return err ? -EFAULT : 0;
1025 int hci_get_dev_info(void __user *arg)
1027 struct hci_dev *hdev;
1028 struct hci_dev_info di;
1031 if (copy_from_user(&di, arg, sizeof(di)))
1034 hdev = hci_dev_get(di.dev_id);
1038 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1039 cancel_delayed_work_sync(&hdev->power_off);
1041 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1042 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1044 strcpy(di.name, hdev->name);
1045 di.bdaddr = hdev->bdaddr;
1046 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1047 di.flags = hdev->flags;
1048 di.pkt_type = hdev->pkt_type;
1049 di.acl_mtu = hdev->acl_mtu;
1050 di.acl_pkts = hdev->acl_pkts;
1051 di.sco_mtu = hdev->sco_mtu;
1052 di.sco_pkts = hdev->sco_pkts;
1053 di.link_policy = hdev->link_policy;
1054 di.link_mode = hdev->link_mode;
1056 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1057 memcpy(&di.features, &hdev->features, sizeof(di.features));
1059 if (copy_to_user(arg, &di, sizeof(di)))
1067 /* ---- Interface to HCI drivers ---- */
1069 static int hci_rfkill_set_block(void *data, bool blocked)
1071 struct hci_dev *hdev = data;
1073 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1078 hci_dev_do_close(hdev);
1083 static const struct rfkill_ops hci_rfkill_ops = {
1084 .set_block = hci_rfkill_set_block,
1087 /* Alloc HCI device */
1088 struct hci_dev *hci_alloc_dev(void)
1090 struct hci_dev *hdev;
1092 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1096 hci_init_sysfs(hdev);
1097 skb_queue_head_init(&hdev->driver_init);
1101 EXPORT_SYMBOL(hci_alloc_dev);
1103 /* Free HCI device */
1104 void hci_free_dev(struct hci_dev *hdev)
1106 skb_queue_purge(&hdev->driver_init);
1108 /* will free via device release */
1109 put_device(&hdev->dev);
1111 EXPORT_SYMBOL(hci_free_dev);
1113 static void hci_power_on(struct work_struct *work)
1115 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1117 BT_DBG("%s", hdev->name);
1119 if (hci_dev_open(hdev->id) < 0)
1122 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1123 schedule_delayed_work(&hdev->power_off,
1124 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1126 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1127 mgmt_index_added(hdev);
1130 static void hci_power_off(struct work_struct *work)
1132 struct hci_dev *hdev = container_of(work, struct hci_dev,
1135 BT_DBG("%s", hdev->name);
1137 hci_dev_do_close(hdev);
1140 static void hci_discov_off(struct work_struct *work)
1142 struct hci_dev *hdev;
1143 u8 scan = SCAN_PAGE;
1145 hdev = container_of(work, struct hci_dev, discov_off.work);
1147 BT_DBG("%s", hdev->name);
1151 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1153 hdev->discov_timeout = 0;
1155 hci_dev_unlock(hdev);
1158 int hci_uuids_clear(struct hci_dev *hdev)
1160 struct list_head *p, *n;
1162 list_for_each_safe(p, n, &hdev->uuids) {
1163 struct bt_uuid *uuid;
1165 uuid = list_entry(p, struct bt_uuid, list);
1174 int hci_link_keys_clear(struct hci_dev *hdev)
1176 struct list_head *p, *n;
1178 list_for_each_safe(p, n, &hdev->link_keys) {
1179 struct link_key *key;
1181 key = list_entry(p, struct link_key, list);
1190 int hci_smp_ltks_clear(struct hci_dev *hdev)
1192 struct smp_ltk *k, *tmp;
1194 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1202 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1206 list_for_each_entry(k, &hdev->link_keys, list)
1207 if (bacmp(bdaddr, &k->bdaddr) == 0)
1213 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1214 u8 key_type, u8 old_key_type)
1217 if (key_type < 0x03)
1220 /* Debug keys are insecure so don't store them persistently */
1221 if (key_type == HCI_LK_DEBUG_COMBINATION)
1224 /* Changed combination key and there's no previous one */
1225 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1228 /* Security mode 3 case */
1232 /* Neither local nor remote side had no-bonding as requirement */
1233 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1236 /* Local side had dedicated bonding as requirement */
1237 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1240 /* Remote side had dedicated bonding as requirement */
1241 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1244 /* If none of the above criteria match, then don't store the key
1249 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1253 list_for_each_entry(k, &hdev->long_term_keys, list) {
1254 if (k->ediv != ediv ||
1255 memcmp(rand, k->rand, sizeof(k->rand)))
1263 EXPORT_SYMBOL(hci_find_ltk);
1265 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1270 list_for_each_entry(k, &hdev->long_term_keys, list)
1271 if (addr_type == k->bdaddr_type &&
1272 bacmp(bdaddr, &k->bdaddr) == 0)
1277 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1279 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1280 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1282 struct link_key *key, *old_key;
1283 u8 old_key_type, persistent;
1285 old_key = hci_find_link_key(hdev, bdaddr);
1287 old_key_type = old_key->type;
1290 old_key_type = conn ? conn->key_type : 0xff;
1291 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1294 list_add(&key->list, &hdev->link_keys);
1297 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1299 /* Some buggy controller combinations generate a changed
1300 * combination key for legacy pairing even when there's no
1302 if (type == HCI_LK_CHANGED_COMBINATION &&
1303 (!conn || conn->remote_auth == 0xff) &&
1304 old_key_type == 0xff) {
1305 type = HCI_LK_COMBINATION;
1307 conn->key_type = type;
1310 bacpy(&key->bdaddr, bdaddr);
1311 memcpy(key->val, val, 16);
1312 key->pin_len = pin_len;
1314 if (type == HCI_LK_CHANGED_COMBINATION)
1315 key->type = old_key_type;
1322 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1324 mgmt_new_link_key(hdev, key, persistent);
1327 list_del(&key->list);
1334 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1335 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
1338 struct smp_ltk *key, *old_key;
1340 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1343 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1347 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1350 list_add(&key->list, &hdev->long_term_keys);
1353 bacpy(&key->bdaddr, bdaddr);
1354 key->bdaddr_type = addr_type;
1355 memcpy(key->val, tk, sizeof(key->val));
1356 key->authenticated = authenticated;
1358 key->enc_size = enc_size;
1360 memcpy(key->rand, rand, sizeof(key->rand));
1365 if (type & HCI_SMP_LTK)
1366 mgmt_new_ltk(hdev, key, 1);
1371 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1373 struct link_key *key;
1375 key = hci_find_link_key(hdev, bdaddr);
1379 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1381 list_del(&key->list);
1387 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1389 struct smp_ltk *k, *tmp;
1391 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1392 if (bacmp(bdaddr, &k->bdaddr))
1395 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1404 /* HCI command timer function */
1405 static void hci_cmd_timer(unsigned long arg)
1407 struct hci_dev *hdev = (void *) arg;
1409 BT_ERR("%s command tx timeout", hdev->name);
1410 atomic_set(&hdev->cmd_cnt, 1);
1411 queue_work(hdev->workqueue, &hdev->cmd_work);
1414 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1417 struct oob_data *data;
1419 list_for_each_entry(data, &hdev->remote_oob_data, list)
1420 if (bacmp(bdaddr, &data->bdaddr) == 0)
1426 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1428 struct oob_data *data;
1430 data = hci_find_remote_oob_data(hdev, bdaddr);
1434 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1436 list_del(&data->list);
1442 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1444 struct oob_data *data, *n;
1446 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1447 list_del(&data->list);
1454 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1457 struct oob_data *data;
1459 data = hci_find_remote_oob_data(hdev, bdaddr);
1462 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1466 bacpy(&data->bdaddr, bdaddr);
1467 list_add(&data->list, &hdev->remote_oob_data);
1470 memcpy(data->hash, hash, sizeof(data->hash));
1471 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1473 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1478 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1480 struct bdaddr_list *b;
1482 list_for_each_entry(b, &hdev->blacklist, list)
1483 if (bacmp(bdaddr, &b->bdaddr) == 0)
1489 int hci_blacklist_clear(struct hci_dev *hdev)
1491 struct list_head *p, *n;
1493 list_for_each_safe(p, n, &hdev->blacklist) {
1494 struct bdaddr_list *b;
1496 b = list_entry(p, struct bdaddr_list, list);
1505 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1507 struct bdaddr_list *entry;
1509 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1512 if (hci_blacklist_lookup(hdev, bdaddr))
1515 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1519 bacpy(&entry->bdaddr, bdaddr);
1521 list_add(&entry->list, &hdev->blacklist);
1523 return mgmt_device_blocked(hdev, bdaddr, type);
1526 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1528 struct bdaddr_list *entry;
1530 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1531 return hci_blacklist_clear(hdev);
1533 entry = hci_blacklist_lookup(hdev, bdaddr);
1537 list_del(&entry->list);
1540 return mgmt_device_unblocked(hdev, bdaddr, type);
1543 static void hci_clear_adv_cache(struct work_struct *work)
1545 struct hci_dev *hdev = container_of(work, struct hci_dev,
1550 hci_adv_entries_clear(hdev);
1552 hci_dev_unlock(hdev);
1555 int hci_adv_entries_clear(struct hci_dev *hdev)
1557 struct adv_entry *entry, *tmp;
1559 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1560 list_del(&entry->list);
1564 BT_DBG("%s adv cache cleared", hdev->name);
1569 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1571 struct adv_entry *entry;
1573 list_for_each_entry(entry, &hdev->adv_entries, list)
1574 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1580 static inline int is_connectable_adv(u8 evt_type)
1582 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1588 int hci_add_adv_entry(struct hci_dev *hdev,
1589 struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
1592 /* Only new entries should be added to adv_entries. So, if
1593 * bdaddr was found, don't add it. */
1594 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1597 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1601 bacpy(&entry->bdaddr, &ev->bdaddr);
1602 entry->bdaddr_type = ev->bdaddr_type;
1604 list_add(&entry->list, &hdev->adv_entries);
1606 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1607 batostr(&entry->bdaddr), entry->bdaddr_type);
1612 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1614 struct le_scan_params *param = (struct le_scan_params *) opt;
1615 struct hci_cp_le_set_scan_param cp;
1617 memset(&cp, 0, sizeof(cp));
1618 cp.type = param->type;
1619 cp.interval = cpu_to_le16(param->interval);
1620 cp.window = cpu_to_le16(param->window);
1622 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1625 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1627 struct hci_cp_le_set_scan_enable cp;
1629 memset(&cp, 0, sizeof(cp));
1632 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Synchronously start an LE scan: set scan parameters, then enable
 * scanning, each via __hci_request with a 3 s command timeout.
 * Returns -EINPROGRESS when a scan is already running. On success
 * schedules le_scan_disable delayed work to stop the scan after
 * 'timeout' ms. NOTE(review): hci_req_lock(), error checks after
 * each __hci_request, and the HCI_LE_SCAN flag update are elided
 * in this chunk. */
1635 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1636 u16 window, int timeout)
1638 long timeo = msecs_to_jiffies(3000);
1639 struct le_scan_params param;
1642 BT_DBG("%s", hdev->name);
1644 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1645 return -EINPROGRESS;
1648 param.interval = interval;
1649 param.window = window;
1653 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1656 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1658 hci_req_unlock(hdev);
1663 schedule_delayed_work(&hdev->le_scan_disable,
1664 msecs_to_jiffies(timeout));
/* Delayed-work handler that stops an LE scan when its timeout
 * expires: sends LE_SET_SCAN_ENABLE with an all-zero parameter
 * block (enable = 0). Scheduled from hci_do_le_scan(). */
1669 static void le_scan_disable_work(struct work_struct *work)
1671 struct hci_dev *hdev = container_of(work, struct hci_dev,
1672 le_scan_disable.work);
1673 struct hci_cp_le_set_scan_enable cp;
1675 BT_DBG("%s", hdev->name);
1677 memset(&cp, 0, sizeof(cp));
1679 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Workqueue handler that performs the LE scan requested by
 * hci_le_scan(), reading the parameters previously stashed in
 * hdev->le_scan_params and delegating to hci_do_le_scan(). */
1682 static void le_scan_work(struct work_struct *work)
1684 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1685 struct le_scan_params *param = &hdev->le_scan_params;
1687 BT_DBG("%s", hdev->name);
1689 hci_do_le_scan(hdev, param->type, param->interval, param->window,
/* Asynchronously request an LE scan. Stores the parameters in
 * hdev->le_scan_params and queues le_scan_work on system_long_wq
 * (the scan blocks for its whole duration, hence the long-running
 * workqueue). Returns -EINPROGRESS if the work is already queued
 * or running. NOTE(review): param->type assignment appears elided
 * in this chunk. */
1693 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1696 struct le_scan_params *param = &hdev->le_scan_params;
1698 BT_DBG("%s", hdev->name);
1700 if (work_busy(&hdev->le_scan))
1701 return -EINPROGRESS;
1704 param->interval = interval;
1705 param->window = window;
1706 param->timeout = timeout;
1708 queue_work(system_long_wq, &hdev->le_scan);
1713 /* Register HCI device */
/* Allocate an id, initialize all per-device state (locks, queues,
 * work items, timers, lists), create the per-device workqueue,
 * register sysfs and rfkill, and announce HCI_DEV_REG.
 *
 * Id allocation: AMP controllers start at id 1 so that index 0 can
 * double as the AMP controller ID; the list walk finds the first
 * free id while keeping hci_dev_list sorted by id.
 *
 * Error handling uses the usual goto-unwind: on workqueue or sysfs
 * failure the device is unlinked again under hci_dev_list_lock.
 * NOTE(review): several lines (declarations of id/error/i, loop
 * body advancing 'head', labels, return statements, rfkill-alloc
 * failure check) are elided in this chunk — the visible code is
 * not the complete function. */
1714 int hci_register_dev(struct hci_dev *hdev)
1716 struct list_head *head = &hci_dev_list, *p;
1719 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* A transport driver must supply both open and close callbacks. */
1721 if (!hdev->open || !hdev->close)
1724 /* Do not allow HCI_AMP devices to register at index 0,
1725 * so the index can be used as the AMP controller ID.
1727 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1729 write_lock(&hci_dev_list_lock);
1731 /* Find first available device id */
1732 list_for_each(p, &hci_dev_list) {
1733 if (list_entry(p, struct hci_dev, list)->id != id)
1738 sprintf(hdev->name, "hci%d", id);
1740 list_add_tail(&hdev->list, head);
1742 mutex_init(&hdev->lock);
/* Baseline capabilities before the controller reports its own. */
1745 hdev->dev_flags = 0;
1746 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1747 hdev->esco_type = (ESCO_HV1);
1748 hdev->link_mode = (HCI_LM_ACCEPT);
1749 hdev->io_capability = 0x03; /* No Input No Output */
1751 hdev->idle_timeout = 0;
/* Sniff intervals in baseband slots (0.625 ms each). */
1752 hdev->sniff_max_interval = 800;
1753 hdev->sniff_min_interval = 80;
1755 INIT_WORK(&hdev->rx_work, hci_rx_work);
1756 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1757 INIT_WORK(&hdev->tx_work, hci_tx_work);
1760 skb_queue_head_init(&hdev->rx_q);
1761 skb_queue_head_init(&hdev->cmd_q);
1762 skb_queue_head_init(&hdev->raw_q);
1764 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1766 for (i = 0; i < NUM_REASSEMBLY; i++)
1767 hdev->reassembly[i] = NULL;
1769 init_waitqueue_head(&hdev->req_wait_q);
1770 mutex_init(&hdev->req_lock);
1772 discovery_init(hdev);
1774 hci_conn_hash_init(hdev);
1776 INIT_LIST_HEAD(&hdev->mgmt_pending);
1778 INIT_LIST_HEAD(&hdev->blacklist);
1780 INIT_LIST_HEAD(&hdev->uuids);
1782 INIT_LIST_HEAD(&hdev->link_keys);
1783 INIT_LIST_HEAD(&hdev->long_term_keys);
1785 INIT_LIST_HEAD(&hdev->remote_oob_data);
1787 INIT_LIST_HEAD(&hdev->adv_entries);
1789 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1790 INIT_WORK(&hdev->power_on, hci_power_on);
1791 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1793 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1795 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1797 atomic_set(&hdev->promisc, 0);
1799 INIT_WORK(&hdev->le_scan, le_scan_work);
1801 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1803 write_unlock(&hci_dev_list_lock);
1805 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1807 if (!hdev->workqueue) {
1812 error = hci_add_sysfs(hdev);
/* rfkill registration failure is non-fatal: fall back to no
 * rfkill switch for this controller. */
1816 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1817 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1819 if (rfkill_register(hdev->rfkill) < 0) {
1820 rfkill_destroy(hdev->rfkill);
1821 hdev->rfkill = NULL;
/* New devices power on automatically, then mgmt finishes setup. */
1825 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1826 set_bit(HCI_SETUP, &hdev->dev_flags);
1827 schedule_work(&hdev->power_on);
1829 hci_notify(hdev, HCI_DEV_REG);
/* Error unwind: undo workqueue creation and list insertion. */
1835 destroy_workqueue(hdev->workqueue);
1837 write_lock(&hci_dev_list_lock);
1838 list_del(&hdev->list);
1839 write_unlock(&hci_dev_list_lock);
1843 EXPORT_SYMBOL(hci_register_dev);
1845 /* Unregister HCI device */
/* Tear down everything hci_register_dev() set up: unlink from the
 * global list, close the device, free reassembly buffers, notify
 * mgmt (unless still in INIT/SETUP, i.e. mgmt never saw it),
 * unregister rfkill and sysfs, and drain all cached security and
 * discovery state. NOTE(review): some lines (variable decls,
 * hci_dev_lock() calls paired with the visible unlocks, NULL check
 * before rfkill_unregister) are elided in this chunk. */
1846 void hci_unregister_dev(struct hci_dev *hdev)
1850 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1852 write_lock(&hci_dev_list_lock);
1853 list_del(&hdev->list);
1854 write_unlock(&hci_dev_list_lock);
1856 hci_dev_do_close(hdev);
/* kfree_skb(NULL) is a no-op, so unconditionally free all slots. */
1858 for (i = 0; i < NUM_REASSEMBLY; i++)
1859 kfree_skb(hdev->reassembly[i]);
1861 if (!test_bit(HCI_INIT, &hdev->flags) &&
1862 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1864 mgmt_index_removed(hdev);
1865 hci_dev_unlock(hdev);
1868 /* mgmt_index_removed should take care of emptying the
1870 BUG_ON(!list_empty(&hdev->mgmt_pending));
1872 hci_notify(hdev, HCI_DEV_UNREG);
1875 rfkill_unregister(hdev->rfkill);
1876 rfkill_destroy(hdev->rfkill);
1879 hci_del_sysfs(hdev);
1881 cancel_delayed_work_sync(&hdev->adv_work);
1883 destroy_workqueue(hdev->workqueue);
/* Drop all per-device caches before the device goes away. */
1886 hci_blacklist_clear(hdev);
1887 hci_uuids_clear(hdev);
1888 hci_link_keys_clear(hdev);
1889 hci_smp_ltks_clear(hdev);
1890 hci_remote_oob_data_clear(hdev);
1891 hci_adv_entries_clear(hdev);
1892 hci_dev_unlock(hdev);
1896 EXPORT_SYMBOL(hci_unregister_dev);
1898 /* Suspend HCI device */
/* Broadcast HCI_DEV_SUSPEND to registered notifiers; the stack
 * itself keeps no extra suspend state here. */
1899 int hci_suspend_dev(struct hci_dev *hdev)
1901 hci_notify(hdev, HCI_DEV_SUSPEND);
1904 EXPORT_SYMBOL(hci_suspend_dev);
1906 /* Resume HCI device */
/* Broadcast HCI_DEV_RESUME to registered notifiers; mirror image
 * of hci_suspend_dev(). */
1907 int hci_resume_dev(struct hci_dev *hdev)
1909 hci_notify(hdev, HCI_DEV_RESUME);
1912 EXPORT_SYMBOL(hci_resume_dev);
1914 /* Receive frame from HCI drivers */
/* Entry point for transport drivers handing a complete HCI packet
 * to the core. Packets are dropped unless the device is UP or in
 * INIT (so init-time events still get through). The skb is
 * timestamped, marked incoming, and queued to rx_q; actual
 * processing happens in hci_rx_work on the device workqueue.
 * NOTE(review): the drop path (kfree_skb + error return) is elided
 * in this chunk. */
1915 int hci_recv_frame(struct sk_buff *skb)
1917 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1918 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1919 && !test_bit(HCI_INIT, &hdev->flags))) {
1925 bt_cb(skb)->incoming = 1;
1928 __net_timestamp(skb);
1930 skb_queue_tail(&hdev->rx_q, skb);
1931 queue_work(hdev->workqueue, &hdev->rx_work);
1935 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a (possibly fragmented) HCI packet of the given type
 * into hdev->reassembly[index].
 *
 * On the first fragment an skb sized for the packet type's maximum
 * is allocated and its header length recorded; subsequent calls
 * append up to scb->expect bytes. Once the type-specific header is
 * complete, the real payload length is read from it and 'expect'
 * is updated; a packet larger than the allocated tailroom aborts
 * reassembly. A fully assembled packet is handed to
 * hci_recv_frame() and the slot cleared.
 *
 * Returns the number of input bytes NOT consumed (callers loop
 * until 0). NOTE(review): heavily elided here — the while(count)
 * loop, switch statements' opening lines, expect bookkeeping, and
 * several break/return/error lines are missing from this chunk. */
1937 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1938 int count, __u8 index)
1943 struct sk_buff *skb;
1944 struct bt_skb_cb *scb;
/* Reject unknown packet types and out-of-range reassembly slots. */
1946 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1947 index >= NUM_REASSEMBLY)
1950 skb = hdev->reassembly[index];
/* Per-type maximum payload and header sizes. */
1954 case HCI_ACLDATA_PKT:
1955 len = HCI_MAX_FRAME_SIZE;
1956 hlen = HCI_ACL_HDR_SIZE;
1959 len = HCI_MAX_EVENT_SIZE;
1960 hlen = HCI_EVENT_HDR_SIZE;
1962 case HCI_SCODATA_PKT:
1963 len = HCI_MAX_SCO_SIZE;
1964 hlen = HCI_SCO_HDR_SIZE;
/* First fragment: allocate the reassembly skb for this slot. */
1968 skb = bt_skb_alloc(len, GFP_ATOMIC);
1972 scb = (void *) skb->cb;
1974 scb->pkt_type = type;
1976 skb->dev = (void *) hdev;
1977 hdev->reassembly[index] = skb;
1981 scb = (void *) skb->cb;
1982 len = min_t(uint, scb->expect, count);
1984 memcpy(skb_put(skb, len), data, len);
/* Header complete: read the true payload length from it. */
1993 if (skb->len == HCI_EVENT_HDR_SIZE) {
1994 struct hci_event_hdr *h = hci_event_hdr(skb);
1995 scb->expect = h->plen;
1997 if (skb_tailroom(skb) < scb->expect) {
1999 hdev->reassembly[index] = NULL;
2005 case HCI_ACLDATA_PKT:
2006 if (skb->len == HCI_ACL_HDR_SIZE) {
2007 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2008 scb->expect = __le16_to_cpu(h->dlen);
2010 if (skb_tailroom(skb) < scb->expect) {
2012 hdev->reassembly[index] = NULL;
2018 case HCI_SCODATA_PKT:
2019 if (skb->len == HCI_SCO_HDR_SIZE) {
2020 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2021 scb->expect = h->dlen;
2023 if (skb_tailroom(skb) < scb->expect) {
2025 hdev->reassembly[index] = NULL;
/* Whole packet assembled: deliver and clear the slot. */
2032 if (scb->expect == 0) {
2033 /* Complete frame */
2035 bt_cb(skb)->pkt_type = type;
2036 hci_recv_frame(skb);
2038 hdev->reassembly[index] = NULL;
/* Driver API: feed raw fragment data of a known packet type into
 * the per-type reassembly slot (type - 1). Loops until
 * hci_reassembly() has consumed all bytes or reports an error.
 * NOTE(review): the while loop header and error-propagation lines
 * are elided in this chunk. */
2046 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2050 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2054 rem = hci_reassembly(hdev, type, data, count, type - 1);
2058 data += (count - rem);
2064 EXPORT_SYMBOL(hci_recv_fragment);
2066 #define STREAM_REASSEMBLY 0
/* Driver API for byte-stream transports (e.g. UART): the packet
 * type is the first byte of each frame rather than out of band.
 * Uses the dedicated STREAM_REASSEMBLY slot; for a continuing
 * frame the type is recovered from the in-progress skb's cb.
 * NOTE(review): the loop structure and the frame-start branch that
 * reads pkt->type from the data are elided in this chunk. */
2068 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2074 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2077 struct { char type; } *pkt;
2079 /* Start of the frame */
2086 type = bt_cb(skb)->pkt_type;
2088 rem = hci_reassembly(hdev, type, data, count,
2093 data += (count - rem);
2099 EXPORT_SYMBOL(hci_recv_stream_fragment);
2101 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback structure (L2CAP/SCO) on the
 * global hci_cb_list under hci_cb_list_lock. */
2103 int hci_register_cb(struct hci_cb *cb)
2105 BT_DBG("%p name %s", cb, cb->name);
2107 write_lock(&hci_cb_list_lock);
2108 list_add(&cb->list, &hci_cb_list);
2109 write_unlock(&hci_cb_list_lock);
2113 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback from
 * hci_cb_list; inverse of hci_register_cb(). */
2115 int hci_unregister_cb(struct hci_cb *cb)
2117 BT_DBG("%p name %s", cb, cb->name);
2119 write_lock(&hci_cb_list_lock);
2120 list_del(&cb->list);
2121 write_unlock(&hci_cb_list_lock);
2125 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing packet to the transport driver. Timestamps the
 * skb, copies it to the monitor channel and (in promiscuous mode)
 * to raw sockets, then calls the driver's send hook.
 * NOTE(review): the NULL-hdev guard/kfree path and the skb_orphan
 * call implied by the "Get rid of skb owner" comment are elided in
 * this chunk. */
2127 static int hci_send_frame(struct sk_buff *skb)
2129 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2136 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2139 __net_timestamp(skb);
2141 /* Send copy to monitor */
2142 hci_send_to_monitor(hdev, skb);
2144 if (atomic_read(&hdev->promisc)) {
2145 /* Send copy to the sockets */
2146 hci_send_to_sock(hdev, skb);
2149 /* Get rid of skb owner, prior to sending to the driver. */
2152 return hdev->send(skb);
2155 /* Send HCI command */
/* Build an HCI command packet (header + optional parameter block)
 * and queue it on cmd_q; hci_cmd_work drains the queue respecting
 * the controller's command-credit flow control. During HCI_INIT
 * the opcode is recorded in init_last_cmd so the init state
 * machine can match replies. Returns -ENOMEM on allocation
 * failure (error line elided in this chunk, along with the plen
 * assignment into hdr and the final return 0). */
2156 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2158 int len = HCI_COMMAND_HDR_SIZE + plen;
2159 struct hci_command_hdr *hdr;
2160 struct sk_buff *skb;
2162 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2164 skb = bt_skb_alloc(len, GFP_ATOMIC);
2166 BT_ERR("%s no memory for command", hdev->name);
2170 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2171 hdr->opcode = cpu_to_le16(opcode);
2175 memcpy(skb_put(skb, plen), param, plen);
2177 BT_DBG("skb len %d", skb->len);
2179 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2180 skb->dev = (void *) hdev;
2182 if (test_bit(HCI_INIT, &hdev->flags))
2183 hdev->init_last_cmd = opcode;
2185 skb_queue_tail(&hdev->cmd_q, skb);
2186 queue_work(hdev->workqueue, &hdev->cmd_work);
2191 /* Get data from the previously sent command */
/* Return a pointer to the parameter block of the last sent command
 * if its opcode matches, else NULL (NULL-return lines elided in
 * this chunk). The pointer aliases hdev->sent_cmd and is only
 * valid until the next command is sent. */
2192 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2194 struct hci_command_hdr *hdr;
2196 if (!hdev->sent_cmd)
2199 hdr = (void *) hdev->sent_cmd->data;
2201 if (hdr->opcode != cpu_to_le16(opcode))
2204 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2206 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header to skb: packs handle + packet-boundary
 * /broadcast flags into the 12+4-bit handle field and records the
 * payload length, both little-endian. NOTE(review): the 'len'
 * capture before skb_push is elided in this chunk. */
2210 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2212 struct hci_acl_hdr *hdr;
2215 skb_push(skb, HCI_ACL_HDR_SIZE);
2216 skb_reset_transport_header(skb);
2217 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2218 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2219 hdr->dlen = cpu_to_le16(len);
/* Queue an outgoing ACL packet, splitting an skb with a frag_list
 * into individual HCI fragments. The first fragment keeps the
 * ACL_START flag; continuation fragments are re-headered with
 * ACL_START cleared (i.e. ACL_CONT) and all fragments are queued
 * atomically under the queue lock so the scheduler never sees a
 * partial packet. NOTE(review): the 'if (!list)' branch line and
 * the do/while loop framing are elided in this chunk. */
2222 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2223 struct sk_buff *skb, __u16 flags)
2225 struct hci_dev *hdev = conn->hdev;
2226 struct sk_buff *list;
2228 list = skb_shinfo(skb)->frag_list;
2230 /* Non fragmented */
2231 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2233 skb_queue_tail(queue, skb);
2236 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain; each fragment becomes its own skb. */
2238 skb_shinfo(skb)->frag_list = NULL;
2240 /* Queue all fragments atomically */
2241 spin_lock(&queue->lock);
2243 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry ACL_START. */
2245 flags &= ~ACL_START;
2248 skb = list; list = list->next;
2250 skb->dev = (void *) hdev;
2251 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2252 hci_add_acl_hdr(skb, conn->handle, flags);
2254 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2256 __skb_queue_tail(queue, skb);
2259 spin_unlock(&queue->lock);
/* Public entry point for sending ACL data on an HCI channel: stamp
 * the skb with the device and ACL packet type, add the ACL header
 * for the channel's connection handle, queue it (fragmenting if
 * needed) on the channel's data_q, and kick the TX worker. */
2263 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2265 struct hci_conn *conn = chan->conn;
2266 struct hci_dev *hdev = conn->hdev;
2268 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2270 skb->dev = (void *) hdev;
2271 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2272 hci_add_acl_hdr(skb, conn->handle, flags);
2274 hci_queue_acl(conn, &chan->data_q, skb, flags);
2276 queue_work(hdev->workqueue, &hdev->tx_work);
2278 EXPORT_SYMBOL(hci_send_acl);
/* Public entry point for sending SCO (voice) data: build the SCO
 * header on the stack, prepend it to the skb, mark the packet type
 * and queue it on the connection's data_q for the TX worker.
 * Note hdr.dlen is a single byte on the wire, hence no endian
 * conversion. NOTE(review): an skb->len bound check against
 * hdev->sco_mtu is elided in this chunk. */
2281 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2283 struct hci_dev *hdev = conn->hdev;
2284 struct hci_sco_hdr hdr;
2286 BT_DBG("%s len %d", hdev->name, skb->len);
2288 hdr.handle = cpu_to_le16(conn->handle);
2289 hdr.dlen = skb->len;
2291 skb_push(skb, HCI_SCO_HDR_SIZE);
2292 skb_reset_transport_header(skb);
2293 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2295 skb->dev = (void *) hdev;
2296 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2298 skb_queue_tail(&conn->data_q, skb);
2299 queue_work(hdev->workqueue, &hdev->tx_work);
2301 EXPORT_SYMBOL(hci_send_sco);
2303 /* ---- HCI TX task (outgoing data) ---- */
2305 /* HCI Connection scheduler */
/* Pick the connection of the given link type with pending data and
 * the fewest in-flight packets (fair round-robin by 'sent' count),
 * and compute its quota from the remaining controller buffer
 * credits for that link type. Iteration is RCU-protected.
 * NOTE(review): rcu_read_lock/unlock, the min/conn update lines,
 * the quota computation from cnt/num, and the return are elided in
 * this chunk. */
2306 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2308 struct hci_conn_hash *h = &hdev->conn_hash;
2309 struct hci_conn *conn = NULL, *c;
2310 int num = 0, min = ~0;
2312 /* We don't have to lock device here. Connections are always
2313 * added and removed with TX task disabled. */
2317 list_for_each_entry_rcu(c, &h->list, list) {
2318 if (c->type != type || skb_queue_empty(&c->data_q))
2321 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2326 if (c->sent < min) {
/* Early exit once every connection of this type was inspected. */
2331 if (hci_conn_num(hdev, type) == num)
2340 switch (conn->type) {
2342 cnt = hdev->acl_cnt;
2346 cnt = hdev->sco_cnt;
/* LE shares the ACL credit pool when no dedicated LE buffers. */
2349 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2353 BT_ERR("Unknown link type");
2361 BT_DBG("conn %p quote %d", conn, *quote);
/* TX watchdog: after a link-supervision-scale timeout with no
 * returned credits, disconnect every connection of the given type
 * that still has unacknowledged packets. 0x13 is the HCI reason
 * "Remote User Terminated Connection". Iteration is RCU-protected
 * (lock/unlock lines elided in this chunk). */
2365 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2367 struct hci_conn_hash *h = &hdev->conn_hash;
2370 BT_ERR("%s link tx timeout", hdev->name);
2374 /* Kill stalled connections */
2375 list_for_each_entry_rcu(c, &h->list, list) {
2376 if (c->type == type && c->sent) {
2377 BT_ERR("%s killing stalled connection %s",
2378 hdev->name, batostr(&c->dst));
2379 hci_acl_disconn(c, 0x13);
/* Channel-level scheduler: among all channels of the given link
 * type with queued data, pick one whose head skb has the highest
 * priority; among equal-priority candidates prefer the connection
 * with the fewest in-flight packets. The quota is derived from the
 * remaining buffer credits for that link type. RCU-protected
 * iteration. NOTE(review): rcu lock/unlock, the chan/min update
 * bookkeeping, and the quota math are elided in this chunk. */
2386 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2389 struct hci_conn_hash *h = &hdev->conn_hash;
2390 struct hci_chan *chan = NULL;
2391 int num = 0, min = ~0, cur_prio = 0;
2392 struct hci_conn *conn;
2393 int cnt, q, conn_num = 0;
2395 BT_DBG("%s", hdev->name);
2399 list_for_each_entry_rcu(conn, &h->list, list) {
2400 struct hci_chan *tmp;
2402 if (conn->type != type)
2405 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2410 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2411 struct sk_buff *skb;
2413 if (skb_queue_empty(&tmp->data_q))
2416 skb = skb_peek(&tmp->data_q);
/* Lower-priority channels never preempt the current best. */
2417 if (skb->priority < cur_prio)
2420 if (skb->priority > cur_prio) {
2423 cur_prio = skb->priority;
2428 if (conn->sent < min) {
2434 if (hci_conn_num(hdev, type) == conn_num)
2443 switch (chan->conn->type) {
2445 cnt = hdev->acl_cnt;
2449 cnt = hdev->sco_cnt;
/* LE shares the ACL credit pool when no dedicated LE buffers. */
2452 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2456 BT_ERR("Unknown link type");
2461 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass run after a scheduling round: promote the
 * head skb of every still-waiting channel of this link type to
 * HCI_PRIO_MAX - 1 so low-priority traffic eventually wins over a
 * constant stream of high-priority packets. RCU-protected
 * iteration (lock/unlock and some skip conditions elided in this
 * chunk). */
2465 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2467 struct hci_conn_hash *h = &hdev->conn_hash;
2468 struct hci_conn *conn;
2471 BT_DBG("%s", hdev->name);
2475 list_for_each_entry_rcu(conn, &h->list, list) {
2476 struct hci_chan *chan;
2478 if (conn->type != type)
2481 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2486 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2487 struct sk_buff *skb;
2494 if (skb_queue_empty(&chan->data_q))
2497 skb = skb_peek(&chan->data_q);
/* Already at (or above) the promotion ceiling — skip. */
2498 if (skb->priority >= HCI_PRIO_MAX - 1)
2501 skb->priority = HCI_PRIO_MAX - 1;
2503 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2507 if (hci_conn_num(hdev, type) == num)
/* Block-based flow control: number of controller data blocks this
 * ACL packet occupies (payload length divided by the controller's
 * block size, rounded up; the ACL header is not counted). */
2515 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2517 /* Calculate count of blocks used by this packet */
2518 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* Shared ACL TX watchdog check: if all credits are exhausted (cnt
 * == 0) and nothing was transmitted for longer than
 * HCI_ACL_TX_TIMEOUT, kill stalled ACL connections. Skipped for
 * raw (userspace-driven) devices. */
2521 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2523 if (!test_bit(HCI_RAW, &hdev->flags)) {
2524 /* ACL tx timeout must be longer than maximum
2525 * link supervision timeout (40.9 seconds) */
2526 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2527 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2528 hci_link_tx_to(hdev, ACL_LINK);
/* ACL scheduler for packet-based flow control: repeatedly pick the
 * best channel via hci_chan_sent() and drain up to 'quote' skbs of
 * the current head priority from it, sending each via
 * hci_send_frame(). Stops a channel early if a lower-priority skb
 * surfaces. NOTE(review): the per-packet credit decrement and
 * sent-counter updates are elided in this chunk. If any credits
 * were consumed, rebalance priorities afterwards. */
2532 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2534 unsigned int cnt = hdev->acl_cnt;
2535 struct hci_chan *chan;
2536 struct sk_buff *skb;
2539 __check_timeout(hdev, cnt);
2541 while (hdev->acl_cnt &&
2542 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2543 u32 priority = (skb_peek(&chan->data_q))->priority;
2544 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2545 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2546 skb->len, skb->priority);
2548 /* Stop if priority has changed */
2549 if (skb->priority < priority)
2552 skb = skb_dequeue(&chan->data_q);
2554 hci_conn_enter_active_mode(chan->conn,
2555 bt_cb(skb)->force_active);
2557 hci_send_frame(skb);
2558 hdev->acl_last_tx = jiffies;
2566 if (cnt != hdev->acl_cnt)
2567 hci_prio_recalculate(hdev, ACL_LINK);
/* ACL scheduler for block-based flow control (AMP-style): like
 * hci_sched_acl_pkt() but credits are counted in controller data
 * blocks, so each packet's block footprint is computed with
 * __get_blocks() and a packet that would overshoot the remaining
 * block budget stops the inner loop. Both chan->sent and
 * conn->sent advance by blocks, not packets. NOTE(review): the
 * 'blocks'/'quote' declarations and quote adjustment lines are
 * elided in this chunk. */
2570 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2572 unsigned int cnt = hdev->block_cnt;
2573 struct hci_chan *chan;
2574 struct sk_buff *skb;
2577 __check_timeout(hdev, cnt);
2579 while (hdev->block_cnt > 0 &&
2580 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2581 u32 priority = (skb_peek(&chan->data_q))->priority;
2582 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2585 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2586 skb->len, skb->priority);
2588 /* Stop if priority has changed */
2589 if (skb->priority < priority)
2592 skb = skb_dequeue(&chan->data_q);
2594 blocks = __get_blocks(hdev, skb);
2595 if (blocks > hdev->block_cnt)
2598 hci_conn_enter_active_mode(chan->conn,
2599 bt_cb(skb)->force_active);
2601 hci_send_frame(skb);
2602 hdev->acl_last_tx = jiffies;
2604 hdev->block_cnt -= blocks;
2607 chan->sent += blocks;
2608 chan->conn->sent += blocks;
2612 if (cnt != hdev->block_cnt)
2613 hci_prio_recalculate(hdev, ACL_LINK);
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's advertised flow-control mode; do
 * nothing when there are no ACL connections. */
2616 static inline void hci_sched_acl(struct hci_dev *hdev)
2618 BT_DBG("%s", hdev->name);
2620 if (!hci_conn_num(hdev, ACL_LINK))
2623 switch (hdev->flow_ctl_mode) {
2624 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2625 hci_sched_acl_pkt(hdev);
2628 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2629 hci_sched_acl_blk(hdev);
2635 static inline void hci_sched_sco(struct hci_dev *hdev)
2637 struct hci_conn *conn;
2638 struct sk_buff *skb;
2641 BT_DBG("%s", hdev->name);
2643 if (!hci_conn_num(hdev, SCO_LINK))
2646 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
2647 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2648 BT_DBG("skb %p len %d", skb, skb->len);
2649 hci_send_frame(skb);
2652 if (conn->sent == ~0)
/* eSCO scheduler: identical in structure to hci_sched_sco() but
 * selects ESCO_LINK connections; eSCO shares the sco_cnt credit
 * pool with SCO. */
2658 static inline void hci_sched_esco(struct hci_dev *hdev)
2660 struct hci_conn *conn;
2661 struct sk_buff *skb;
2664 BT_DBG("%s", hdev->name);
2666 if (!hci_conn_num(hdev, ESCO_LINK))
2669 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2670 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2671 BT_DBG("skb %p len %d", skb, skb->len);
2672 hci_send_frame(skb);
2675 if (conn->sent == ~0)
/* LE scheduler: same channel-based, priority-aware loop as the ACL
 * packet scheduler. Controllers without dedicated LE buffers
 * (le_pkts == 0) borrow from the ACL credit pool, so the consumed
 * count is written back to acl_cnt in that case (the le_cnt
 * write-back branch is elided in this chunk). Includes its own
 * 45 s TX watchdog mirroring __check_timeout(). */
2681 static inline void hci_sched_le(struct hci_dev *hdev)
2683 struct hci_chan *chan;
2684 struct sk_buff *skb;
2685 int quote, cnt, tmp;
2687 BT_DBG("%s", hdev->name);
2689 if (!hci_conn_num(hdev, LE_LINK))
2692 if (!test_bit(HCI_RAW, &hdev->flags)) {
2693 /* LE tx timeout must be longer than maximum
2694 * link supervision timeout (40.9 seconds) */
2695 if (!hdev->le_cnt && hdev->le_pkts &&
2696 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2697 hci_link_tx_to(hdev, LE_LINK);
2700 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2702 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2703 u32 priority = (skb_peek(&chan->data_q))->priority;
2704 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2705 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2706 skb->len, skb->priority);
2708 /* Stop if priority has changed */
2709 if (skb->priority < priority)
2712 skb = skb_dequeue(&chan->data_q);
2714 hci_send_frame(skb);
2715 hdev->le_last_tx = jiffies;
/* No dedicated LE buffers: return leftover credits to ACL. */
2726 hdev->acl_cnt = cnt;
2729 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker: run each link-type scheduler in priority order (ACL,
 * SCO, eSCO, then LE — the hci_sched_le call is elided in this
 * chunk), then flush raw_q packets straight to the driver,
 * bypassing flow control. Queued from every send path and from
 * credit-return events. */
2732 static void hci_tx_work(struct work_struct *work)
2734 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2735 struct sk_buff *skb;
2737 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2738 hdev->sco_cnt, hdev->le_cnt);
2740 /* Schedule queues and send stuff to HCI driver */
2742 hci_sched_acl(hdev);
2744 hci_sched_sco(hdev);
2746 hci_sched_esco(hdev);
2750 /* Send next queued raw (unknown type) packet */
2751 while ((skb = skb_dequeue(&hdev->raw_q)))
2752 hci_send_frame(skb);
2755 /* ----- HCI RX task (incoming data processing) ----- */
2757 /* ACL data packet */
/* Deliver one incoming ACL data packet: unpack handle and
 * packet-boundary flags from the 16-bit header field, look up the
 * owning connection under the device lock, and pass the payload to
 * L2CAP. Packets for unknown handles are logged and dropped
 * (kfree_skb line elided in this chunk). */
2758 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2760 struct hci_acl_hdr *hdr = (void *) skb->data;
2761 struct hci_conn *conn;
2762 __u16 handle, flags;
2764 skb_pull(skb, HCI_ACL_HDR_SIZE);
2766 handle = __le16_to_cpu(hdr->handle);
2767 flags = hci_flags(handle);
2768 handle = hci_handle(handle);
2770 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2772 hdev->stat.acl_rx++;
2775 conn = hci_conn_hash_lookup_handle(hdev, handle);
2776 hci_dev_unlock(hdev);
/* Data traffic keeps the link out of sniff/park power modes. */
2779 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2781 /* Send to upper protocol */
2782 l2cap_recv_acldata(conn, skb, flags);
2785 BT_ERR("%s ACL packet for unknown connection handle %d",
2786 hdev->name, handle);
2792 /* SCO data packet */
/* Deliver one incoming SCO (voice) packet to the SCO layer;
 * structure mirrors hci_acldata_packet() but without boundary
 * flags. Unknown handles are logged and the skb dropped (kfree_skb
 * line elided in this chunk). */
2793 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2795 struct hci_sco_hdr *hdr = (void *) skb->data;
2796 struct hci_conn *conn;
2799 skb_pull(skb, HCI_SCO_HDR_SIZE);
2801 handle = __le16_to_cpu(hdr->handle);
2803 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2805 hdev->stat.sco_rx++;
2808 conn = hci_conn_hash_lookup_handle(hdev, handle);
2809 hci_dev_unlock(hdev);
2812 /* Send to upper protocol */
2813 sco_recv_scodata(conn, skb);
2816 BT_ERR("%s SCO packet for unknown connection handle %d",
2817 hdev->name, handle);
/* RX worker: drain rx_q and dispatch each packet. Every packet is
 * copied to the monitor channel and, in promiscuous mode, to raw
 * sockets. Raw-mode devices stop there; during HCI_INIT, data
 * packets are discarded (only events are processed) so init cannot
 * be confused by stray traffic. Finally dispatch by packet type to
 * the event/ACL/SCO handlers. NOTE(review): kfree_skb calls for
 * the drop/default paths and the HCI_EVENT_PKT case label are
 * elided in this chunk. */
2823 static void hci_rx_work(struct work_struct *work)
2825 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2826 struct sk_buff *skb;
2828 BT_DBG("%s", hdev->name);
2830 while ((skb = skb_dequeue(&hdev->rx_q))) {
2831 /* Send copy to monitor */
2832 hci_send_to_monitor(hdev, skb);
2834 if (atomic_read(&hdev->promisc)) {
2835 /* Send copy to the sockets */
2836 hci_send_to_sock(hdev, skb);
2839 if (test_bit(HCI_RAW, &hdev->flags)) {
2844 if (test_bit(HCI_INIT, &hdev->flags)) {
2845 /* Don't process data packets in this states. */
2846 switch (bt_cb(skb)->pkt_type) {
2847 case HCI_ACLDATA_PKT:
2848 case HCI_SCODATA_PKT:
2855 switch (bt_cb(skb)->pkt_type) {
2857 BT_DBG("%s Event packet", hdev->name);
2858 hci_event_packet(hdev, skb);
2861 case HCI_ACLDATA_PKT:
2862 BT_DBG("%s ACL data packet", hdev->name);
2863 hci_acldata_packet(hdev, skb);
2866 case HCI_SCODATA_PKT:
2867 BT_DBG("%s SCO data packet", hdev->name);
2868 hci_scodata_packet(hdev, skb);
/* CMD worker: if the controller has a free command credit
 * (cmd_cnt), dequeue the next command, keep a clone in sent_cmd
 * (so replies can be matched and parameters retrieved via
 * hci_sent_cmd_data), send it, and arm the command timer —
 * except during reset, when the timer is cancelled instead. On
 * clone failure the command is requeued and the work rescheduled.
 * NOTE(review): the '!skb return' after dequeue and atomic_set of
 * cmd_cnt are elided in this chunk. */
2878 static void hci_cmd_work(struct work_struct *work)
2880 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2881 struct sk_buff *skb;
2883 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2885 /* Send queued commands */
2886 if (atomic_read(&hdev->cmd_cnt)) {
2887 skb = skb_dequeue(&hdev->cmd_q);
2891 kfree_skb(hdev->sent_cmd);
2893 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2894 if (hdev->sent_cmd) {
2895 atomic_dec(&hdev->cmd_cnt);
2896 hci_send_frame(skb);
2897 if (test_bit(HCI_RESET, &hdev->flags))
2898 del_timer(&hdev->cmd_timer);
2900 mod_timer(&hdev->cmd_timer,
2901 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2903 skb_queue_head(&hdev->cmd_q, skb);
2904 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Start a BR/EDR inquiry using the General Inquiry Access Code
 * (GIAC, LAP 0x9e8b33 little-endian) for 'length' * 1.28 s.
 * Flushes the inquiry cache first so only fresh results are
 * reported. Returns -EINPROGRESS if an inquiry is already running.
 * NOTE(review): the cp.length assignment is elided in this chunk. */
2909 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2911 /* General inquiry access code (GIAC) */
2912 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2913 struct hci_cp_inquiry cp;
2915 BT_DBG("%s", hdev->name);
2917 if (test_bit(HCI_INQUIRY, &hdev->flags))
2918 return -EINPROGRESS;
2920 inquiry_cache_flush(hdev);
2922 memset(&cp, 0, sizeof(cp));
2923 memcpy(&cp.lap, lap, sizeof(cp.lap));
2926 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Cancel a running inquiry via HCI_OP_INQUIRY_CANCEL; a no-op
 * (early return, line elided in this chunk) when no inquiry is in
 * progress. */
2929 int hci_cancel_inquiry(struct hci_dev *hdev)
2931 BT_DBG("%s", hdev->name);
2933 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2936 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);