/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;
	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;
	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
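/*
 * A typical synchronous call looks roughly like the one in
 * hci_dev_open() below: the caller passes a request function that only
 * queues HCI commands, and __hci_request() sleeps until
 * hci_req_complete() fires from the event path or the timeout expires:
 *
 *	ret = __hci_request(hdev, hci_reset_req, 0,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * Note that req() must not block; it runs before schedule_timeout(),
 * and completion is signalled asynchronously via hdev->req_wait_q.
 */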
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
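/*
 * The timeout values above are expressed in baseband slots of 0.625 ms:
 * 0x8000 * 0.625 ms = 20480 ms (~20.5 s) for the page timeout and
 * 0x7d00 * 0.625 ms = 20000 ms (20 s) for the connection accept
 * timeout, which is where the "~20 secs" comments come from.
 */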
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
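/*
 * Every successful hci_dev_get() must be balanced by hci_dev_put()
 * once the caller is done with the device, e.g.:
 *
 *	hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 *
 * All of the ioctl helpers below follow this pattern.
 */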
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
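/*
 * This backs the HCIINQUIRY ioctl on a raw HCI socket. A minimal user
 * space sketch (assuming the usual request layout from hci.h, where
 * the inquiry_info records follow the request structure):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { 0 };
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;	// inquiry length, 1.28 s units
 *	buf.ir.num_rsp = 8;
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 *
 * On return, ir.num_rsp holds the number of records copied back.
 */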
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);
	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);
	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
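/*
 * A minimal driver registration sketch (hypothetical callbacks,
 * modelled on how drivers such as hci_vhci use this API):
 *
 *	hdev = hci_alloc_dev();
 *	hdev->bus      = HCI_VIRTUAL;
 *	hdev->owner    = THIS_MODULE;
 *	hdev->open     = my_open;	// hypothetical driver hooks
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 *
 * open, close and destruct are mandatory (checked above); flush is
 * optional.
 */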
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
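/*
 * Drivers that receive complete packets hand them in like this
 * (sketch):
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 *
 * Drivers that only see fragments or a raw byte stream use
 * hci_recv_fragment() or hci_recv_stream_fragment() below instead.
 */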
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
			index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			skb = NULL;
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
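/*
 * Reassembly proceeds in two phases: scb->expect first covers only the
 * packet header, then, once the header is complete, it is reloaded
 * with the payload length found there. For an ACL packet, for example,
 * the first HCI_ACL_HDR_SIZE (4) bytes are collected, hdr->dlen is
 * read, and input is consumed until dlen payload bytes have arrived,
 * at which point the skb is handed to hci_recv_frame(). The return
 * value is the number of input bytes left unconsumed.
 */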
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
						type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
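/*
 * Commands are only queued here; hci_cmd_task() below transmits them,
 * subject to cmd_cnt flow control. A parameterized command looks like
 * the ones issued from hci_init_req(), e.g.:
 *
 *	__u8 scan = SCAN_INQUIRY | SCAN_PAGE;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */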
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
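/*
 * Fragmented skbs arrive here with the fragments chained on
 * skb_shinfo(skb)->frag_list: the head fragment is tagged ACL_START
 * and every continuation ACL_CONT, so the controller can stitch the
 * L2CAP PDU back together. All fragments are queued under the data_q
 * lock so the TX task never sees a partially queued PDU.
 */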
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
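/*
 * The quote is a simple fair share: with, say, hdev->acl_cnt = 8 free
 * controller buffers and num = 2 ACL connections holding queued data,
 * the connection with the fewest packets in flight is picked and
 * allowed to send 8 / 2 = 4 packets before the scheduler looks again.
 */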
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
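/*
 * cmd_cnt implements HCI command flow control: it is decremented for
 * every command handed to the driver and replenished when the
 * controller reports Command Complete/Command Status (handled in
 * hci_event.c). The one-second check above recovers from controllers
 * that never answer a command.
 */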