]> git.karo-electronics.de Git - karo-tx-linux.git/blob - net/bluetooth/hci_core.c
Merge remote-tracking branch 'bluetooth/master'
[karo-tx-linux.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <asm/unaligned.h>
33
34 #include <net/bluetooth/bluetooth.h>
35 #include <net/bluetooth/hci_core.h>
36
37 static void hci_rx_work(struct work_struct *work);
38 static void hci_cmd_work(struct work_struct *work);
39 static void hci_tx_work(struct work_struct *work);
40
41 /* HCI device list */
42 LIST_HEAD(hci_dev_list);
43 DEFINE_RWLOCK(hci_dev_list_lock);
44
45 /* HCI callback list */
46 LIST_HEAD(hci_cb_list);
47 DEFINE_RWLOCK(hci_cb_list_lock);
48
49 /* HCI ID Numbering */
50 static DEFINE_IDA(hci_index_ida);
51
52 /* ---- HCI notifications ---- */
53
54 static void hci_notify(struct hci_dev *hdev, int event)
55 {
56         hci_sock_dev_event(hdev, event);
57 }
58
59 /* ---- HCI debugfs entries ---- */
60
61 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
62                              size_t count, loff_t *ppos)
63 {
64         struct hci_dev *hdev = file->private_data;
65         char buf[3];
66
67         buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
68         buf[1] = '\n';
69         buf[2] = '\0';
70         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
71 }
72
73 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
74                               size_t count, loff_t *ppos)
75 {
76         struct hci_dev *hdev = file->private_data;
77         struct sk_buff *skb;
78         char buf[32];
79         size_t buf_size = min(count, (sizeof(buf)-1));
80         bool enable;
81         int err;
82
83         if (!test_bit(HCI_UP, &hdev->flags))
84                 return -ENETDOWN;
85
86         if (copy_from_user(buf, user_buf, buf_size))
87                 return -EFAULT;
88
89         buf[buf_size] = '\0';
90         if (strtobool(buf, &enable))
91                 return -EINVAL;
92
93         if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
94                 return -EALREADY;
95
96         hci_req_lock(hdev);
97         if (enable)
98                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99                                      HCI_CMD_TIMEOUT);
100         else
101                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102                                      HCI_CMD_TIMEOUT);
103         hci_req_unlock(hdev);
104
105         if (IS_ERR(skb))
106                 return PTR_ERR(skb);
107
108         err = -bt_to_errno(skb->data[0]);
109         kfree_skb(skb);
110
111         if (err < 0)
112                 return err;
113
114         change_bit(HCI_DUT_MODE, &hdev->dev_flags);
115
116         return count;
117 }
118
119 static const struct file_operations dut_mode_fops = {
120         .open           = simple_open,
121         .read           = dut_mode_read,
122         .write          = dut_mode_write,
123         .llseek         = default_llseek,
124 };
125
126 static int features_show(struct seq_file *f, void *ptr)
127 {
128         struct hci_dev *hdev = f->private;
129         u8 p;
130
131         hci_dev_lock(hdev);
132         for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
133                 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
134                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
135                            hdev->features[p][0], hdev->features[p][1],
136                            hdev->features[p][2], hdev->features[p][3],
137                            hdev->features[p][4], hdev->features[p][5],
138                            hdev->features[p][6], hdev->features[p][7]);
139         }
140         if (lmp_le_capable(hdev))
141                 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
142                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
143                            hdev->le_features[0], hdev->le_features[1],
144                            hdev->le_features[2], hdev->le_features[3],
145                            hdev->le_features[4], hdev->le_features[5],
146                            hdev->le_features[6], hdev->le_features[7]);
147         hci_dev_unlock(hdev);
148
149         return 0;
150 }
151
152 static int features_open(struct inode *inode, struct file *file)
153 {
154         return single_open(file, features_show, inode->i_private);
155 }
156
157 static const struct file_operations features_fops = {
158         .open           = features_open,
159         .read           = seq_read,
160         .llseek         = seq_lseek,
161         .release        = single_release,
162 };
163
164 static int blacklist_show(struct seq_file *f, void *p)
165 {
166         struct hci_dev *hdev = f->private;
167         struct bdaddr_list *b;
168
169         hci_dev_lock(hdev);
170         list_for_each_entry(b, &hdev->blacklist, list)
171                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
172         hci_dev_unlock(hdev);
173
174         return 0;
175 }
176
177 static int blacklist_open(struct inode *inode, struct file *file)
178 {
179         return single_open(file, blacklist_show, inode->i_private);
180 }
181
182 static const struct file_operations blacklist_fops = {
183         .open           = blacklist_open,
184         .read           = seq_read,
185         .llseek         = seq_lseek,
186         .release        = single_release,
187 };
188
189 static int uuids_show(struct seq_file *f, void *p)
190 {
191         struct hci_dev *hdev = f->private;
192         struct bt_uuid *uuid;
193
194         hci_dev_lock(hdev);
195         list_for_each_entry(uuid, &hdev->uuids, list) {
196                 u8 i, val[16];
197
198                 /* The Bluetooth UUID values are stored in big endian,
199                  * but with reversed byte order. So convert them into
200                  * the right order for the %pUb modifier.
201                  */
202                 for (i = 0; i < 16; i++)
203                         val[i] = uuid->uuid[15 - i];
204
205                 seq_printf(f, "%pUb\n", val);
206         }
207         hci_dev_unlock(hdev);
208
209         return 0;
210 }
211
212 static int uuids_open(struct inode *inode, struct file *file)
213 {
214         return single_open(file, uuids_show, inode->i_private);
215 }
216
217 static const struct file_operations uuids_fops = {
218         .open           = uuids_open,
219         .read           = seq_read,
220         .llseek         = seq_lseek,
221         .release        = single_release,
222 };
223
224 static int inquiry_cache_show(struct seq_file *f, void *p)
225 {
226         struct hci_dev *hdev = f->private;
227         struct discovery_state *cache = &hdev->discovery;
228         struct inquiry_entry *e;
229
230         hci_dev_lock(hdev);
231
232         list_for_each_entry(e, &cache->all, all) {
233                 struct inquiry_data *data = &e->data;
234                 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
235                            &data->bdaddr,
236                            data->pscan_rep_mode, data->pscan_period_mode,
237                            data->pscan_mode, data->dev_class[2],
238                            data->dev_class[1], data->dev_class[0],
239                            __le16_to_cpu(data->clock_offset),
240                            data->rssi, data->ssp_mode, e->timestamp);
241         }
242
243         hci_dev_unlock(hdev);
244
245         return 0;
246 }
247
248 static int inquiry_cache_open(struct inode *inode, struct file *file)
249 {
250         return single_open(file, inquiry_cache_show, inode->i_private);
251 }
252
253 static const struct file_operations inquiry_cache_fops = {
254         .open           = inquiry_cache_open,
255         .read           = seq_read,
256         .llseek         = seq_lseek,
257         .release        = single_release,
258 };
259
260 static int link_keys_show(struct seq_file *f, void *ptr)
261 {
262         struct hci_dev *hdev = f->private;
263         struct list_head *p, *n;
264
265         hci_dev_lock(hdev);
266         list_for_each_safe(p, n, &hdev->link_keys) {
267                 struct link_key *key = list_entry(p, struct link_key, list);
268                 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
269                            HCI_LINK_KEY_SIZE, key->val, key->pin_len);
270         }
271         hci_dev_unlock(hdev);
272
273         return 0;
274 }
275
276 static int link_keys_open(struct inode *inode, struct file *file)
277 {
278         return single_open(file, link_keys_show, inode->i_private);
279 }
280
281 static const struct file_operations link_keys_fops = {
282         .open           = link_keys_open,
283         .read           = seq_read,
284         .llseek         = seq_lseek,
285         .release        = single_release,
286 };
287
288 static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
289                                    size_t count, loff_t *ppos)
290 {
291         struct hci_dev *hdev = file->private_data;
292         char buf[3];
293
294         buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
295         buf[1] = '\n';
296         buf[2] = '\0';
297         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
298 }
299
300 static const struct file_operations use_debug_keys_fops = {
301         .open           = simple_open,
302         .read           = use_debug_keys_read,
303         .llseek         = default_llseek,
304 };
305
306 static int dev_class_show(struct seq_file *f, void *ptr)
307 {
308         struct hci_dev *hdev = f->private;
309
310         hci_dev_lock(hdev);
311         seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
312                    hdev->dev_class[1], hdev->dev_class[0]);
313         hci_dev_unlock(hdev);
314
315         return 0;
316 }
317
318 static int dev_class_open(struct inode *inode, struct file *file)
319 {
320         return single_open(file, dev_class_show, inode->i_private);
321 }
322
323 static const struct file_operations dev_class_fops = {
324         .open           = dev_class_open,
325         .read           = seq_read,
326         .llseek         = seq_lseek,
327         .release        = single_release,
328 };
329
330 static int voice_setting_get(void *data, u64 *val)
331 {
332         struct hci_dev *hdev = data;
333
334         hci_dev_lock(hdev);
335         *val = hdev->voice_setting;
336         hci_dev_unlock(hdev);
337
338         return 0;
339 }
340
341 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
342                         NULL, "0x%4.4llx\n");
343
344 static int auto_accept_delay_set(void *data, u64 val)
345 {
346         struct hci_dev *hdev = data;
347
348         hci_dev_lock(hdev);
349         hdev->auto_accept_delay = val;
350         hci_dev_unlock(hdev);
351
352         return 0;
353 }
354
355 static int auto_accept_delay_get(void *data, u64 *val)
356 {
357         struct hci_dev *hdev = data;
358
359         hci_dev_lock(hdev);
360         *val = hdev->auto_accept_delay;
361         hci_dev_unlock(hdev);
362
363         return 0;
364 }
365
366 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
367                         auto_accept_delay_set, "%llu\n");
368
369 static int ssp_debug_mode_set(void *data, u64 val)
370 {
371         struct hci_dev *hdev = data;
372         struct sk_buff *skb;
373         __u8 mode;
374         int err;
375
376         if (val != 0 && val != 1)
377                 return -EINVAL;
378
379         if (!test_bit(HCI_UP, &hdev->flags))
380                 return -ENETDOWN;
381
382         hci_req_lock(hdev);
383         mode = val;
384         skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
385                              &mode, HCI_CMD_TIMEOUT);
386         hci_req_unlock(hdev);
387
388         if (IS_ERR(skb))
389                 return PTR_ERR(skb);
390
391         err = -bt_to_errno(skb->data[0]);
392         kfree_skb(skb);
393
394         if (err < 0)
395                 return err;
396
397         hci_dev_lock(hdev);
398         hdev->ssp_debug_mode = val;
399         hci_dev_unlock(hdev);
400
401         return 0;
402 }
403
404 static int ssp_debug_mode_get(void *data, u64 *val)
405 {
406         struct hci_dev *hdev = data;
407
408         hci_dev_lock(hdev);
409         *val = hdev->ssp_debug_mode;
410         hci_dev_unlock(hdev);
411
412         return 0;
413 }
414
415 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
416                         ssp_debug_mode_set, "%llu\n");
417
418 static int idle_timeout_set(void *data, u64 val)
419 {
420         struct hci_dev *hdev = data;
421
422         if (val != 0 && (val < 500 || val > 3600000))
423                 return -EINVAL;
424
425         hci_dev_lock(hdev);
426         hdev->idle_timeout = val;
427         hci_dev_unlock(hdev);
428
429         return 0;
430 }
431
432 static int idle_timeout_get(void *data, u64 *val)
433 {
434         struct hci_dev *hdev = data;
435
436         hci_dev_lock(hdev);
437         *val = hdev->idle_timeout;
438         hci_dev_unlock(hdev);
439
440         return 0;
441 }
442
443 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
444                         idle_timeout_set, "%llu\n");
445
446 static int sniff_min_interval_set(void *data, u64 val)
447 {
448         struct hci_dev *hdev = data;
449
450         if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
451                 return -EINVAL;
452
453         hci_dev_lock(hdev);
454         hdev->sniff_min_interval = val;
455         hci_dev_unlock(hdev);
456
457         return 0;
458 }
459
460 static int sniff_min_interval_get(void *data, u64 *val)
461 {
462         struct hci_dev *hdev = data;
463
464         hci_dev_lock(hdev);
465         *val = hdev->sniff_min_interval;
466         hci_dev_unlock(hdev);
467
468         return 0;
469 }
470
471 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
472                         sniff_min_interval_set, "%llu\n");
473
474 static int sniff_max_interval_set(void *data, u64 val)
475 {
476         struct hci_dev *hdev = data;
477
478         if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
479                 return -EINVAL;
480
481         hci_dev_lock(hdev);
482         hdev->sniff_max_interval = val;
483         hci_dev_unlock(hdev);
484
485         return 0;
486 }
487
488 static int sniff_max_interval_get(void *data, u64 *val)
489 {
490         struct hci_dev *hdev = data;
491
492         hci_dev_lock(hdev);
493         *val = hdev->sniff_max_interval;
494         hci_dev_unlock(hdev);
495
496         return 0;
497 }
498
499 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
500                         sniff_max_interval_set, "%llu\n");
501
502 static int static_address_show(struct seq_file *f, void *p)
503 {
504         struct hci_dev *hdev = f->private;
505
506         hci_dev_lock(hdev);
507         seq_printf(f, "%pMR\n", &hdev->static_addr);
508         hci_dev_unlock(hdev);
509
510         return 0;
511 }
512
513 static int static_address_open(struct inode *inode, struct file *file)
514 {
515         return single_open(file, static_address_show, inode->i_private);
516 }
517
518 static const struct file_operations static_address_fops = {
519         .open           = static_address_open,
520         .read           = seq_read,
521         .llseek         = seq_lseek,
522         .release        = single_release,
523 };
524
525 static int own_address_type_set(void *data, u64 val)
526 {
527         struct hci_dev *hdev = data;
528
529         if (val != 0 && val != 1)
530                 return -EINVAL;
531
532         hci_dev_lock(hdev);
533         hdev->own_addr_type = val;
534         hci_dev_unlock(hdev);
535
536         return 0;
537 }
538
539 static int own_address_type_get(void *data, u64 *val)
540 {
541         struct hci_dev *hdev = data;
542
543         hci_dev_lock(hdev);
544         *val = hdev->own_addr_type;
545         hci_dev_unlock(hdev);
546
547         return 0;
548 }
549
550 DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
551                         own_address_type_set, "%llu\n");
552
553 static int long_term_keys_show(struct seq_file *f, void *ptr)
554 {
555         struct hci_dev *hdev = f->private;
556         struct list_head *p, *n;
557
558         hci_dev_lock(hdev);
559         list_for_each_safe(p, n, &hdev->link_keys) {
560                 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
561                 seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\\n",
562                            &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
563                            ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
564                            8, ltk->rand, 16, ltk->val);
565         }
566         hci_dev_unlock(hdev);
567
568         return 0;
569 }
570
571 static int long_term_keys_open(struct inode *inode, struct file *file)
572 {
573         return single_open(file, long_term_keys_show, inode->i_private);
574 }
575
576 static const struct file_operations long_term_keys_fops = {
577         .open           = long_term_keys_open,
578         .read           = seq_read,
579         .llseek         = seq_lseek,
580         .release        = single_release,
581 };
582
583 static int conn_min_interval_set(void *data, u64 val)
584 {
585         struct hci_dev *hdev = data;
586
587         if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
588                 return -EINVAL;
589
590         hci_dev_lock(hdev);
591         hdev->le_conn_min_interval = val;
592         hci_dev_unlock(hdev);
593
594         return 0;
595 }
596
597 static int conn_min_interval_get(void *data, u64 *val)
598 {
599         struct hci_dev *hdev = data;
600
601         hci_dev_lock(hdev);
602         *val = hdev->le_conn_min_interval;
603         hci_dev_unlock(hdev);
604
605         return 0;
606 }
607
608 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
609                         conn_min_interval_set, "%llu\n");
610
611 static int conn_max_interval_set(void *data, u64 val)
612 {
613         struct hci_dev *hdev = data;
614
615         if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
616                 return -EINVAL;
617
618         hci_dev_lock(hdev);
619         hdev->le_conn_max_interval = val;
620         hci_dev_unlock(hdev);
621
622         return 0;
623 }
624
625 static int conn_max_interval_get(void *data, u64 *val)
626 {
627         struct hci_dev *hdev = data;
628
629         hci_dev_lock(hdev);
630         *val = hdev->le_conn_max_interval;
631         hci_dev_unlock(hdev);
632
633         return 0;
634 }
635
636 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
637                         conn_max_interval_set, "%llu\n");
638
639 /* ---- HCI requests ---- */
640
641 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
642 {
643         BT_DBG("%s result 0x%2.2x", hdev->name, result);
644
645         if (hdev->req_status == HCI_REQ_PEND) {
646                 hdev->req_result = result;
647                 hdev->req_status = HCI_REQ_DONE;
648                 wake_up_interruptible(&hdev->req_wait_q);
649         }
650 }
651
652 static void hci_req_cancel(struct hci_dev *hdev, int err)
653 {
654         BT_DBG("%s err 0x%2.2x", hdev->name, err);
655
656         if (hdev->req_status == HCI_REQ_PEND) {
657                 hdev->req_result = err;
658                 hdev->req_status = HCI_REQ_CANCELED;
659                 wake_up_interruptible(&hdev->req_wait_q);
660         }
661 }
662
663 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
664                                             u8 event)
665 {
666         struct hci_ev_cmd_complete *ev;
667         struct hci_event_hdr *hdr;
668         struct sk_buff *skb;
669
670         hci_dev_lock(hdev);
671
672         skb = hdev->recv_evt;
673         hdev->recv_evt = NULL;
674
675         hci_dev_unlock(hdev);
676
677         if (!skb)
678                 return ERR_PTR(-ENODATA);
679
680         if (skb->len < sizeof(*hdr)) {
681                 BT_ERR("Too short HCI event");
682                 goto failed;
683         }
684
685         hdr = (void *) skb->data;
686         skb_pull(skb, HCI_EVENT_HDR_SIZE);
687
688         if (event) {
689                 if (hdr->evt != event)
690                         goto failed;
691                 return skb;
692         }
693
694         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
695                 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
696                 goto failed;
697         }
698
699         if (skb->len < sizeof(*ev)) {
700                 BT_ERR("Too short cmd_complete event");
701                 goto failed;
702         }
703
704         ev = (void *) skb->data;
705         skb_pull(skb, sizeof(*ev));
706
707         if (opcode == __le16_to_cpu(ev->opcode))
708                 return skb;
709
710         BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
711                __le16_to_cpu(ev->opcode));
712
713 failed:
714         kfree_skb(skb);
715         return ERR_PTR(-ENODATA);
716 }
717
718 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
719                                   const void *param, u8 event, u32 timeout)
720 {
721         DECLARE_WAITQUEUE(wait, current);
722         struct hci_request req;
723         int err = 0;
724
725         BT_DBG("%s", hdev->name);
726
727         hci_req_init(&req, hdev);
728
729         hci_req_add_ev(&req, opcode, plen, param, event);
730
731         hdev->req_status = HCI_REQ_PEND;
732
733         err = hci_req_run(&req, hci_req_sync_complete);
734         if (err < 0)
735                 return ERR_PTR(err);
736
737         add_wait_queue(&hdev->req_wait_q, &wait);
738         set_current_state(TASK_INTERRUPTIBLE);
739
740         schedule_timeout(timeout);
741
742         remove_wait_queue(&hdev->req_wait_q, &wait);
743
744         if (signal_pending(current))
745                 return ERR_PTR(-EINTR);
746
747         switch (hdev->req_status) {
748         case HCI_REQ_DONE:
749                 err = -bt_to_errno(hdev->req_result);
750                 break;
751
752         case HCI_REQ_CANCELED:
753                 err = -hdev->req_result;
754                 break;
755
756         default:
757                 err = -ETIMEDOUT;
758                 break;
759         }
760
761         hdev->req_status = hdev->req_result = 0;
762
763         BT_DBG("%s end: err %d", hdev->name, err);
764
765         if (err < 0)
766                 return ERR_PTR(err);
767
768         return hci_get_cmd_complete(hdev, opcode, event);
769 }
770 EXPORT_SYMBOL(__hci_cmd_sync_ev);
771
772 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
773                                const void *param, u32 timeout)
774 {
775         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
776 }
777 EXPORT_SYMBOL(__hci_cmd_sync);
778
779 /* Execute request and wait for completion. */
780 static int __hci_req_sync(struct hci_dev *hdev,
781                           void (*func)(struct hci_request *req,
782                                       unsigned long opt),
783                           unsigned long opt, __u32 timeout)
784 {
785         struct hci_request req;
786         DECLARE_WAITQUEUE(wait, current);
787         int err = 0;
788
789         BT_DBG("%s start", hdev->name);
790
791         hci_req_init(&req, hdev);
792
793         hdev->req_status = HCI_REQ_PEND;
794
795         func(&req, opt);
796
797         err = hci_req_run(&req, hci_req_sync_complete);
798         if (err < 0) {
799                 hdev->req_status = 0;
800
801                 /* ENODATA means the HCI request command queue is empty.
802                  * This can happen when a request with conditionals doesn't
803                  * trigger any commands to be sent. This is normal behavior
804                  * and should not trigger an error return.
805                  */
806                 if (err == -ENODATA)
807                         return 0;
808
809                 return err;
810         }
811
812         add_wait_queue(&hdev->req_wait_q, &wait);
813         set_current_state(TASK_INTERRUPTIBLE);
814
815         schedule_timeout(timeout);
816
817         remove_wait_queue(&hdev->req_wait_q, &wait);
818
819         if (signal_pending(current))
820                 return -EINTR;
821
822         switch (hdev->req_status) {
823         case HCI_REQ_DONE:
824                 err = -bt_to_errno(hdev->req_result);
825                 break;
826
827         case HCI_REQ_CANCELED:
828                 err = -hdev->req_result;
829                 break;
830
831         default:
832                 err = -ETIMEDOUT;
833                 break;
834         }
835
836         hdev->req_status = hdev->req_result = 0;
837
838         BT_DBG("%s end: err %d", hdev->name, err);
839
840         return err;
841 }
842
843 static int hci_req_sync(struct hci_dev *hdev,
844                         void (*req)(struct hci_request *req,
845                                     unsigned long opt),
846                         unsigned long opt, __u32 timeout)
847 {
848         int ret;
849
850         if (!test_bit(HCI_UP, &hdev->flags))
851                 return -ENETDOWN;
852
853         /* Serialize all requests */
854         hci_req_lock(hdev);
855         ret = __hci_req_sync(hdev, req, opt, timeout);
856         hci_req_unlock(hdev);
857
858         return ret;
859 }
860
861 static void hci_reset_req(struct hci_request *req, unsigned long opt)
862 {
863         BT_DBG("%s %ld", req->hdev->name, opt);
864
865         /* Reset device */
866         set_bit(HCI_RESET, &req->hdev->flags);
867         hci_req_add(req, HCI_OP_RESET, 0, NULL);
868 }
869
870 static void bredr_init(struct hci_request *req)
871 {
872         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
873
874         /* Read Local Supported Features */
875         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
876
877         /* Read Local Version */
878         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
879
880         /* Read BD Address */
881         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
882 }
883
884 static void amp_init(struct hci_request *req)
885 {
886         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
887
888         /* Read Local Version */
889         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
890
891         /* Read Local Supported Commands */
892         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
893
894         /* Read Local Supported Features */
895         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
896
897         /* Read Local AMP Info */
898         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
899
900         /* Read Data Blk size */
901         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
902
903         /* Read Flow Control Mode */
904         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
905
906         /* Read Location Data */
907         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
908 }
909
910 static void hci_init1_req(struct hci_request *req, unsigned long opt)
911 {
912         struct hci_dev *hdev = req->hdev;
913
914         BT_DBG("%s %ld", hdev->name, opt);
915
916         /* Reset */
917         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
918                 hci_reset_req(req, 0);
919
920         switch (hdev->dev_type) {
921         case HCI_BREDR:
922                 bredr_init(req);
923                 break;
924
925         case HCI_AMP:
926                 amp_init(req);
927                 break;
928
929         default:
930                 BT_ERR("Unknown device type %d", hdev->dev_type);
931                 break;
932         }
933 }
934
935 static void bredr_setup(struct hci_request *req)
936 {
937         struct hci_dev *hdev = req->hdev;
938
939         __le16 param;
940         __u8 flt_type;
941
942         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
943         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
944
945         /* Read Class of Device */
946         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
947
948         /* Read Local Name */
949         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
950
951         /* Read Voice Setting */
952         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
953
954         /* Read Number of Supported IAC */
955         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
956
957         /* Read Current IAC LAP */
958         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
959
960         /* Clear Event Filters */
961         flt_type = HCI_FLT_CLEAR_ALL;
962         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
963
964         /* Connection accept timeout ~20 secs */
965         param = __constant_cpu_to_le16(0x7d00);
966         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
967
968         /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
969          * but it does not support page scan related HCI commands.
970          */
971         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
972                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
973                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
974         }
975 }
976
977 static void le_setup(struct hci_request *req)
978 {
979         struct hci_dev *hdev = req->hdev;
980
981         /* Read LE Buffer Size */
982         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
983
984         /* Read LE Local Supported Features */
985         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
986
987         /* Read LE Advertising Channel TX Power */
988         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
989
990         /* Read LE White List Size */
991         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
992
993         /* Read LE Supported States */
994         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
995
996         /* LE-only controllers have LE implicitly enabled */
997         if (!lmp_bredr_capable(hdev))
998                 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
999 }
1000
1001 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1002 {
1003         if (lmp_ext_inq_capable(hdev))
1004                 return 0x02;
1005
1006         if (lmp_inq_rssi_capable(hdev))
1007                 return 0x01;
1008
1009         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1010             hdev->lmp_subver == 0x0757)
1011                 return 0x01;
1012
1013         if (hdev->manufacturer == 15) {
1014                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1015                         return 0x01;
1016                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1017                         return 0x01;
1018                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1019                         return 0x01;
1020         }
1021
1022         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1023             hdev->lmp_subver == 0x1805)
1024                 return 0x01;
1025
1026         return 0x00;
1027 }
1028
1029 static void hci_setup_inquiry_mode(struct hci_request *req)
1030 {
1031         u8 mode;
1032
1033         mode = hci_get_inquiry_mode(req->hdev);
1034
1035         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1036 }
1037
1038 static void hci_setup_event_mask(struct hci_request *req)
1039 {
1040         struct hci_dev *hdev = req->hdev;
1041
1042         /* The second byte is 0xff instead of 0x9f (two reserved bits
1043          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1044          * command otherwise.
1045          */
1046         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1047
1048         /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1049          * any event mask for pre 1.2 devices.
1050          */
1051         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1052                 return;
1053
1054         if (lmp_bredr_capable(hdev)) {
1055                 events[4] |= 0x01; /* Flow Specification Complete */
1056                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1057                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1058                 events[5] |= 0x08; /* Synchronous Connection Complete */
1059                 events[5] |= 0x10; /* Synchronous Connection Changed */
1060         } else {
1061                 /* Use a different default for LE-only devices */
1062                 memset(events, 0, sizeof(events));
1063                 events[0] |= 0x10; /* Disconnection Complete */
1064                 events[0] |= 0x80; /* Encryption Change */
1065                 events[1] |= 0x08; /* Read Remote Version Information Complete */
1066                 events[1] |= 0x20; /* Command Complete */
1067                 events[1] |= 0x40; /* Command Status */
1068                 events[1] |= 0x80; /* Hardware Error */
1069                 events[2] |= 0x04; /* Number of Completed Packets */
1070                 events[3] |= 0x02; /* Data Buffer Overflow */
1071                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1072         }
1073
1074         if (lmp_inq_rssi_capable(hdev))
1075                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1076
1077         if (lmp_sniffsubr_capable(hdev))
1078                 events[5] |= 0x20; /* Sniff Subrating */
1079
1080         if (lmp_pause_enc_capable(hdev))
1081                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1082
1083         if (lmp_ext_inq_capable(hdev))
1084                 events[5] |= 0x40; /* Extended Inquiry Result */
1085
1086         if (lmp_no_flush_capable(hdev))
1087                 events[7] |= 0x01; /* Enhanced Flush Complete */
1088
1089         if (lmp_lsto_capable(hdev))
1090                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1091
1092         if (lmp_ssp_capable(hdev)) {
1093                 events[6] |= 0x01;      /* IO Capability Request */
1094                 events[6] |= 0x02;      /* IO Capability Response */
1095                 events[6] |= 0x04;      /* User Confirmation Request */
1096                 events[6] |= 0x08;      /* User Passkey Request */
1097                 events[6] |= 0x10;      /* Remote OOB Data Request */
1098                 events[6] |= 0x20;      /* Simple Pairing Complete */
1099                 events[7] |= 0x04;      /* User Passkey Notification */
1100                 events[7] |= 0x08;      /* Keypress Notification */
1101                 events[7] |= 0x10;      /* Remote Host Supported
1102                                          * Features Notification
1103                                          */
1104         }
1105
1106         if (lmp_le_capable(hdev))
1107                 events[7] |= 0x20;      /* LE Meta-Event */
1108
1109         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1110
1111         if (lmp_le_capable(hdev)) {
1112                 memset(events, 0, sizeof(events));
1113                 events[0] = 0x1f;
1114                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1115                             sizeof(events), events);
1116         }
1117 }
1118
1119 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1120 {
1121         struct hci_dev *hdev = req->hdev;
1122
1123         if (lmp_bredr_capable(hdev))
1124                 bredr_setup(req);
1125         else
1126                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1127
1128         if (lmp_le_capable(hdev))
1129                 le_setup(req);
1130
1131         hci_setup_event_mask(req);
1132
1133         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1134          * local supported commands HCI command.
1135          */
1136         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1137                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1138
1139         if (lmp_ssp_capable(hdev)) {
1140                 /* When SSP is available, then the host features page
1141                  * should also be available as well. However some
1142                  * controllers list the max_page as 0 as long as SSP
1143                  * has not been enabled. To achieve proper debugging
1144                  * output, force the minimum max_page to 1 at least.
1145                  */
1146                 hdev->max_page = 0x01;
1147
1148                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1149                         u8 mode = 0x01;
1150                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1151                                     sizeof(mode), &mode);
1152                 } else {
1153                         struct hci_cp_write_eir cp;
1154
1155                         memset(hdev->eir, 0, sizeof(hdev->eir));
1156                         memset(&cp, 0, sizeof(cp));
1157
1158                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1159                 }
1160         }
1161
1162         if (lmp_inq_rssi_capable(hdev))
1163                 hci_setup_inquiry_mode(req);
1164
1165         if (lmp_inq_tx_pwr_capable(hdev))
1166                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1167
1168         if (lmp_ext_feat_capable(hdev)) {
1169                 struct hci_cp_read_local_ext_features cp;
1170
1171                 cp.page = 0x01;
1172                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1173                             sizeof(cp), &cp);
1174         }
1175
1176         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1177                 u8 enable = 1;
1178                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1179                             &enable);
1180         }
1181 }
1182
1183 static void hci_setup_link_policy(struct hci_request *req)
1184 {
1185         struct hci_dev *hdev = req->hdev;
1186         struct hci_cp_write_def_link_policy cp;
1187         u16 link_policy = 0;
1188
1189         if (lmp_rswitch_capable(hdev))
1190                 link_policy |= HCI_LP_RSWITCH;
1191         if (lmp_hold_capable(hdev))
1192                 link_policy |= HCI_LP_HOLD;
1193         if (lmp_sniff_capable(hdev))
1194                 link_policy |= HCI_LP_SNIFF;
1195         if (lmp_park_capable(hdev))
1196                 link_policy |= HCI_LP_PARK;
1197
1198         cp.policy = cpu_to_le16(link_policy);
1199         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1200 }
1201
1202 static void hci_set_le_support(struct hci_request *req)
1203 {
1204         struct hci_dev *hdev = req->hdev;
1205         struct hci_cp_write_le_host_supported cp;
1206
1207         /* LE-only devices do not support explicit enablement */
1208         if (!lmp_bredr_capable(hdev))
1209                 return;
1210
1211         memset(&cp, 0, sizeof(cp));
1212
1213         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1214                 cp.le = 0x01;
1215                 cp.simul = lmp_le_br_capable(hdev);
1216         }
1217
1218         if (cp.le != lmp_host_le_capable(hdev))
1219                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1220                             &cp);
1221 }
1222
1223 static void hci_set_event_mask_page_2(struct hci_request *req)
1224 {
1225         struct hci_dev *hdev = req->hdev;
1226         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1227
1228         /* If Connectionless Slave Broadcast master role is supported
1229          * enable all necessary events for it.
1230          */
1231         if (hdev->features[2][0] & 0x01) {
1232                 events[1] |= 0x40;      /* Triggered Clock Capture */
1233                 events[1] |= 0x80;      /* Synchronization Train Complete */
1234                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1235                 events[2] |= 0x20;      /* CSB Channel Map Change */
1236         }
1237
1238         /* If Connectionless Slave Broadcast slave role is supported
1239          * enable all necessary events for it.
1240          */
1241         if (hdev->features[2][0] & 0x02) {
1242                 events[2] |= 0x01;      /* Synchronization Train Received */
1243                 events[2] |= 0x02;      /* CSB Receive */
1244                 events[2] |= 0x04;      /* CSB Timeout */
1245                 events[2] |= 0x08;      /* Truncated Page Complete */
1246         }
1247
1248         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1249 }
1250
1251 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1252 {
1253         struct hci_dev *hdev = req->hdev;
1254         u8 p;
1255
1256         /* Some Broadcom based Bluetooth controllers do not support the
1257          * Delete Stored Link Key command. They are clearly indicating its
1258          * absence in the bit mask of supported commands.
1259          *
1260          * Check the supported commands and only if the the command is marked
1261          * as supported send it. If not supported assume that the controller
1262          * does not have actual support for stored link keys which makes this
1263          * command redundant anyway.
1264          */
1265         if (hdev->commands[6] & 0x80) {
1266                 struct hci_cp_delete_stored_link_key cp;
1267
1268                 bacpy(&cp.bdaddr, BDADDR_ANY);
1269                 cp.delete_all = 0x01;
1270                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1271                             sizeof(cp), &cp);
1272         }
1273
1274         if (hdev->commands[5] & 0x10)
1275                 hci_setup_link_policy(req);
1276
1277         if (lmp_le_capable(hdev)) {
1278                 /* If the controller has a public BD_ADDR, then by
1279                  * default use that one. If this is a LE only
1280                  * controller without one, default to the random
1281                  * address.
1282                  */
1283                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1284                         hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1285                 else
1286                         hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1287
1288                 hci_set_le_support(req);
1289         }
1290
1291         /* Read features beyond page 1 if available */
1292         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1293                 struct hci_cp_read_local_ext_features cp;
1294
1295                 cp.page = p;
1296                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1297                             sizeof(cp), &cp);
1298         }
1299 }
1300
1301 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1302 {
1303         struct hci_dev *hdev = req->hdev;
1304
1305         /* Set event mask page 2 if the HCI command for it is supported */
1306         if (hdev->commands[22] & 0x04)
1307                 hci_set_event_mask_page_2(req);
1308
1309         /* Check for Synchronization Train support */
1310         if (hdev->features[2][0] & 0x04)
1311                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1312 }
1313
1314 static int __hci_init(struct hci_dev *hdev)
1315 {
1316         int err;
1317
1318         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1319         if (err < 0)
1320                 return err;
1321
1322         /* The Device Under Test (DUT) mode is special and available for
1323          * all controller types. So just create it early on.
1324          */
1325         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1326                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1327                                     &dut_mode_fops);
1328         }
1329
1330         /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1331          * BR/EDR/LE type controllers. AMP controllers only need the
1332          * first stage init.
1333          */
1334         if (hdev->dev_type != HCI_BREDR)
1335                 return 0;
1336
1337         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1338         if (err < 0)
1339                 return err;
1340
1341         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1342         if (err < 0)
1343                 return err;
1344
1345         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1346         if (err < 0)
1347                 return err;
1348
1349         /* Only create debugfs entries during the initial setup
1350          * phase and not every time the controller gets powered on.
1351          */
1352         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1353                 return 0;
1354
1355         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1356                             &features_fops);
1357         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1358                            &hdev->manufacturer);
1359         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1360         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1361         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1362                             &blacklist_fops);
1363         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1364
1365         if (lmp_bredr_capable(hdev)) {
1366                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1367                                     hdev, &inquiry_cache_fops);
1368                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1369                                     hdev, &link_keys_fops);
1370                 debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
1371                                     hdev, &use_debug_keys_fops);
1372                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1373                                     hdev, &dev_class_fops);
1374                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1375                                     hdev, &voice_setting_fops);
1376         }
1377
1378         if (lmp_ssp_capable(hdev)) {
1379                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1380                                     hdev, &auto_accept_delay_fops);
1381                 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1382                                     hdev, &ssp_debug_mode_fops);
1383         }
1384
1385         if (lmp_sniff_capable(hdev)) {
1386                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1387                                     hdev, &idle_timeout_fops);
1388                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1389                                     hdev, &sniff_min_interval_fops);
1390                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1391                                     hdev, &sniff_max_interval_fops);
1392         }
1393
1394         if (lmp_le_capable(hdev)) {
1395                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1396                                   &hdev->le_white_list_size);
1397                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1398                                    hdev, &static_address_fops);
1399                 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1400                                     hdev, &own_address_type_fops);
1401                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1402                                     hdev, &long_term_keys_fops);
1403                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1404                                     hdev, &conn_min_interval_fops);
1405                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1406                                     hdev, &conn_max_interval_fops);
1407         }
1408
1409         return 0;
1410 }
1411
1412 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1413 {
1414         __u8 scan = opt;
1415
1416         BT_DBG("%s %x", req->hdev->name, scan);
1417
1418         /* Inquiry and Page scans */
1419         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1420 }
1421
1422 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1423 {
1424         __u8 auth = opt;
1425
1426         BT_DBG("%s %x", req->hdev->name, auth);
1427
1428         /* Authentication */
1429         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1430 }
1431
1432 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1433 {
1434         __u8 encrypt = opt;
1435
1436         BT_DBG("%s %x", req->hdev->name, encrypt);
1437
1438         /* Encryption */
1439         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1440 }
1441
1442 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1443 {
1444         __le16 policy = cpu_to_le16(opt);
1445
1446         BT_DBG("%s %x", req->hdev->name, policy);
1447
1448         /* Default link policy */
1449         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1450 }
1451
1452 /* Get HCI device by index.
1453  * Device is held on return. */
1454 struct hci_dev *hci_dev_get(int index)
1455 {
1456         struct hci_dev *hdev = NULL, *d;
1457
1458         BT_DBG("%d", index);
1459
1460         if (index < 0)
1461                 return NULL;
1462
1463         read_lock(&hci_dev_list_lock);
1464         list_for_each_entry(d, &hci_dev_list, list) {
1465                 if (d->id == index) {
1466                         hdev = hci_dev_hold(d);
1467                         break;
1468                 }
1469         }
1470         read_unlock(&hci_dev_list_lock);
1471         return hdev;
1472 }
1473
1474 /* ---- Inquiry support ---- */
1475
1476 bool hci_discovery_active(struct hci_dev *hdev)
1477 {
1478         struct discovery_state *discov = &hdev->discovery;
1479
1480         switch (discov->state) {
1481         case DISCOVERY_FINDING:
1482         case DISCOVERY_RESOLVING:
1483                 return true;
1484
1485         default:
1486                 return false;
1487         }
1488 }
1489
1490 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1491 {
1492         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1493
1494         if (hdev->discovery.state == state)
1495                 return;
1496
1497         switch (state) {
1498         case DISCOVERY_STOPPED:
1499                 if (hdev->discovery.state != DISCOVERY_STARTING)
1500                         mgmt_discovering(hdev, 0);
1501                 break;
1502         case DISCOVERY_STARTING:
1503                 break;
1504         case DISCOVERY_FINDING:
1505                 mgmt_discovering(hdev, 1);
1506                 break;
1507         case DISCOVERY_RESOLVING:
1508                 break;
1509         case DISCOVERY_STOPPING:
1510                 break;
1511         }
1512
1513         hdev->discovery.state = state;
1514 }
1515
1516 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1517 {
1518         struct discovery_state *cache = &hdev->discovery;
1519         struct inquiry_entry *p, *n;
1520
1521         list_for_each_entry_safe(p, n, &cache->all, all) {
1522                 list_del(&p->all);
1523                 kfree(p);
1524         }
1525
1526         INIT_LIST_HEAD(&cache->unknown);
1527         INIT_LIST_HEAD(&cache->resolve);
1528 }
1529
1530 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1531                                                bdaddr_t *bdaddr)
1532 {
1533         struct discovery_state *cache = &hdev->discovery;
1534         struct inquiry_entry *e;
1535
1536         BT_DBG("cache %p, %pMR", cache, bdaddr);
1537
1538         list_for_each_entry(e, &cache->all, all) {
1539                 if (!bacmp(&e->data.bdaddr, bdaddr))
1540                         return e;
1541         }
1542
1543         return NULL;
1544 }
1545
1546 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1547                                                        bdaddr_t *bdaddr)
1548 {
1549         struct discovery_state *cache = &hdev->discovery;
1550         struct inquiry_entry *e;
1551
1552         BT_DBG("cache %p, %pMR", cache, bdaddr);
1553
1554         list_for_each_entry(e, &cache->unknown, list) {
1555                 if (!bacmp(&e->data.bdaddr, bdaddr))
1556                         return e;
1557         }
1558
1559         return NULL;
1560 }
1561
1562 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1563                                                        bdaddr_t *bdaddr,
1564                                                        int state)
1565 {
1566         struct discovery_state *cache = &hdev->discovery;
1567         struct inquiry_entry *e;
1568
1569         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1570
1571         list_for_each_entry(e, &cache->resolve, list) {
1572                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1573                         return e;
1574                 if (!bacmp(&e->data.bdaddr, bdaddr))
1575                         return e;
1576         }
1577
1578         return NULL;
1579 }
1580
1581 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1582                                       struct inquiry_entry *ie)
1583 {
1584         struct discovery_state *cache = &hdev->discovery;
1585         struct list_head *pos = &cache->resolve;
1586         struct inquiry_entry *p;
1587
1588         list_del(&ie->list);
1589
1590         list_for_each_entry(p, &cache->resolve, list) {
1591                 if (p->name_state != NAME_PENDING &&
1592                     abs(p->data.rssi) >= abs(ie->data.rssi))
1593                         break;
1594                 pos = &p->list;
1595         }
1596
1597         list_add(&ie->list, pos);
1598 }
1599
1600 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1601                               bool name_known, bool *ssp)
1602 {
1603         struct discovery_state *cache = &hdev->discovery;
1604         struct inquiry_entry *ie;
1605
1606         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1607
1608         hci_remove_remote_oob_data(hdev, &data->bdaddr);
1609
1610         if (ssp)
1611                 *ssp = data->ssp_mode;
1612
1613         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1614         if (ie) {
1615                 if (ie->data.ssp_mode && ssp)
1616                         *ssp = true;
1617
1618                 if (ie->name_state == NAME_NEEDED &&
1619                     data->rssi != ie->data.rssi) {
1620                         ie->data.rssi = data->rssi;
1621                         hci_inquiry_cache_update_resolve(hdev, ie);
1622                 }
1623
1624                 goto update;
1625         }
1626
1627         /* Entry not in the cache. Add new one. */
1628         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1629         if (!ie)
1630                 return false;
1631
1632         list_add(&ie->all, &cache->all);
1633
1634         if (name_known) {
1635                 ie->name_state = NAME_KNOWN;
1636         } else {
1637                 ie->name_state = NAME_NOT_KNOWN;
1638                 list_add(&ie->list, &cache->unknown);
1639         }
1640
1641 update:
1642         if (name_known && ie->name_state != NAME_KNOWN &&
1643             ie->name_state != NAME_PENDING) {
1644                 ie->name_state = NAME_KNOWN;
1645                 list_del(&ie->list);
1646         }
1647
1648         memcpy(&ie->data, data, sizeof(*data));
1649         ie->timestamp = jiffies;
1650         cache->timestamp = jiffies;
1651
1652         if (ie->name_state == NAME_NOT_KNOWN)
1653                 return false;
1654
1655         return true;
1656 }
1657
1658 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1659 {
1660         struct discovery_state *cache = &hdev->discovery;
1661         struct inquiry_info *info = (struct inquiry_info *) buf;
1662         struct inquiry_entry *e;
1663         int copied = 0;
1664
1665         list_for_each_entry(e, &cache->all, all) {
1666                 struct inquiry_data *data = &e->data;
1667
1668                 if (copied >= num)
1669                         break;
1670
1671                 bacpy(&info->bdaddr, &data->bdaddr);
1672                 info->pscan_rep_mode    = data->pscan_rep_mode;
1673                 info->pscan_period_mode = data->pscan_period_mode;
1674                 info->pscan_mode        = data->pscan_mode;
1675                 memcpy(info->dev_class, data->dev_class, 3);
1676                 info->clock_offset      = data->clock_offset;
1677
1678                 info++;
1679                 copied++;
1680         }
1681
1682         BT_DBG("cache %p, copied %d", cache, copied);
1683         return copied;
1684 }
1685
1686 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1687 {
1688         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1689         struct hci_dev *hdev = req->hdev;
1690         struct hci_cp_inquiry cp;
1691
1692         BT_DBG("%s", hdev->name);
1693
1694         if (test_bit(HCI_INQUIRY, &hdev->flags))
1695                 return;
1696
1697         /* Start Inquiry */
1698         memcpy(&cp.lap, &ir->lap, 3);
1699         cp.length  = ir->length;
1700         cp.num_rsp = ir->num_rsp;
1701         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1702 }
1703
1704 static int wait_inquiry(void *word)
1705 {
1706         schedule();
1707         return signal_pending(current);
1708 }
1709
1710 int hci_inquiry(void __user *arg)
1711 {
1712         __u8 __user *ptr = arg;
1713         struct hci_inquiry_req ir;
1714         struct hci_dev *hdev;
1715         int err = 0, do_inquiry = 0, max_rsp;
1716         long timeo;
1717         __u8 *buf;
1718
1719         if (copy_from_user(&ir, ptr, sizeof(ir)))
1720                 return -EFAULT;
1721
1722         hdev = hci_dev_get(ir.dev_id);
1723         if (!hdev)
1724                 return -ENODEV;
1725
1726         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1727                 err = -EBUSY;
1728                 goto done;
1729         }
1730
1731         if (hdev->dev_type != HCI_BREDR) {
1732                 err = -EOPNOTSUPP;
1733                 goto done;
1734         }
1735
1736         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1737                 err = -EOPNOTSUPP;
1738                 goto done;
1739         }
1740
1741         hci_dev_lock(hdev);
1742         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1743             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1744                 hci_inquiry_cache_flush(hdev);
1745                 do_inquiry = 1;
1746         }
1747         hci_dev_unlock(hdev);
1748
1749         timeo = ir.length * msecs_to_jiffies(2000);
1750
1751         if (do_inquiry) {
1752                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1753                                    timeo);
1754                 if (err < 0)
1755                         goto done;
1756
1757                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1758                  * cleared). If it is interrupted by a signal, return -EINTR.
1759                  */
1760                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1761                                 TASK_INTERRUPTIBLE))
1762                         return -EINTR;
1763         }
1764
1765         /* for unlimited number of responses we will use buffer with
1766          * 255 entries
1767          */
1768         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1769
1770         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1771          * copy it to the user space.
1772          */
1773         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1774         if (!buf) {
1775                 err = -ENOMEM;
1776                 goto done;
1777         }
1778
1779         hci_dev_lock(hdev);
1780         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1781         hci_dev_unlock(hdev);
1782
1783         BT_DBG("num_rsp %d", ir.num_rsp);
1784
1785         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1786                 ptr += sizeof(ir);
1787                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1788                                  ir.num_rsp))
1789                         err = -EFAULT;
1790         } else
1791                 err = -EFAULT;
1792
1793         kfree(buf);
1794
1795 done:
1796         hci_dev_put(hdev);
1797         return err;
1798 }
1799
1800 static int hci_dev_do_open(struct hci_dev *hdev)
1801 {
1802         int ret = 0;
1803
1804         BT_DBG("%s %p", hdev->name, hdev);
1805
1806         hci_req_lock(hdev);
1807
1808         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1809                 ret = -ENODEV;
1810                 goto done;
1811         }
1812
1813         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1814                 /* Check for rfkill but allow the HCI setup stage to
1815                  * proceed (which in itself doesn't cause any RF activity).
1816                  */
1817                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1818                         ret = -ERFKILL;
1819                         goto done;
1820                 }
1821
1822                 /* Check for valid public address or a configured static
1823                  * random adddress, but let the HCI setup proceed to
1824                  * be able to determine if there is a public address
1825                  * or not.
1826                  *
1827                  * This check is only valid for BR/EDR controllers
1828                  * since AMP controllers do not have an address.
1829                  */
1830                 if (hdev->dev_type == HCI_BREDR &&
1831                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1832                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1833                         ret = -EADDRNOTAVAIL;
1834                         goto done;
1835                 }
1836         }
1837
1838         if (test_bit(HCI_UP, &hdev->flags)) {
1839                 ret = -EALREADY;
1840                 goto done;
1841         }
1842
1843         if (hdev->open(hdev)) {
1844                 ret = -EIO;
1845                 goto done;
1846         }
1847
1848         atomic_set(&hdev->cmd_cnt, 1);
1849         set_bit(HCI_INIT, &hdev->flags);
1850
1851         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1852                 ret = hdev->setup(hdev);
1853
1854         if (!ret) {
1855                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1856                         set_bit(HCI_RAW, &hdev->flags);
1857
1858                 if (!test_bit(HCI_RAW, &hdev->flags) &&
1859                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1860                         ret = __hci_init(hdev);
1861         }
1862
1863         clear_bit(HCI_INIT, &hdev->flags);
1864
1865         if (!ret) {
1866                 hci_dev_hold(hdev);
1867                 set_bit(HCI_UP, &hdev->flags);
1868                 hci_notify(hdev, HCI_DEV_UP);
1869                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1870                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1871                     hdev->dev_type == HCI_BREDR) {
1872                         hci_dev_lock(hdev);
1873                         mgmt_powered(hdev, 1);
1874                         hci_dev_unlock(hdev);
1875                 }
1876         } else {
1877                 /* Init failed, cleanup */
1878                 flush_work(&hdev->tx_work);
1879                 flush_work(&hdev->cmd_work);
1880                 flush_work(&hdev->rx_work);
1881
1882                 skb_queue_purge(&hdev->cmd_q);
1883                 skb_queue_purge(&hdev->rx_q);
1884
1885                 if (hdev->flush)
1886                         hdev->flush(hdev);
1887
1888                 if (hdev->sent_cmd) {
1889                         kfree_skb(hdev->sent_cmd);
1890                         hdev->sent_cmd = NULL;
1891                 }
1892
1893                 hdev->close(hdev);
1894                 hdev->flags = 0;
1895         }
1896
1897 done:
1898         hci_req_unlock(hdev);
1899         return ret;
1900 }
1901
1902 /* ---- HCI ioctl helpers ---- */
1903
1904 int hci_dev_open(__u16 dev)
1905 {
1906         struct hci_dev *hdev;
1907         int err;
1908
1909         hdev = hci_dev_get(dev);
1910         if (!hdev)
1911                 return -ENODEV;
1912
1913         /* We need to ensure that no other power on/off work is pending
1914          * before proceeding to call hci_dev_do_open. This is
1915          * particularly important if the setup procedure has not yet
1916          * completed.
1917          */
1918         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1919                 cancel_delayed_work(&hdev->power_off);
1920
1921         /* After this call it is guaranteed that the setup procedure
1922          * has finished. This means that error conditions like RFKILL
1923          * or no valid public or static random address apply.
1924          */
1925         flush_workqueue(hdev->req_workqueue);
1926
1927         err = hci_dev_do_open(hdev);
1928
1929         hci_dev_put(hdev);
1930
1931         return err;
1932 }
1933
1934 static int hci_dev_do_close(struct hci_dev *hdev)
1935 {
1936         BT_DBG("%s %p", hdev->name, hdev);
1937
1938         cancel_delayed_work(&hdev->power_off);
1939
1940         hci_req_cancel(hdev, ENODEV);
1941         hci_req_lock(hdev);
1942
1943         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1944                 del_timer_sync(&hdev->cmd_timer);
1945                 hci_req_unlock(hdev);
1946                 return 0;
1947         }
1948
1949         /* Flush RX and TX works */
1950         flush_work(&hdev->tx_work);
1951         flush_work(&hdev->rx_work);
1952
1953         if (hdev->discov_timeout > 0) {
1954                 cancel_delayed_work(&hdev->discov_off);
1955                 hdev->discov_timeout = 0;
1956                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1957                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1958         }
1959
1960         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1961                 cancel_delayed_work(&hdev->service_cache);
1962
1963         cancel_delayed_work_sync(&hdev->le_scan_disable);
1964
1965         hci_dev_lock(hdev);
1966         hci_inquiry_cache_flush(hdev);
1967         hci_conn_hash_flush(hdev);
1968         hci_dev_unlock(hdev);
1969
1970         hci_notify(hdev, HCI_DEV_DOWN);
1971
1972         if (hdev->flush)
1973                 hdev->flush(hdev);
1974
1975         /* Reset device */
1976         skb_queue_purge(&hdev->cmd_q);
1977         atomic_set(&hdev->cmd_cnt, 1);
1978         if (!test_bit(HCI_RAW, &hdev->flags) &&
1979             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1980             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1981                 set_bit(HCI_INIT, &hdev->flags);
1982                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1983                 clear_bit(HCI_INIT, &hdev->flags);
1984         }
1985
1986         /* flush cmd  work */
1987         flush_work(&hdev->cmd_work);
1988
1989         /* Drop queues */
1990         skb_queue_purge(&hdev->rx_q);
1991         skb_queue_purge(&hdev->cmd_q);
1992         skb_queue_purge(&hdev->raw_q);
1993
1994         /* Drop last sent command */
1995         if (hdev->sent_cmd) {
1996                 del_timer_sync(&hdev->cmd_timer);
1997                 kfree_skb(hdev->sent_cmd);
1998                 hdev->sent_cmd = NULL;
1999         }
2000
2001         kfree_skb(hdev->recv_evt);
2002         hdev->recv_evt = NULL;
2003
2004         /* After this point our queues are empty
2005          * and no tasks are scheduled. */
2006         hdev->close(hdev);
2007
2008         /* Clear flags */
2009         hdev->flags = 0;
2010         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2011
2012         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2013                 if (hdev->dev_type == HCI_BREDR) {
2014                         hci_dev_lock(hdev);
2015                         mgmt_powered(hdev, 0);
2016                         hci_dev_unlock(hdev);
2017                 }
2018         }
2019
2020         /* Controller radio is available but is currently powered down */
2021         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2022
2023         memset(hdev->eir, 0, sizeof(hdev->eir));
2024         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2025
2026         hci_req_unlock(hdev);
2027
2028         hci_dev_put(hdev);
2029         return 0;
2030 }
2031
2032 int hci_dev_close(__u16 dev)
2033 {
2034         struct hci_dev *hdev;
2035         int err;
2036
2037         hdev = hci_dev_get(dev);
2038         if (!hdev)
2039                 return -ENODEV;
2040
2041         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2042                 err = -EBUSY;
2043                 goto done;
2044         }
2045
2046         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2047                 cancel_delayed_work(&hdev->power_off);
2048
2049         err = hci_dev_do_close(hdev);
2050
2051 done:
2052         hci_dev_put(hdev);
2053         return err;
2054 }
2055
2056 int hci_dev_reset(__u16 dev)
2057 {
2058         struct hci_dev *hdev;
2059         int ret = 0;
2060
2061         hdev = hci_dev_get(dev);
2062         if (!hdev)
2063                 return -ENODEV;
2064
2065         hci_req_lock(hdev);
2066
2067         if (!test_bit(HCI_UP, &hdev->flags)) {
2068                 ret = -ENETDOWN;
2069                 goto done;
2070         }
2071
2072         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2073                 ret = -EBUSY;
2074                 goto done;
2075         }
2076
2077         /* Drop queues */
2078         skb_queue_purge(&hdev->rx_q);
2079         skb_queue_purge(&hdev->cmd_q);
2080
2081         hci_dev_lock(hdev);
2082         hci_inquiry_cache_flush(hdev);
2083         hci_conn_hash_flush(hdev);
2084         hci_dev_unlock(hdev);
2085
2086         if (hdev->flush)
2087                 hdev->flush(hdev);
2088
2089         atomic_set(&hdev->cmd_cnt, 1);
2090         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2091
2092         if (!test_bit(HCI_RAW, &hdev->flags))
2093                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2094
2095 done:
2096         hci_req_unlock(hdev);
2097         hci_dev_put(hdev);
2098         return ret;
2099 }
2100
2101 int hci_dev_reset_stat(__u16 dev)
2102 {
2103         struct hci_dev *hdev;
2104         int ret = 0;
2105
2106         hdev = hci_dev_get(dev);
2107         if (!hdev)
2108                 return -ENODEV;
2109
2110         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2111                 ret = -EBUSY;
2112                 goto done;
2113         }
2114
2115         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2116
2117 done:
2118         hci_dev_put(hdev);
2119         return ret;
2120 }
2121
2122 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2123 {
2124         struct hci_dev *hdev;
2125         struct hci_dev_req dr;
2126         int err = 0;
2127
2128         if (copy_from_user(&dr, arg, sizeof(dr)))
2129                 return -EFAULT;
2130
2131         hdev = hci_dev_get(dr.dev_id);
2132         if (!hdev)
2133                 return -ENODEV;
2134
2135         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2136                 err = -EBUSY;
2137                 goto done;
2138         }
2139
2140         if (hdev->dev_type != HCI_BREDR) {
2141                 err = -EOPNOTSUPP;
2142                 goto done;
2143         }
2144
2145         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2146                 err = -EOPNOTSUPP;
2147                 goto done;
2148         }
2149
2150         switch (cmd) {
2151         case HCISETAUTH:
2152                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2153                                    HCI_INIT_TIMEOUT);
2154                 break;
2155
2156         case HCISETENCRYPT:
2157                 if (!lmp_encrypt_capable(hdev)) {
2158                         err = -EOPNOTSUPP;
2159                         break;
2160                 }
2161
2162                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2163                         /* Auth must be enabled first */
2164                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2165                                            HCI_INIT_TIMEOUT);
2166                         if (err)
2167                                 break;
2168                 }
2169
2170                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2171                                    HCI_INIT_TIMEOUT);
2172                 break;
2173
2174         case HCISETSCAN:
2175                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2176                                    HCI_INIT_TIMEOUT);
2177                 break;
2178
2179         case HCISETLINKPOL:
2180                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2181                                    HCI_INIT_TIMEOUT);
2182                 break;
2183
2184         case HCISETLINKMODE:
2185                 hdev->link_mode = ((__u16) dr.dev_opt) &
2186                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2187                 break;
2188
2189         case HCISETPTYPE:
2190                 hdev->pkt_type = (__u16) dr.dev_opt;
2191                 break;
2192
2193         case HCISETACLMTU:
2194                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2195                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2196                 break;
2197
2198         case HCISETSCOMTU:
2199                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2200                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2201                 break;
2202
2203         default:
2204                 err = -EINVAL;
2205                 break;
2206         }
2207
2208 done:
2209         hci_dev_put(hdev);
2210         return err;
2211 }
2212
2213 int hci_get_dev_list(void __user *arg)
2214 {
2215         struct hci_dev *hdev;
2216         struct hci_dev_list_req *dl;
2217         struct hci_dev_req *dr;
2218         int n = 0, size, err;
2219         __u16 dev_num;
2220
2221         if (get_user(dev_num, (__u16 __user *) arg))
2222                 return -EFAULT;
2223
2224         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2225                 return -EINVAL;
2226
2227         size = sizeof(*dl) + dev_num * sizeof(*dr);
2228
2229         dl = kzalloc(size, GFP_KERNEL);
2230         if (!dl)
2231                 return -ENOMEM;
2232
2233         dr = dl->dev_req;
2234
2235         read_lock(&hci_dev_list_lock);
2236         list_for_each_entry(hdev, &hci_dev_list, list) {
2237                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2238                         cancel_delayed_work(&hdev->power_off);
2239
2240                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2241                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2242
2243                 (dr + n)->dev_id  = hdev->id;
2244                 (dr + n)->dev_opt = hdev->flags;
2245
2246                 if (++n >= dev_num)
2247                         break;
2248         }
2249         read_unlock(&hci_dev_list_lock);
2250
2251         dl->dev_num = n;
2252         size = sizeof(*dl) + n * sizeof(*dr);
2253
2254         err = copy_to_user(arg, dl, size);
2255         kfree(dl);
2256
2257         return err ? -EFAULT : 0;
2258 }
2259
2260 int hci_get_dev_info(void __user *arg)
2261 {
2262         struct hci_dev *hdev;
2263         struct hci_dev_info di;
2264         int err = 0;
2265
2266         if (copy_from_user(&di, arg, sizeof(di)))
2267                 return -EFAULT;
2268
2269         hdev = hci_dev_get(di.dev_id);
2270         if (!hdev)
2271                 return -ENODEV;
2272
2273         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2274                 cancel_delayed_work_sync(&hdev->power_off);
2275
2276         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2277                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2278
2279         strcpy(di.name, hdev->name);
2280         di.bdaddr   = hdev->bdaddr;
2281         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2282         di.flags    = hdev->flags;
2283         di.pkt_type = hdev->pkt_type;
2284         if (lmp_bredr_capable(hdev)) {
2285                 di.acl_mtu  = hdev->acl_mtu;
2286                 di.acl_pkts = hdev->acl_pkts;
2287                 di.sco_mtu  = hdev->sco_mtu;
2288                 di.sco_pkts = hdev->sco_pkts;
2289         } else {
2290                 di.acl_mtu  = hdev->le_mtu;
2291                 di.acl_pkts = hdev->le_pkts;
2292                 di.sco_mtu  = 0;
2293                 di.sco_pkts = 0;
2294         }
2295         di.link_policy = hdev->link_policy;
2296         di.link_mode   = hdev->link_mode;
2297
2298         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2299         memcpy(&di.features, &hdev->features, sizeof(di.features));
2300
2301         if (copy_to_user(arg, &di, sizeof(di)))
2302                 err = -EFAULT;
2303
2304         hci_dev_put(hdev);
2305
2306         return err;
2307 }
2308
2309 /* ---- Interface to HCI drivers ---- */
2310
2311 static int hci_rfkill_set_block(void *data, bool blocked)
2312 {
2313         struct hci_dev *hdev = data;
2314
2315         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2316
2317         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2318                 return -EBUSY;
2319
2320         if (blocked) {
2321                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2322                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2323                         hci_dev_do_close(hdev);
2324         } else {
2325                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2326         }
2327
2328         return 0;
2329 }
2330
2331 static const struct rfkill_ops hci_rfkill_ops = {
2332         .set_block = hci_rfkill_set_block,
2333 };
2334
2335 static void hci_power_on(struct work_struct *work)
2336 {
2337         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2338         int err;
2339
2340         BT_DBG("%s", hdev->name);
2341
2342         err = hci_dev_do_open(hdev);
2343         if (err < 0) {
2344                 mgmt_set_powered_failed(hdev, err);
2345                 return;
2346         }
2347
2348         /* During the HCI setup phase, a few error conditions are
2349          * ignored and they need to be checked now. If they are still
2350          * valid, it is important to turn the device back off.
2351          */
2352         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2353             (hdev->dev_type == HCI_BREDR &&
2354              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2355              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2356                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2357                 hci_dev_do_close(hdev);
2358         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2359                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2360                                    HCI_AUTO_OFF_TIMEOUT);
2361         }
2362
2363         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2364                 mgmt_index_added(hdev);
2365 }
2366
2367 static void hci_power_off(struct work_struct *work)
2368 {
2369         struct hci_dev *hdev = container_of(work, struct hci_dev,
2370                                             power_off.work);
2371
2372         BT_DBG("%s", hdev->name);
2373
2374         hci_dev_do_close(hdev);
2375 }
2376
2377 static void hci_discov_off(struct work_struct *work)
2378 {
2379         struct hci_dev *hdev;
2380
2381         hdev = container_of(work, struct hci_dev, discov_off.work);
2382
2383         BT_DBG("%s", hdev->name);
2384
2385         mgmt_discoverable_timeout(hdev);
2386 }
2387
2388 int hci_uuids_clear(struct hci_dev *hdev)
2389 {
2390         struct bt_uuid *uuid, *tmp;
2391
2392         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2393                 list_del(&uuid->list);
2394                 kfree(uuid);
2395         }
2396
2397         return 0;
2398 }
2399
2400 int hci_link_keys_clear(struct hci_dev *hdev)
2401 {
2402         struct list_head *p, *n;
2403
2404         list_for_each_safe(p, n, &hdev->link_keys) {
2405                 struct link_key *key;
2406
2407                 key = list_entry(p, struct link_key, list);
2408
2409                 list_del(p);
2410                 kfree(key);
2411         }
2412
2413         return 0;
2414 }
2415
2416 int hci_smp_ltks_clear(struct hci_dev *hdev)
2417 {
2418         struct smp_ltk *k, *tmp;
2419
2420         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2421                 list_del(&k->list);
2422                 kfree(k);
2423         }
2424
2425         return 0;
2426 }
2427
2428 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2429 {
2430         struct link_key *k;
2431
2432         list_for_each_entry(k, &hdev->link_keys, list)
2433                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2434                         return k;
2435
2436         return NULL;
2437 }
2438
2439 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2440                                u8 key_type, u8 old_key_type)
2441 {
2442         /* Legacy key */
2443         if (key_type < 0x03)
2444                 return true;
2445
2446         /* Debug keys are insecure so don't store them persistently */
2447         if (key_type == HCI_LK_DEBUG_COMBINATION)
2448                 return false;
2449
2450         /* Changed combination key and there's no previous one */
2451         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2452                 return false;
2453
2454         /* Security mode 3 case */
2455         if (!conn)
2456                 return true;
2457
2458         /* Neither local nor remote side had no-bonding as requirement */
2459         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2460                 return true;
2461
2462         /* Local side had dedicated bonding as requirement */
2463         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2464                 return true;
2465
2466         /* Remote side had dedicated bonding as requirement */
2467         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2468                 return true;
2469
2470         /* If none of the above criteria match, then don't store the key
2471          * persistently */
2472         return false;
2473 }
2474
2475 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
2476 {
2477         struct smp_ltk *k;
2478
2479         list_for_each_entry(k, &hdev->long_term_keys, list) {
2480                 if (k->ediv != ediv ||
2481                     memcmp(rand, k->rand, sizeof(k->rand)))
2482                         continue;
2483
2484                 return k;
2485         }
2486
2487         return NULL;
2488 }
2489
2490 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2491                                      u8 addr_type)
2492 {
2493         struct smp_ltk *k;
2494
2495         list_for_each_entry(k, &hdev->long_term_keys, list)
2496                 if (addr_type == k->bdaddr_type &&
2497                     bacmp(bdaddr, &k->bdaddr) == 0)
2498                         return k;
2499
2500         return NULL;
2501 }
2502
2503 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2504                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2505 {
2506         struct link_key *key, *old_key;
2507         u8 old_key_type;
2508         bool persistent;
2509
2510         old_key = hci_find_link_key(hdev, bdaddr);
2511         if (old_key) {
2512                 old_key_type = old_key->type;
2513                 key = old_key;
2514         } else {
2515                 old_key_type = conn ? conn->key_type : 0xff;
2516                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2517                 if (!key)
2518                         return -ENOMEM;
2519                 list_add(&key->list, &hdev->link_keys);
2520         }
2521
2522         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2523
2524         /* Some buggy controller combinations generate a changed
2525          * combination key for legacy pairing even when there's no
2526          * previous key */
2527         if (type == HCI_LK_CHANGED_COMBINATION &&
2528             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2529                 type = HCI_LK_COMBINATION;
2530                 if (conn)
2531                         conn->key_type = type;
2532         }
2533
2534         bacpy(&key->bdaddr, bdaddr);
2535         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2536         key->pin_len = pin_len;
2537
2538         if (type == HCI_LK_CHANGED_COMBINATION)
2539                 key->type = old_key_type;
2540         else
2541                 key->type = type;
2542
2543         if (!new_key)
2544                 return 0;
2545
2546         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2547
2548         mgmt_new_link_key(hdev, key, persistent);
2549
2550         if (conn)
2551                 conn->flush_key = !persistent;
2552
2553         return 0;
2554 }
2555
2556 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2557                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2558                 ediv, u8 rand[8])
2559 {
2560         struct smp_ltk *key, *old_key;
2561
2562         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2563                 return 0;
2564
2565         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2566         if (old_key)
2567                 key = old_key;
2568         else {
2569                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2570                 if (!key)
2571                         return -ENOMEM;
2572                 list_add(&key->list, &hdev->long_term_keys);
2573         }
2574
2575         bacpy(&key->bdaddr, bdaddr);
2576         key->bdaddr_type = addr_type;
2577         memcpy(key->val, tk, sizeof(key->val));
2578         key->authenticated = authenticated;
2579         key->ediv = ediv;
2580         key->enc_size = enc_size;
2581         key->type = type;
2582         memcpy(key->rand, rand, sizeof(key->rand));
2583
2584         if (!new_key)
2585                 return 0;
2586
2587         if (type & HCI_SMP_LTK)
2588                 mgmt_new_ltk(hdev, key, 1);
2589
2590         return 0;
2591 }
2592
2593 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2594 {
2595         struct link_key *key;
2596
2597         key = hci_find_link_key(hdev, bdaddr);
2598         if (!key)
2599                 return -ENOENT;
2600
2601         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2602
2603         list_del(&key->list);
2604         kfree(key);
2605
2606         return 0;
2607 }
2608
2609 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2610 {
2611         struct smp_ltk *k, *tmp;
2612
2613         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2614                 if (bacmp(bdaddr, &k->bdaddr))
2615                         continue;
2616
2617                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2618
2619                 list_del(&k->list);
2620                 kfree(k);
2621         }
2622
2623         return 0;
2624 }
2625
2626 /* HCI command timer function */
2627 static void hci_cmd_timeout(unsigned long arg)
2628 {
2629         struct hci_dev *hdev = (void *) arg;
2630
2631         if (hdev->sent_cmd) {
2632                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2633                 u16 opcode = __le16_to_cpu(sent->opcode);
2634
2635                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2636         } else {
2637                 BT_ERR("%s command tx timeout", hdev->name);
2638         }
2639
2640         atomic_set(&hdev->cmd_cnt, 1);
2641         queue_work(hdev->workqueue, &hdev->cmd_work);
2642 }
2643
2644 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2645                                           bdaddr_t *bdaddr)
2646 {
2647         struct oob_data *data;
2648
2649         list_for_each_entry(data, &hdev->remote_oob_data, list)
2650                 if (bacmp(bdaddr, &data->bdaddr) == 0)
2651                         return data;
2652
2653         return NULL;
2654 }
2655
2656 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2657 {
2658         struct oob_data *data;
2659
2660         data = hci_find_remote_oob_data(hdev, bdaddr);
2661         if (!data)
2662                 return -ENOENT;
2663
2664         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2665
2666         list_del(&data->list);
2667         kfree(data);
2668
2669         return 0;
2670 }
2671
2672 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2673 {
2674         struct oob_data *data, *n;
2675
2676         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2677                 list_del(&data->list);
2678                 kfree(data);
2679         }
2680
2681         return 0;
2682 }
2683
2684 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2685                             u8 *randomizer)
2686 {
2687         struct oob_data *data;
2688
2689         data = hci_find_remote_oob_data(hdev, bdaddr);
2690
2691         if (!data) {
2692                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2693                 if (!data)
2694                         return -ENOMEM;
2695
2696                 bacpy(&data->bdaddr, bdaddr);
2697                 list_add(&data->list, &hdev->remote_oob_data);
2698         }
2699
2700         memcpy(data->hash, hash, sizeof(data->hash));
2701         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2702
2703         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2704
2705         return 0;
2706 }
2707
2708 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2709                                          bdaddr_t *bdaddr, u8 type)
2710 {
2711         struct bdaddr_list *b;
2712
2713         list_for_each_entry(b, &hdev->blacklist, list) {
2714                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2715                         return b;
2716         }
2717
2718         return NULL;
2719 }
2720
2721 int hci_blacklist_clear(struct hci_dev *hdev)
2722 {
2723         struct list_head *p, *n;
2724
2725         list_for_each_safe(p, n, &hdev->blacklist) {
2726                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2727
2728                 list_del(p);
2729                 kfree(b);
2730         }
2731
2732         return 0;
2733 }
2734
2735 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2736 {
2737         struct bdaddr_list *entry;
2738
2739         if (!bacmp(bdaddr, BDADDR_ANY))
2740                 return -EBADF;
2741
2742         if (hci_blacklist_lookup(hdev, bdaddr, type))
2743                 return -EEXIST;
2744
2745         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2746         if (!entry)
2747                 return -ENOMEM;
2748
2749         bacpy(&entry->bdaddr, bdaddr);
2750         entry->bdaddr_type = type;
2751
2752         list_add(&entry->list, &hdev->blacklist);
2753
2754         return mgmt_device_blocked(hdev, bdaddr, type);
2755 }
2756
2757 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2758 {
2759         struct bdaddr_list *entry;
2760
2761         if (!bacmp(bdaddr, BDADDR_ANY))
2762                 return hci_blacklist_clear(hdev);
2763
2764         entry = hci_blacklist_lookup(hdev, bdaddr, type);
2765         if (!entry)
2766                 return -ENOENT;
2767
2768         list_del(&entry->list);
2769         kfree(entry);
2770
2771         return mgmt_device_unblocked(hdev, bdaddr, type);
2772 }
2773
2774 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2775 {
2776         if (status) {
2777                 BT_ERR("Failed to start inquiry: status %d", status);
2778
2779                 hci_dev_lock(hdev);
2780                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2781                 hci_dev_unlock(hdev);
2782                 return;
2783         }
2784 }
2785
2786 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2787 {
2788         /* General inquiry access code (GIAC) */
2789         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2790         struct hci_request req;
2791         struct hci_cp_inquiry cp;
2792         int err;
2793
2794         if (status) {
2795                 BT_ERR("Failed to disable LE scanning: status %d", status);
2796                 return;
2797         }
2798
2799         switch (hdev->discovery.type) {
2800         case DISCOV_TYPE_LE:
2801                 hci_dev_lock(hdev);
2802                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2803                 hci_dev_unlock(hdev);
2804                 break;
2805
2806         case DISCOV_TYPE_INTERLEAVED:
2807                 hci_req_init(&req, hdev);
2808
2809                 memset(&cp, 0, sizeof(cp));
2810                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2811                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2812                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2813
2814                 hci_dev_lock(hdev);
2815
2816                 hci_inquiry_cache_flush(hdev);
2817
2818                 err = hci_req_run(&req, inquiry_complete);
2819                 if (err) {
2820                         BT_ERR("Inquiry request failed: err %d", err);
2821                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2822                 }
2823
2824                 hci_dev_unlock(hdev);
2825                 break;
2826         }
2827 }
2828
2829 static void le_scan_disable_work(struct work_struct *work)
2830 {
2831         struct hci_dev *hdev = container_of(work, struct hci_dev,
2832                                             le_scan_disable.work);
2833         struct hci_cp_le_set_scan_enable cp;
2834         struct hci_request req;
2835         int err;
2836
2837         BT_DBG("%s", hdev->name);
2838
2839         hci_req_init(&req, hdev);
2840
2841         memset(&cp, 0, sizeof(cp));
2842         cp.enable = LE_SCAN_DISABLE;
2843         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2844
2845         err = hci_req_run(&req, le_scan_disable_work_complete);
2846         if (err)
2847                 BT_ERR("Disable LE scanning request failed: err %d", err);
2848 }
2849
2850 /* Alloc HCI device */
2851 struct hci_dev *hci_alloc_dev(void)
2852 {
2853         struct hci_dev *hdev;
2854
2855         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2856         if (!hdev)
2857                 return NULL;
2858
2859         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2860         hdev->esco_type = (ESCO_HV1);
2861         hdev->link_mode = (HCI_LM_ACCEPT);
2862         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2863         hdev->io_capability = 0x03;     /* No Input No Output */
2864         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2865         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2866
2867         hdev->sniff_max_interval = 800;
2868         hdev->sniff_min_interval = 80;
2869
2870         hdev->le_scan_interval = 0x0060;
2871         hdev->le_scan_window = 0x0030;
2872         hdev->le_conn_min_interval = 0x0028;
2873         hdev->le_conn_max_interval = 0x0038;
2874
2875         mutex_init(&hdev->lock);
2876         mutex_init(&hdev->req_lock);
2877
2878         INIT_LIST_HEAD(&hdev->mgmt_pending);
2879         INIT_LIST_HEAD(&hdev->blacklist);
2880         INIT_LIST_HEAD(&hdev->uuids);
2881         INIT_LIST_HEAD(&hdev->link_keys);
2882         INIT_LIST_HEAD(&hdev->long_term_keys);
2883         INIT_LIST_HEAD(&hdev->remote_oob_data);
2884         INIT_LIST_HEAD(&hdev->conn_hash.list);
2885
2886         INIT_WORK(&hdev->rx_work, hci_rx_work);
2887         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2888         INIT_WORK(&hdev->tx_work, hci_tx_work);
2889         INIT_WORK(&hdev->power_on, hci_power_on);
2890
2891         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2892         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2893         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2894
2895         skb_queue_head_init(&hdev->rx_q);
2896         skb_queue_head_init(&hdev->cmd_q);
2897         skb_queue_head_init(&hdev->raw_q);
2898
2899         init_waitqueue_head(&hdev->req_wait_q);
2900
2901         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2902
2903         hci_init_sysfs(hdev);
2904         discovery_init(hdev);
2905
2906         return hdev;
2907 }
2908 EXPORT_SYMBOL(hci_alloc_dev);
2909
2910 /* Free HCI device */
2911 void hci_free_dev(struct hci_dev *hdev)
2912 {
2913         /* will free via device release */
2914         put_device(&hdev->dev);
2915 }
2916 EXPORT_SYMBOL(hci_free_dev);
2917
2918 /* Register HCI device */
2919 int hci_register_dev(struct hci_dev *hdev)
2920 {
2921         int id, error;
2922
2923         if (!hdev->open || !hdev->close)
2924                 return -EINVAL;
2925
2926         /* Do not allow HCI_AMP devices to register at index 0,
2927          * so the index can be used as the AMP controller ID.
2928          */
2929         switch (hdev->dev_type) {
2930         case HCI_BREDR:
2931                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2932                 break;
2933         case HCI_AMP:
2934                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2935                 break;
2936         default:
2937                 return -EINVAL;
2938         }
2939
2940         if (id < 0)
2941                 return id;
2942
2943         sprintf(hdev->name, "hci%d", id);
2944         hdev->id = id;
2945
2946         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2947
2948         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2949                                           WQ_MEM_RECLAIM, 1, hdev->name);
2950         if (!hdev->workqueue) {
2951                 error = -ENOMEM;
2952                 goto err;
2953         }
2954
2955         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2956                                               WQ_MEM_RECLAIM, 1, hdev->name);
2957         if (!hdev->req_workqueue) {
2958                 destroy_workqueue(hdev->workqueue);
2959                 error = -ENOMEM;
2960                 goto err;
2961         }
2962
2963         if (!IS_ERR_OR_NULL(bt_debugfs))
2964                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2965
2966         dev_set_name(&hdev->dev, "%s", hdev->name);
2967
2968         error = device_add(&hdev->dev);
2969         if (error < 0)
2970                 goto err_wqueue;
2971
2972         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2973                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2974                                     hdev);
2975         if (hdev->rfkill) {
2976                 if (rfkill_register(hdev->rfkill) < 0) {
2977                         rfkill_destroy(hdev->rfkill);
2978                         hdev->rfkill = NULL;
2979                 }
2980         }
2981
2982         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2983                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2984
2985         set_bit(HCI_SETUP, &hdev->dev_flags);
2986         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2987
2988         if (hdev->dev_type == HCI_BREDR) {
2989                 /* Assume BR/EDR support until proven otherwise (such as
2990                  * through reading supported features during init.
2991                  */
2992                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2993         }
2994
2995         write_lock(&hci_dev_list_lock);
2996         list_add(&hdev->list, &hci_dev_list);
2997         write_unlock(&hci_dev_list_lock);
2998
2999         hci_notify(hdev, HCI_DEV_REG);
3000         hci_dev_hold(hdev);
3001
3002         queue_work(hdev->req_workqueue, &hdev->power_on);
3003
3004         return id;
3005
3006 err_wqueue:
3007         destroy_workqueue(hdev->workqueue);
3008         destroy_workqueue(hdev->req_workqueue);
3009 err:
3010         ida_simple_remove(&hci_index_ida, hdev->id);
3011
3012         return error;
3013 }
3014 EXPORT_SYMBOL(hci_register_dev);
3015
3016 /* Unregister HCI device */
3017 void hci_unregister_dev(struct hci_dev *hdev)
3018 {
3019         int i, id;
3020
3021         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3022
3023         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3024
3025         id = hdev->id;
3026
3027         write_lock(&hci_dev_list_lock);
3028         list_del(&hdev->list);
3029         write_unlock(&hci_dev_list_lock);
3030
3031         hci_dev_do_close(hdev);
3032
3033         for (i = 0; i < NUM_REASSEMBLY; i++)
3034                 kfree_skb(hdev->reassembly[i]);
3035
3036         cancel_work_sync(&hdev->power_on);
3037
3038         if (!test_bit(HCI_INIT, &hdev->flags) &&
3039             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3040                 hci_dev_lock(hdev);
3041                 mgmt_index_removed(hdev);
3042                 hci_dev_unlock(hdev);
3043         }
3044
3045         /* mgmt_index_removed should take care of emptying the
3046          * pending list */
3047         BUG_ON(!list_empty(&hdev->mgmt_pending));
3048
3049         hci_notify(hdev, HCI_DEV_UNREG);
3050
3051         if (hdev->rfkill) {
3052                 rfkill_unregister(hdev->rfkill);
3053                 rfkill_destroy(hdev->rfkill);
3054         }
3055
3056         device_del(&hdev->dev);
3057
3058         debugfs_remove_recursive(hdev->debugfs);
3059
3060         destroy_workqueue(hdev->workqueue);
3061         destroy_workqueue(hdev->req_workqueue);
3062
3063         hci_dev_lock(hdev);
3064         hci_blacklist_clear(hdev);
3065         hci_uuids_clear(hdev);
3066         hci_link_keys_clear(hdev);
3067         hci_smp_ltks_clear(hdev);
3068         hci_remote_oob_data_clear(hdev);
3069         hci_dev_unlock(hdev);
3070
3071         hci_dev_put(hdev);
3072
3073         ida_simple_remove(&hci_index_ida, id);
3074 }
3075 EXPORT_SYMBOL(hci_unregister_dev);
3076
3077 /* Suspend HCI device */
3078 int hci_suspend_dev(struct hci_dev *hdev)
3079 {
3080         hci_notify(hdev, HCI_DEV_SUSPEND);
3081         return 0;
3082 }
3083 EXPORT_SYMBOL(hci_suspend_dev);
3084
3085 /* Resume HCI device */
3086 int hci_resume_dev(struct hci_dev *hdev)
3087 {
3088         hci_notify(hdev, HCI_DEV_RESUME);
3089         return 0;
3090 }
3091 EXPORT_SYMBOL(hci_resume_dev);
3092
3093 /* Receive frame from HCI drivers */
3094 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3095 {
3096         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3097                       && !test_bit(HCI_INIT, &hdev->flags))) {
3098                 kfree_skb(skb);
3099                 return -ENXIO;
3100         }
3101
3102         /* Incoming skb */
3103         bt_cb(skb)->incoming = 1;
3104
3105         /* Time stamp */
3106         __net_timestamp(skb);
3107
3108         skb_queue_tail(&hdev->rx_q, skb);
3109         queue_work(hdev->workqueue, &hdev->rx_work);
3110
3111         return 0;
3112 }
3113 EXPORT_SYMBOL(hci_recv_frame);
3114
3115 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3116                           int count, __u8 index)
3117 {
3118         int len = 0;
3119         int hlen = 0;
3120         int remain = count;
3121         struct sk_buff *skb;
3122         struct bt_skb_cb *scb;
3123
3124         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3125             index >= NUM_REASSEMBLY)
3126                 return -EILSEQ;
3127
3128         skb = hdev->reassembly[index];
3129
3130         if (!skb) {
3131                 switch (type) {
3132                 case HCI_ACLDATA_PKT:
3133                         len = HCI_MAX_FRAME_SIZE;
3134                         hlen = HCI_ACL_HDR_SIZE;
3135                         break;
3136                 case HCI_EVENT_PKT:
3137                         len = HCI_MAX_EVENT_SIZE;
3138                         hlen = HCI_EVENT_HDR_SIZE;
3139                         break;
3140                 case HCI_SCODATA_PKT:
3141                         len = HCI_MAX_SCO_SIZE;
3142                         hlen = HCI_SCO_HDR_SIZE;
3143                         break;
3144                 }
3145
3146                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3147                 if (!skb)
3148                         return -ENOMEM;
3149
3150                 scb = (void *) skb->cb;
3151                 scb->expect = hlen;
3152                 scb->pkt_type = type;
3153
3154                 hdev->reassembly[index] = skb;
3155         }
3156
3157         while (count) {
3158                 scb = (void *) skb->cb;
3159                 len = min_t(uint, scb->expect, count);
3160
3161                 memcpy(skb_put(skb, len), data, len);
3162
3163                 count -= len;
3164                 data += len;
3165                 scb->expect -= len;
3166                 remain = count;
3167
3168                 switch (type) {
3169                 case HCI_EVENT_PKT:
3170                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3171                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3172                                 scb->expect = h->plen;
3173
3174                                 if (skb_tailroom(skb) < scb->expect) {
3175                                         kfree_skb(skb);
3176                                         hdev->reassembly[index] = NULL;
3177                                         return -ENOMEM;
3178                                 }
3179                         }
3180                         break;
3181
3182                 case HCI_ACLDATA_PKT:
3183                         if (skb->len  == HCI_ACL_HDR_SIZE) {
3184                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3185                                 scb->expect = __le16_to_cpu(h->dlen);
3186
3187                                 if (skb_tailroom(skb) < scb->expect) {
3188                                         kfree_skb(skb);
3189                                         hdev->reassembly[index] = NULL;
3190                                         return -ENOMEM;
3191                                 }
3192                         }
3193                         break;
3194
3195                 case HCI_SCODATA_PKT:
3196                         if (skb->len == HCI_SCO_HDR_SIZE) {
3197                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3198                                 scb->expect = h->dlen;
3199
3200                                 if (skb_tailroom(skb) < scb->expect) {
3201                                         kfree_skb(skb);
3202                                         hdev->reassembly[index] = NULL;
3203                                         return -ENOMEM;
3204                                 }
3205                         }
3206                         break;
3207                 }
3208
3209                 if (scb->expect == 0) {
3210                         /* Complete frame */
3211
3212                         bt_cb(skb)->pkt_type = type;
3213                         hci_recv_frame(hdev, skb);
3214
3215                         hdev->reassembly[index] = NULL;
3216                         return remain;
3217                 }
3218         }
3219
3220         return remain;
3221 }
3222
3223 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3224 {
3225         int rem = 0;
3226
3227         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3228                 return -EILSEQ;
3229
3230         while (count) {
3231                 rem = hci_reassembly(hdev, type, data, count, type - 1);
3232                 if (rem < 0)
3233                         return rem;
3234
3235                 data += (count - rem);
3236                 count = rem;
3237         }
3238
3239         return rem;
3240 }
3241 EXPORT_SYMBOL(hci_recv_fragment);
3242
3243 #define STREAM_REASSEMBLY 0
3244
3245 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3246 {
3247         int type;
3248         int rem = 0;
3249
3250         while (count) {
3251                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3252
3253                 if (!skb) {
3254                         struct { char type; } *pkt;
3255
3256                         /* Start of the frame */
3257                         pkt = data;
3258                         type = pkt->type;
3259
3260                         data++;
3261                         count--;
3262                 } else
3263                         type = bt_cb(skb)->pkt_type;
3264
3265                 rem = hci_reassembly(hdev, type, data, count,
3266                                      STREAM_REASSEMBLY);
3267                 if (rem < 0)
3268                         return rem;
3269
3270                 data += (count - rem);
3271                 count = rem;
3272         }
3273
3274         return rem;
3275 }
3276 EXPORT_SYMBOL(hci_recv_stream_fragment);
3277
3278 /* ---- Interface to upper protocols ---- */
3279
3280 int hci_register_cb(struct hci_cb *cb)
3281 {
3282         BT_DBG("%p name %s", cb, cb->name);
3283
3284         write_lock(&hci_cb_list_lock);
3285         list_add(&cb->list, &hci_cb_list);
3286         write_unlock(&hci_cb_list_lock);
3287
3288         return 0;
3289 }
3290 EXPORT_SYMBOL(hci_register_cb);
3291
3292 int hci_unregister_cb(struct hci_cb *cb)
3293 {
3294         BT_DBG("%p name %s", cb, cb->name);
3295
3296         write_lock(&hci_cb_list_lock);
3297         list_del(&cb->list);
3298         write_unlock(&hci_cb_list_lock);
3299
3300         return 0;
3301 }
3302 EXPORT_SYMBOL(hci_unregister_cb);
3303
3304 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3305 {
3306         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3307
3308         /* Time stamp */
3309         __net_timestamp(skb);
3310
3311         /* Send copy to monitor */
3312         hci_send_to_monitor(hdev, skb);
3313
3314         if (atomic_read(&hdev->promisc)) {
3315                 /* Send copy to the sockets */
3316                 hci_send_to_sock(hdev, skb);
3317         }
3318
3319         /* Get rid of skb owner, prior to sending to the driver. */
3320         skb_orphan(skb);
3321
3322         if (hdev->send(hdev, skb) < 0)
3323                 BT_ERR("%s sending frame failed", hdev->name);
3324 }
3325
3326 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3327 {
3328         skb_queue_head_init(&req->cmd_q);
3329         req->hdev = hdev;
3330         req->err = 0;
3331 }
3332
3333 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3334 {
3335         struct hci_dev *hdev = req->hdev;
3336         struct sk_buff *skb;
3337         unsigned long flags;
3338
3339         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3340
3341         /* If an error occured during request building, remove all HCI
3342          * commands queued on the HCI request queue.
3343          */
3344         if (req->err) {
3345                 skb_queue_purge(&req->cmd_q);
3346                 return req->err;
3347         }
3348
3349         /* Do not allow empty requests */
3350         if (skb_queue_empty(&req->cmd_q))
3351                 return -ENODATA;
3352
3353         skb = skb_peek_tail(&req->cmd_q);
3354         bt_cb(skb)->req.complete = complete;
3355
3356         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3357         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3358         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3359
3360         queue_work(hdev->workqueue, &hdev->cmd_work);
3361
3362         return 0;
3363 }
3364
3365 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3366                                        u32 plen, const void *param)
3367 {
3368         int len = HCI_COMMAND_HDR_SIZE + plen;
3369         struct hci_command_hdr *hdr;
3370         struct sk_buff *skb;
3371
3372         skb = bt_skb_alloc(len, GFP_ATOMIC);
3373         if (!skb)
3374                 return NULL;
3375
3376         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3377         hdr->opcode = cpu_to_le16(opcode);
3378         hdr->plen   = plen;
3379
3380         if (plen)
3381                 memcpy(skb_put(skb, plen), param, plen);
3382
3383         BT_DBG("skb len %d", skb->len);
3384
3385         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3386
3387         return skb;
3388 }
3389
3390 /* Send HCI command */
3391 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3392                  const void *param)
3393 {
3394         struct sk_buff *skb;
3395
3396         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3397
3398         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3399         if (!skb) {
3400                 BT_ERR("%s no memory for command", hdev->name);
3401                 return -ENOMEM;
3402         }
3403
3404         /* Stand-alone HCI commands must be flaged as
3405          * single-command requests.
3406          */
3407         bt_cb(skb)->req.start = true;
3408
3409         skb_queue_tail(&hdev->cmd_q, skb);
3410         queue_work(hdev->workqueue, &hdev->cmd_work);
3411
3412         return 0;
3413 }
3414
3415 /* Queue a command to an asynchronous HCI request */
3416 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3417                     const void *param, u8 event)
3418 {
3419         struct hci_dev *hdev = req->hdev;
3420         struct sk_buff *skb;
3421
3422         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3423
3424         /* If an error occured during request building, there is no point in
3425          * queueing the HCI command. We can simply return.
3426          */
3427         if (req->err)
3428                 return;
3429
3430         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3431         if (!skb) {
3432                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3433                        hdev->name, opcode);
3434                 req->err = -ENOMEM;
3435                 return;
3436         }
3437
3438         if (skb_queue_empty(&req->cmd_q))
3439                 bt_cb(skb)->req.start = true;
3440
3441         bt_cb(skb)->req.event = event;
3442
3443         skb_queue_tail(&req->cmd_q, skb);
3444 }
3445
3446 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3447                  const void *param)
3448 {
3449         hci_req_add_ev(req, opcode, plen, param, 0);
3450 }
3451
3452 /* Get data from the previously sent command */
3453 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3454 {
3455         struct hci_command_hdr *hdr;
3456
3457         if (!hdev->sent_cmd)
3458                 return NULL;
3459
3460         hdr = (void *) hdev->sent_cmd->data;
3461
3462         if (hdr->opcode != cpu_to_le16(opcode))
3463                 return NULL;
3464
3465         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3466
3467         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3468 }
3469
3470 /* Send ACL data */
3471 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3472 {
3473         struct hci_acl_hdr *hdr;
3474         int len = skb->len;
3475
3476         skb_push(skb, HCI_ACL_HDR_SIZE);
3477         skb_reset_transport_header(skb);
3478         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3479         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3480         hdr->dlen   = cpu_to_le16(len);
3481 }
3482
3483 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3484                           struct sk_buff *skb, __u16 flags)
3485 {
3486         struct hci_conn *conn = chan->conn;
3487         struct hci_dev *hdev = conn->hdev;
3488         struct sk_buff *list;
3489
3490         skb->len = skb_headlen(skb);
3491         skb->data_len = 0;
3492
3493         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3494
3495         switch (hdev->dev_type) {
3496         case HCI_BREDR:
3497                 hci_add_acl_hdr(skb, conn->handle, flags);
3498                 break;
3499         case HCI_AMP:
3500                 hci_add_acl_hdr(skb, chan->handle, flags);
3501                 break;
3502         default:
3503                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3504                 return;
3505         }
3506
3507         list = skb_shinfo(skb)->frag_list;
3508         if (!list) {
3509                 /* Non fragmented */
3510                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3511
3512                 skb_queue_tail(queue, skb);
3513         } else {
3514                 /* Fragmented */
3515                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3516
3517                 skb_shinfo(skb)->frag_list = NULL;
3518
3519                 /* Queue all fragments atomically */
3520                 spin_lock(&queue->lock);
3521
3522                 __skb_queue_tail(queue, skb);
3523
3524                 flags &= ~ACL_START;
3525                 flags |= ACL_CONT;
3526                 do {
3527                         skb = list; list = list->next;
3528
3529                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3530                         hci_add_acl_hdr(skb, conn->handle, flags);
3531
3532                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3533
3534                         __skb_queue_tail(queue, skb);
3535                 } while (list);
3536
3537                 spin_unlock(&queue->lock);
3538         }
3539 }
3540
3541 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3542 {
3543         struct hci_dev *hdev = chan->conn->hdev;
3544
3545         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3546
3547         hci_queue_acl(chan, &chan->data_q, skb, flags);
3548
3549         queue_work(hdev->workqueue, &hdev->tx_work);
3550 }
3551
3552 /* Send SCO data */
3553 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3554 {
3555         struct hci_dev *hdev = conn->hdev;
3556         struct hci_sco_hdr hdr;
3557
3558         BT_DBG("%s len %d", hdev->name, skb->len);
3559
3560         hdr.handle = cpu_to_le16(conn->handle);
3561         hdr.dlen   = skb->len;
3562
3563         skb_push(skb, HCI_SCO_HDR_SIZE);
3564         skb_reset_transport_header(skb);
3565         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3566
3567         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3568
3569         skb_queue_tail(&conn->data_q, skb);
3570         queue_work(hdev->workqueue, &hdev->tx_work);
3571 }
3572
3573 /* ---- HCI TX task (outgoing data) ---- */
3574
3575 /* HCI Connection scheduler */
3576 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3577                                      int *quote)
3578 {
3579         struct hci_conn_hash *h = &hdev->conn_hash;
3580         struct hci_conn *conn = NULL, *c;
3581         unsigned int num = 0, min = ~0;
3582
3583         /* We don't have to lock device here. Connections are always
3584          * added and removed with TX task disabled. */
3585
3586         rcu_read_lock();
3587
3588         list_for_each_entry_rcu(c, &h->list, list) {
3589                 if (c->type != type || skb_queue_empty(&c->data_q))
3590                         continue;
3591
3592                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3593                         continue;
3594
3595                 num++;
3596
3597                 if (c->sent < min) {
3598                         min  = c->sent;
3599                         conn = c;
3600                 }
3601
3602                 if (hci_conn_num(hdev, type) == num)
3603                         break;
3604         }
3605
3606         rcu_read_unlock();
3607
3608         if (conn) {
3609                 int cnt, q;
3610
3611                 switch (conn->type) {
3612                 case ACL_LINK:
3613                         cnt = hdev->acl_cnt;
3614                         break;
3615                 case SCO_LINK:
3616                 case ESCO_LINK:
3617                         cnt = hdev->sco_cnt;
3618                         break;
3619                 case LE_LINK:
3620                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3621                         break;
3622                 default:
3623                         cnt = 0;
3624                         BT_ERR("Unknown link type");
3625                 }
3626
3627                 q = cnt / num;
3628                 *quote = q ? q : 1;
3629         } else
3630                 *quote = 0;
3631
3632         BT_DBG("conn %p quote %d", conn, *quote);
3633         return conn;
3634 }
3635
3636 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3637 {
3638         struct hci_conn_hash *h = &hdev->conn_hash;
3639         struct hci_conn *c;
3640
3641         BT_ERR("%s link tx timeout", hdev->name);
3642
3643         rcu_read_lock();
3644
3645         /* Kill stalled connections */
3646         list_for_each_entry_rcu(c, &h->list, list) {
3647                 if (c->type == type && c->sent) {
3648                         BT_ERR("%s killing stalled connection %pMR",
3649                                hdev->name, &c->dst);
3650                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3651                 }
3652         }
3653
3654         rcu_read_unlock();
3655 }
3656
3657 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3658                                       int *quote)
3659 {
3660         struct hci_conn_hash *h = &hdev->conn_hash;
3661         struct hci_chan *chan = NULL;
3662         unsigned int num = 0, min = ~0, cur_prio = 0;
3663         struct hci_conn *conn;
3664         int cnt, q, conn_num = 0;
3665
3666         BT_DBG("%s", hdev->name);
3667
3668         rcu_read_lock();
3669
3670         list_for_each_entry_rcu(conn, &h->list, list) {
3671                 struct hci_chan *tmp;
3672
3673                 if (conn->type != type)
3674                         continue;
3675
3676                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3677                         continue;
3678
3679                 conn_num++;
3680
3681                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3682                         struct sk_buff *skb;
3683
3684                         if (skb_queue_empty(&tmp->data_q))
3685                                 continue;
3686
3687                         skb = skb_peek(&tmp->data_q);
3688                         if (skb->priority < cur_prio)
3689                                 continue;
3690
3691                         if (skb->priority > cur_prio) {
3692                                 num = 0;
3693                                 min = ~0;
3694                                 cur_prio = skb->priority;
3695                         }
3696
3697                         num++;
3698
3699                         if (conn->sent < min) {
3700                                 min  = conn->sent;
3701                                 chan = tmp;
3702                         }
3703                 }
3704
3705                 if (hci_conn_num(hdev, type) == conn_num)
3706                         break;
3707         }
3708
3709         rcu_read_unlock();
3710
3711         if (!chan)
3712                 return NULL;
3713
3714         switch (chan->conn->type) {
3715         case ACL_LINK:
3716                 cnt = hdev->acl_cnt;
3717                 break;
3718         case AMP_LINK:
3719                 cnt = hdev->block_cnt;
3720                 break;
3721         case SCO_LINK:
3722         case ESCO_LINK:
3723                 cnt = hdev->sco_cnt;
3724                 break;
3725         case LE_LINK:
3726                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3727                 break;
3728         default:
3729                 cnt = 0;
3730                 BT_ERR("Unknown link type");
3731         }
3732
3733         q = cnt / num;
3734         *quote = q ? q : 1;
3735         BT_DBG("chan %p quote %d", chan, *quote);
3736         return chan;
3737 }
3738
3739 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3740 {
3741         struct hci_conn_hash *h = &hdev->conn_hash;
3742         struct hci_conn *conn;
3743         int num = 0;
3744
3745         BT_DBG("%s", hdev->name);
3746
3747         rcu_read_lock();
3748
3749         list_for_each_entry_rcu(conn, &h->list, list) {
3750                 struct hci_chan *chan;
3751
3752                 if (conn->type != type)
3753                         continue;
3754
3755                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3756                         continue;
3757
3758                 num++;
3759
3760                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3761                         struct sk_buff *skb;
3762
3763                         if (chan->sent) {
3764                                 chan->sent = 0;
3765                                 continue;
3766                         }
3767
3768                         if (skb_queue_empty(&chan->data_q))
3769                                 continue;
3770
3771                         skb = skb_peek(&chan->data_q);
3772                         if (skb->priority >= HCI_PRIO_MAX - 1)
3773                                 continue;
3774
3775                         skb->priority = HCI_PRIO_MAX - 1;
3776
3777                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3778                                skb->priority);
3779                 }
3780
3781                 if (hci_conn_num(hdev, type) == num)
3782                         break;
3783         }
3784
3785         rcu_read_unlock();
3786
3787 }
3788
3789 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3790 {
3791         /* Calculate count of blocks used by this packet */
3792         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3793 }
3794
3795 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3796 {
3797         if (!test_bit(HCI_RAW, &hdev->flags)) {
3798                 /* ACL tx timeout must be longer than maximum
3799                  * link supervision timeout (40.9 seconds) */
3800                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3801                                        HCI_ACL_TX_TIMEOUT))
3802                         hci_link_tx_to(hdev, ACL_LINK);
3803         }
3804 }
3805
3806 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3807 {
3808         unsigned int cnt = hdev->acl_cnt;
3809         struct hci_chan *chan;
3810         struct sk_buff *skb;
3811         int quote;
3812
3813         __check_timeout(hdev, cnt);
3814
3815         while (hdev->acl_cnt &&
3816                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3817                 u32 priority = (skb_peek(&chan->data_q))->priority;
3818                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3819                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3820                                skb->len, skb->priority);
3821
3822                         /* Stop if priority has changed */
3823                         if (skb->priority < priority)
3824                                 break;
3825
3826                         skb = skb_dequeue(&chan->data_q);
3827
3828                         hci_conn_enter_active_mode(chan->conn,
3829                                                    bt_cb(skb)->force_active);
3830
3831                         hci_send_frame(hdev, skb);
3832                         hdev->acl_last_tx = jiffies;
3833
3834                         hdev->acl_cnt--;
3835                         chan->sent++;
3836                         chan->conn->sent++;
3837                 }
3838         }
3839
3840         if (cnt != hdev->acl_cnt)
3841                 hci_prio_recalculate(hdev, ACL_LINK);
3842 }
3843
3844 static void hci_sched_acl_blk(struct hci_dev *hdev)
3845 {
3846         unsigned int cnt = hdev->block_cnt;
3847         struct hci_chan *chan;
3848         struct sk_buff *skb;
3849         int quote;
3850         u8 type;
3851
3852         __check_timeout(hdev, cnt);
3853
3854         BT_DBG("%s", hdev->name);
3855
3856         if (hdev->dev_type == HCI_AMP)
3857                 type = AMP_LINK;
3858         else
3859                 type = ACL_LINK;
3860
3861         while (hdev->block_cnt > 0 &&
3862                (chan = hci_chan_sent(hdev, type, &quote))) {
3863                 u32 priority = (skb_peek(&chan->data_q))->priority;
3864                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3865                         int blocks;
3866
3867                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3868                                skb->len, skb->priority);
3869
3870                         /* Stop if priority has changed */
3871                         if (skb->priority < priority)
3872                                 break;
3873
3874                         skb = skb_dequeue(&chan->data_q);
3875
3876                         blocks = __get_blocks(hdev, skb);
3877                         if (blocks > hdev->block_cnt)
3878                                 return;
3879
3880                         hci_conn_enter_active_mode(chan->conn,
3881                                                    bt_cb(skb)->force_active);
3882
3883                         hci_send_frame(hdev, skb);
3884                         hdev->acl_last_tx = jiffies;
3885
3886                         hdev->block_cnt -= blocks;
3887                         quote -= blocks;
3888
3889                         chan->sent += blocks;
3890                         chan->conn->sent += blocks;
3891                 }
3892         }
3893
3894         if (cnt != hdev->block_cnt)
3895                 hci_prio_recalculate(hdev, type);
3896 }
3897
3898 static void hci_sched_acl(struct hci_dev *hdev)
3899 {
3900         BT_DBG("%s", hdev->name);
3901
3902         /* No ACL link over BR/EDR controller */
3903         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3904                 return;
3905
3906         /* No AMP link over AMP controller */
3907         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3908                 return;
3909
3910         switch (hdev->flow_ctl_mode) {
3911         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3912                 hci_sched_acl_pkt(hdev);
3913                 break;
3914
3915         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3916                 hci_sched_acl_blk(hdev);
3917                 break;
3918         }
3919 }
3920
3921 /* Schedule SCO */
3922 static void hci_sched_sco(struct hci_dev *hdev)
3923 {
3924         struct hci_conn *conn;
3925         struct sk_buff *skb;
3926         int quote;
3927
3928         BT_DBG("%s", hdev->name);
3929
3930         if (!hci_conn_num(hdev, SCO_LINK))
3931                 return;
3932
3933         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3934                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3935                         BT_DBG("skb %p len %d", skb, skb->len);
3936                         hci_send_frame(hdev, skb);
3937
3938                         conn->sent++;
3939                         if (conn->sent == ~0)
3940                                 conn->sent = 0;
3941                 }
3942         }
3943 }
3944
3945 static void hci_sched_esco(struct hci_dev *hdev)
3946 {
3947         struct hci_conn *conn;
3948         struct sk_buff *skb;
3949         int quote;
3950
3951         BT_DBG("%s", hdev->name);
3952
3953         if (!hci_conn_num(hdev, ESCO_LINK))
3954                 return;
3955
3956         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3957                                                      &quote))) {
3958                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3959                         BT_DBG("skb %p len %d", skb, skb->len);
3960                         hci_send_frame(hdev, skb);
3961
3962                         conn->sent++;
3963                         if (conn->sent == ~0)
3964                                 conn->sent = 0;
3965                 }
3966         }
3967 }
3968
3969 static void hci_sched_le(struct hci_dev *hdev)
3970 {
3971         struct hci_chan *chan;
3972         struct sk_buff *skb;
3973         int quote, cnt, tmp;
3974
3975         BT_DBG("%s", hdev->name);
3976
3977         if (!hci_conn_num(hdev, LE_LINK))
3978                 return;
3979
3980         if (!test_bit(HCI_RAW, &hdev->flags)) {
3981                 /* LE tx timeout must be longer than maximum
3982                  * link supervision timeout (40.9 seconds) */
3983                 if (!hdev->le_cnt && hdev->le_pkts &&
3984                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3985                         hci_link_tx_to(hdev, LE_LINK);
3986         }
3987
3988         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3989         tmp = cnt;
3990         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3991                 u32 priority = (skb_peek(&chan->data_q))->priority;
3992                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3993                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3994                                skb->len, skb->priority);
3995
3996                         /* Stop if priority has changed */
3997                         if (skb->priority < priority)
3998                                 break;
3999
4000                         skb = skb_dequeue(&chan->data_q);
4001
4002                         hci_send_frame(hdev, skb);
4003                         hdev->le_last_tx = jiffies;
4004
4005                         cnt--;
4006                         chan->sent++;
4007                         chan->conn->sent++;
4008                 }
4009         }
4010
4011         if (hdev->le_pkts)
4012                 hdev->le_cnt = cnt;
4013         else
4014                 hdev->acl_cnt = cnt;
4015
4016         if (cnt != tmp)
4017                 hci_prio_recalculate(hdev, LE_LINK);
4018 }
4019
4020 static void hci_tx_work(struct work_struct *work)
4021 {
4022         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4023         struct sk_buff *skb;
4024
4025         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4026                hdev->sco_cnt, hdev->le_cnt);
4027
4028         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4029                 /* Schedule queues and send stuff to HCI driver */
4030                 hci_sched_acl(hdev);
4031                 hci_sched_sco(hdev);
4032                 hci_sched_esco(hdev);
4033                 hci_sched_le(hdev);
4034         }
4035
4036         /* Send next queued raw (unknown type) packet */
4037         while ((skb = skb_dequeue(&hdev->raw_q)))
4038                 hci_send_frame(hdev, skb);
4039 }
4040
4041 /* ----- HCI RX task (incoming data processing) ----- */
4042
4043 /* ACL data packet */
4044 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4045 {
4046         struct hci_acl_hdr *hdr = (void *) skb->data;
4047         struct hci_conn *conn;
4048         __u16 handle, flags;
4049
4050         skb_pull(skb, HCI_ACL_HDR_SIZE);
4051
4052         handle = __le16_to_cpu(hdr->handle);
4053         flags  = hci_flags(handle);
4054         handle = hci_handle(handle);
4055
4056         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4057                handle, flags);
4058
4059         hdev->stat.acl_rx++;
4060
4061         hci_dev_lock(hdev);
4062         conn = hci_conn_hash_lookup_handle(hdev, handle);
4063         hci_dev_unlock(hdev);
4064
4065         if (conn) {
4066                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4067
4068                 /* Send to upper protocol */
4069                 l2cap_recv_acldata(conn, skb, flags);
4070                 return;
4071         } else {
4072                 BT_ERR("%s ACL packet for unknown connection handle %d",
4073                        hdev->name, handle);
4074         }
4075
4076         kfree_skb(skb);
4077 }
4078
4079 /* SCO data packet */
4080 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4081 {
4082         struct hci_sco_hdr *hdr = (void *) skb->data;
4083         struct hci_conn *conn;
4084         __u16 handle;
4085
4086         skb_pull(skb, HCI_SCO_HDR_SIZE);
4087
4088         handle = __le16_to_cpu(hdr->handle);
4089
4090         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4091
4092         hdev->stat.sco_rx++;
4093
4094         hci_dev_lock(hdev);
4095         conn = hci_conn_hash_lookup_handle(hdev, handle);
4096         hci_dev_unlock(hdev);
4097
4098         if (conn) {
4099                 /* Send to upper protocol */
4100                 sco_recv_scodata(conn, skb);
4101                 return;
4102         } else {
4103                 BT_ERR("%s SCO packet for unknown connection handle %d",
4104                        hdev->name, handle);
4105         }
4106
4107         kfree_skb(skb);
4108 }
4109
4110 static bool hci_req_is_complete(struct hci_dev *hdev)
4111 {
4112         struct sk_buff *skb;
4113
4114         skb = skb_peek(&hdev->cmd_q);
4115         if (!skb)
4116                 return true;
4117
4118         return bt_cb(skb)->req.start;
4119 }
4120
4121 static void hci_resend_last(struct hci_dev *hdev)
4122 {
4123         struct hci_command_hdr *sent;
4124         struct sk_buff *skb;
4125         u16 opcode;
4126
4127         if (!hdev->sent_cmd)
4128                 return;
4129
4130         sent = (void *) hdev->sent_cmd->data;
4131         opcode = __le16_to_cpu(sent->opcode);
4132         if (opcode == HCI_OP_RESET)
4133                 return;
4134
4135         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4136         if (!skb)
4137                 return;
4138
4139         skb_queue_head(&hdev->cmd_q, skb);
4140         queue_work(hdev->workqueue, &hdev->cmd_work);
4141 }
4142
4143 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4144 {
4145         hci_req_complete_t req_complete = NULL;
4146         struct sk_buff *skb;
4147         unsigned long flags;
4148
4149         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4150
4151         /* If the completed command doesn't match the last one that was
4152          * sent we need to do special handling of it.
4153          */
4154         if (!hci_sent_cmd_data(hdev, opcode)) {
4155                 /* Some CSR based controllers generate a spontaneous
4156                  * reset complete event during init and any pending
4157                  * command will never be completed. In such a case we
4158                  * need to resend whatever was the last sent
4159                  * command.
4160                  */
4161                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4162                         hci_resend_last(hdev);
4163
4164                 return;
4165         }
4166
4167         /* If the command succeeded and there's still more commands in
4168          * this request the request is not yet complete.
4169          */
4170         if (!status && !hci_req_is_complete(hdev))
4171                 return;
4172
4173         /* If this was the last command in a request the complete
4174          * callback would be found in hdev->sent_cmd instead of the
4175          * command queue (hdev->cmd_q).
4176          */
4177         if (hdev->sent_cmd) {
4178                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4179
4180                 if (req_complete) {
4181                         /* We must set the complete callback to NULL to
4182                          * avoid calling the callback more than once if
4183                          * this function gets called again.
4184                          */
4185                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4186
4187                         goto call_complete;
4188                 }
4189         }
4190
4191         /* Remove all pending commands belonging to this request */
4192         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4193         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4194                 if (bt_cb(skb)->req.start) {
4195                         __skb_queue_head(&hdev->cmd_q, skb);
4196                         break;
4197                 }
4198
4199                 req_complete = bt_cb(skb)->req.complete;
4200                 kfree_skb(skb);
4201         }
4202         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4203
4204 call_complete:
4205         if (req_complete)
4206                 req_complete(hdev, status);
4207 }
4208
4209 static void hci_rx_work(struct work_struct *work)
4210 {
4211         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4212         struct sk_buff *skb;
4213
4214         BT_DBG("%s", hdev->name);
4215
4216         while ((skb = skb_dequeue(&hdev->rx_q))) {
4217                 /* Send copy to monitor */
4218                 hci_send_to_monitor(hdev, skb);
4219
4220                 if (atomic_read(&hdev->promisc)) {
4221                         /* Send copy to the sockets */
4222                         hci_send_to_sock(hdev, skb);
4223                 }
4224
4225                 if (test_bit(HCI_RAW, &hdev->flags) ||
4226                     test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4227                         kfree_skb(skb);
4228                         continue;
4229                 }
4230
4231                 if (test_bit(HCI_INIT, &hdev->flags)) {
4232                         /* Don't process data packets in this states. */
4233                         switch (bt_cb(skb)->pkt_type) {
4234                         case HCI_ACLDATA_PKT:
4235                         case HCI_SCODATA_PKT:
4236                                 kfree_skb(skb);
4237                                 continue;
4238                         }
4239                 }
4240
4241                 /* Process frame */
4242                 switch (bt_cb(skb)->pkt_type) {
4243                 case HCI_EVENT_PKT:
4244                         BT_DBG("%s Event packet", hdev->name);
4245                         hci_event_packet(hdev, skb);
4246                         break;
4247
4248                 case HCI_ACLDATA_PKT:
4249                         BT_DBG("%s ACL data packet", hdev->name);
4250                         hci_acldata_packet(hdev, skb);
4251                         break;
4252
4253                 case HCI_SCODATA_PKT:
4254                         BT_DBG("%s SCO data packet", hdev->name);
4255                         hci_scodata_packet(hdev, skb);
4256                         break;
4257
4258                 default:
4259                         kfree_skb(skb);
4260                         break;
4261                 }
4262         }
4263 }
4264
4265 static void hci_cmd_work(struct work_struct *work)
4266 {
4267         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4268         struct sk_buff *skb;
4269
4270         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4271                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4272
4273         /* Send queued commands */
4274         if (atomic_read(&hdev->cmd_cnt)) {
4275                 skb = skb_dequeue(&hdev->cmd_q);
4276                 if (!skb)
4277                         return;
4278
4279                 kfree_skb(hdev->sent_cmd);
4280
4281                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4282                 if (hdev->sent_cmd) {
4283                         atomic_dec(&hdev->cmd_cnt);
4284                         hci_send_frame(hdev, skb);
4285                         if (test_bit(HCI_RESET, &hdev->flags))
4286                                 del_timer(&hdev->cmd_timer);
4287                         else
4288                                 mod_timer(&hdev->cmd_timer,
4289                                           jiffies + HCI_CMD_TIMEOUT);
4290                 } else {
4291                         skb_queue_head(&hdev->cmd_q, skb);
4292                         queue_work(hdev->workqueue, &hdev->cmd_work);
4293                 }
4294         }
4295 }