karo-tx-linux.git: net/bluetooth/hci_core.c (commit: "Bluetooth: Add support for setting DUT mode")
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

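/* Report whether Device Under Test mode is active as a 'Y'/'N' flag. */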
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

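/* Toggle Device Under Test mode. Enabling sends the Enable DUT Mode
 * command; disabling resets the controller, since there is no
 * dedicated disable command. The flag is only changed when the
 * controller reports success.
 */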
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

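/* Print each registered 128-bit UUID in standard text form. The UUID
 * bytes are stored little endian in uuid->uuid, so they are read back
 * to front for display.
 */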
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u32 data0, data5;
		u16 data1, data2, data3, data4;

		data5 = get_unaligned_le32(uuid->uuid);
		data4 = get_unaligned_le16(uuid->uuid + 4);
		data3 = get_unaligned_le16(uuid->uuid + 6);
		data2 = get_unaligned_le16(uuid->uuid + 8);
		data1 = get_unaligned_le16(uuid->uuid + 10);
		data0 = get_unaligned_le32(uuid->uuid + 12);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open		= simple_open,
	.read		= use_debug_keys_read,
	.llseek		= default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

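/* Toggle Simple Pairing debug mode on the controller. The Write SSP
 * Debug Mode command is sent synchronously and the cached value is
 * only updated when the controller reports success.
 */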
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

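/* Dump the stored SMP Long Term Keys. Note that this walks
 * hdev->long_term_keys, not the BR/EDR link key list.
 */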
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

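/* Take ownership of the last received event and return it if it matches
 * the expected event (or the Command Complete for the given opcode).
 * On any mismatch the skb is freed and an ERR_PTR is returned.
 */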
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

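/* Send a single HCI command and block until the matching event arrives
 * or the timeout expires. Returns the event skb on success or an
 * ERR_PTR on failure; the caller must kfree_skb() the result.
 */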
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				      unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

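/* Pick the best supported Inquiry mode: extended (0x02), with RSSI
 * (0x01) or standard (0x00). A few controllers claim no RSSI support
 * in their LMP features but still handle it, so they are whitelisted
 * here by manufacturer and revision.
 */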
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

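/* Build and send the Set Event Mask command, enabling only the events
 * that match the controller's feature set. LE-capable controllers
 * additionally get an LE event mask.
 */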
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by
		 * default use that one. If this is a LE only
		 * controller without one, default to the random
		 * address.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
		else
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

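/* Run the four controller init stages in order and, during initial
 * setup only, create the per-controller debugfs entries that match
 * the reported capabilities.
 */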
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
				    hdev, &use_debug_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

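/* Re-insert an entry into the resolve list, ordered by signal
 * strength (smallest absolute RSSI first) so that the closest
 * devices get their names resolved first. Entries with a name
 * resolution already pending are skipped over.
 */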
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

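/* Add a fresh inquiry result to the cache or update an existing
 * entry. Returns false if the remote name is still unknown, in
 * which case the caller may want to trigger name resolution.
 */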
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

1660 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1661 {
1662         struct discovery_state *cache = &hdev->discovery;
1663         struct inquiry_info *info = (struct inquiry_info *) buf;
1664         struct inquiry_entry *e;
1665         int copied = 0;
1666
1667         list_for_each_entry(e, &cache->all, all) {
1668                 struct inquiry_data *data = &e->data;
1669
1670                 if (copied >= num)
1671                         break;
1672
1673                 bacpy(&info->bdaddr, &data->bdaddr);
1674                 info->pscan_rep_mode    = data->pscan_rep_mode;
1675                 info->pscan_period_mode = data->pscan_period_mode;
1676                 info->pscan_mode        = data->pscan_mode;
1677                 memcpy(info->dev_class, data->dev_class, 3);
1678                 info->clock_offset      = data->clock_offset;
1679
1680                 info++;
1681                 copied++;
1682         }
1683
1684         BT_DBG("cache %p, copied %d", cache, copied);
1685         return copied;
1686 }
1687
1688 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1689 {
1690         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1691         struct hci_dev *hdev = req->hdev;
1692         struct hci_cp_inquiry cp;
1693
1694         BT_DBG("%s", hdev->name);
1695
1696         if (test_bit(HCI_INQUIRY, &hdev->flags))
1697                 return;
1698
1699         /* Start Inquiry */
1700         memcpy(&cp.lap, &ir->lap, 3);
1701         cp.length  = ir->length;
1702         cp.num_rsp = ir->num_rsp;
1703         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1704 }
1705
1706 static int wait_inquiry(void *word)
1707 {
1708         schedule();
1709         return signal_pending(current);
1710 }
1711
1712 int hci_inquiry(void __user *arg)
1713 {
1714         __u8 __user *ptr = arg;
1715         struct hci_inquiry_req ir;
1716         struct hci_dev *hdev;
1717         int err = 0, do_inquiry = 0, max_rsp;
1718         long timeo;
1719         __u8 *buf;
1720
1721         if (copy_from_user(&ir, ptr, sizeof(ir)))
1722                 return -EFAULT;
1723
1724         hdev = hci_dev_get(ir.dev_id);
1725         if (!hdev)
1726                 return -ENODEV;
1727
1728         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1729                 err = -EBUSY;
1730                 goto done;
1731         }
1732
1733         if (hdev->dev_type != HCI_BREDR) {
1734                 err = -EOPNOTSUPP;
1735                 goto done;
1736         }
1737
1738         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1739                 err = -EOPNOTSUPP;
1740                 goto done;
1741         }
1742
1743         hci_dev_lock(hdev);
1744         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1745             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1746                 hci_inquiry_cache_flush(hdev);
1747                 do_inquiry = 1;
1748         }
1749         hci_dev_unlock(hdev);
1750
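             /* The inquiry length is specified in units of 1.28 seconds, so
              * waiting two seconds per unit leaves the controller enough
              * headroom to finish.
              */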
1751         timeo = ir.length * msecs_to_jiffies(2000);
1752
1753         if (do_inquiry) {
1754                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1755                                    timeo);
1756                 if (err < 0)
1757                         goto done;
1758
1759                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1760                  * cleared). If it is interrupted by a signal, return -EINTR.
1761                  */
1762                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1763                                 TASK_INTERRUPTIBLE))
1764                         return -EINTR;
1765         }
1766
1767         /* For an unlimited number of responses, use a buffer with
1768          * 255 entries.
1769          */
1770         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1771
1772         /* inquiry_cache_dump() can't sleep. Therefore we allocate a
1773          * temporary buffer and copy it to user space afterwards.
1774          */
1775         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1776         if (!buf) {
1777                 err = -ENOMEM;
1778                 goto done;
1779         }
1780
1781         hci_dev_lock(hdev);
1782         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1783         hci_dev_unlock(hdev);
1784
1785         BT_DBG("num_rsp %d", ir.num_rsp);
1786
1787         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1788                 ptr += sizeof(ir);
1789                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1790                                  ir.num_rsp))
1791                         err = -EFAULT;
1792         } else
1793                 err = -EFAULT;
1794
1795         kfree(buf);
1796
1797 done:
1798         hci_dev_put(hdev);
1799         return err;
1800 }
1801
1802 static int hci_dev_do_open(struct hci_dev *hdev)
1803 {
1804         int ret = 0;
1805
1806         BT_DBG("%s %p", hdev->name, hdev);
1807
1808         hci_req_lock(hdev);
1809
1810         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1811                 ret = -ENODEV;
1812                 goto done;
1813         }
1814
1815         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1816                 /* Check for rfkill but allow the HCI setup stage to
1817                  * proceed (which in itself doesn't cause any RF activity).
1818                  */
1819                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1820                         ret = -ERFKILL;
1821                         goto done;
1822                 }
1823
1824                 /* Check for valid public address or a configured static
1825          * random address, but let the HCI setup proceed to
1826                  * be able to determine if there is a public address
1827                  * or not.
1828                  *
1829                  * This check is only valid for BR/EDR controllers
1830                  * since AMP controllers do not have an address.
1831                  */
1832                 if (hdev->dev_type == HCI_BREDR &&
1833                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1834                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1835                         ret = -EADDRNOTAVAIL;
1836                         goto done;
1837                 }
1838         }
1839
1840         if (test_bit(HCI_UP, &hdev->flags)) {
1841                 ret = -EALREADY;
1842                 goto done;
1843         }
1844
1845         if (hdev->open(hdev)) {
1846                 ret = -EIO;
1847                 goto done;
1848         }
1849
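             /* Start with a single command credit: during HCI_INIT the
              * controller is driven one command at a time.
              */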
1850         atomic_set(&hdev->cmd_cnt, 1);
1851         set_bit(HCI_INIT, &hdev->flags);
1852
1853         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1854                 ret = hdev->setup(hdev);
1855
1856         if (!ret) {
1857                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1858                         set_bit(HCI_RAW, &hdev->flags);
1859
1860                 if (!test_bit(HCI_RAW, &hdev->flags) &&
1861                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1862                         ret = __hci_init(hdev);
1863         }
1864
1865         clear_bit(HCI_INIT, &hdev->flags);
1866
1867         if (!ret) {
1868                 hci_dev_hold(hdev);
1869                 set_bit(HCI_UP, &hdev->flags);
1870                 hci_notify(hdev, HCI_DEV_UP);
1871                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1872                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1873                     hdev->dev_type == HCI_BREDR) {
1874                         hci_dev_lock(hdev);
1875                         mgmt_powered(hdev, 1);
1876                         hci_dev_unlock(hdev);
1877                 }
1878         } else {
1879                 /* Init failed, cleanup */
1880                 flush_work(&hdev->tx_work);
1881                 flush_work(&hdev->cmd_work);
1882                 flush_work(&hdev->rx_work);
1883
1884                 skb_queue_purge(&hdev->cmd_q);
1885                 skb_queue_purge(&hdev->rx_q);
1886
1887                 if (hdev->flush)
1888                         hdev->flush(hdev);
1889
1890                 if (hdev->sent_cmd) {
1891                         kfree_skb(hdev->sent_cmd);
1892                         hdev->sent_cmd = NULL;
1893                 }
1894
1895                 hdev->close(hdev);
1896                 hdev->flags = 0;
1897         }
1898
1899 done:
1900         hci_req_unlock(hdev);
1901         return ret;
1902 }
1903
1904 /* ---- HCI ioctl helpers ---- */
1905
1906 int hci_dev_open(__u16 dev)
1907 {
1908         struct hci_dev *hdev;
1909         int err;
1910
1911         hdev = hci_dev_get(dev);
1912         if (!hdev)
1913                 return -ENODEV;
1914
1915         /* We need to ensure that no other power on/off work is pending
1916          * before proceeding to call hci_dev_do_open. This is
1917          * particularly important if the setup procedure has not yet
1918          * completed.
1919          */
1920         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1921                 cancel_delayed_work(&hdev->power_off);
1922
1923         /* After this call it is guaranteed that the setup procedure
1924          * has finished. This means that error conditions like RFKILL
1925          * or no valid public or static random address apply.
1926          */
1927         flush_workqueue(hdev->req_workqueue);
1928
1929         err = hci_dev_do_open(hdev);
1930
1931         hci_dev_put(hdev);
1932
1933         return err;
1934 }
1935
1936 static int hci_dev_do_close(struct hci_dev *hdev)
1937 {
1938         BT_DBG("%s %p", hdev->name, hdev);
1939
1940         cancel_delayed_work(&hdev->power_off);
1941
1942         hci_req_cancel(hdev, ENODEV);
1943         hci_req_lock(hdev);
1944
1945         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1946                 del_timer_sync(&hdev->cmd_timer);
1947                 hci_req_unlock(hdev);
1948                 return 0;
1949         }
1950
1951         /* Flush RX and TX works */
1952         flush_work(&hdev->tx_work);
1953         flush_work(&hdev->rx_work);
1954
1955         if (hdev->discov_timeout > 0) {
1956                 cancel_delayed_work(&hdev->discov_off);
1957                 hdev->discov_timeout = 0;
1958                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1959                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1960         }
1961
1962         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1963                 cancel_delayed_work(&hdev->service_cache);
1964
1965         cancel_delayed_work_sync(&hdev->le_scan_disable);
1966
1967         hci_dev_lock(hdev);
1968         hci_inquiry_cache_flush(hdev);
1969         hci_conn_hash_flush(hdev);
1970         hci_dev_unlock(hdev);
1971
1972         hci_notify(hdev, HCI_DEV_DOWN);
1973
1974         if (hdev->flush)
1975                 hdev->flush(hdev);
1976
1977         /* Reset device */
1978         skb_queue_purge(&hdev->cmd_q);
1979         atomic_set(&hdev->cmd_cnt, 1);
1980         if (!test_bit(HCI_RAW, &hdev->flags) &&
1981             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1982             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1983                 set_bit(HCI_INIT, &hdev->flags);
1984                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1985                 clear_bit(HCI_INIT, &hdev->flags);
1986         }
1987
1988         /* flush cmd  work */
1989         flush_work(&hdev->cmd_work);
1990
1991         /* Drop queues */
1992         skb_queue_purge(&hdev->rx_q);
1993         skb_queue_purge(&hdev->cmd_q);
1994         skb_queue_purge(&hdev->raw_q);
1995
1996         /* Drop last sent command */
1997         if (hdev->sent_cmd) {
1998                 del_timer_sync(&hdev->cmd_timer);
1999                 kfree_skb(hdev->sent_cmd);
2000                 hdev->sent_cmd = NULL;
2001         }
2002
2003         kfree_skb(hdev->recv_evt);
2004         hdev->recv_evt = NULL;
2005
2006         /* After this point our queues are empty
2007          * and no tasks are scheduled. */
2008         hdev->close(hdev);
2009
2010         /* Clear flags */
2011         hdev->flags = 0;
2012         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2013
2014         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2015                 if (hdev->dev_type == HCI_BREDR) {
2016                         hci_dev_lock(hdev);
2017                         mgmt_powered(hdev, 0);
2018                         hci_dev_unlock(hdev);
2019                 }
2020         }
2021
2022         /* Controller radio is available but is currently powered down */
2023         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2024
2025         memset(hdev->eir, 0, sizeof(hdev->eir));
2026         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2027
2028         hci_req_unlock(hdev);
2029
2030         hci_dev_put(hdev);
2031         return 0;
2032 }
2033
2034 int hci_dev_close(__u16 dev)
2035 {
2036         struct hci_dev *hdev;
2037         int err;
2038
2039         hdev = hci_dev_get(dev);
2040         if (!hdev)
2041                 return -ENODEV;
2042
2043         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2044                 err = -EBUSY;
2045                 goto done;
2046         }
2047
2048         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2049                 cancel_delayed_work(&hdev->power_off);
2050
2051         err = hci_dev_do_close(hdev);
2052
2053 done:
2054         hci_dev_put(hdev);
2055         return err;
2056 }
2057
2058 int hci_dev_reset(__u16 dev)
2059 {
2060         struct hci_dev *hdev;
2061         int ret = 0;
2062
2063         hdev = hci_dev_get(dev);
2064         if (!hdev)
2065                 return -ENODEV;
2066
2067         hci_req_lock(hdev);
2068
2069         if (!test_bit(HCI_UP, &hdev->flags)) {
2070                 ret = -ENETDOWN;
2071                 goto done;
2072         }
2073
2074         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2075                 ret = -EBUSY;
2076                 goto done;
2077         }
2078
2079         /* Drop queues */
2080         skb_queue_purge(&hdev->rx_q);
2081         skb_queue_purge(&hdev->cmd_q);
2082
2083         hci_dev_lock(hdev);
2084         hci_inquiry_cache_flush(hdev);
2085         hci_conn_hash_flush(hdev);
2086         hci_dev_unlock(hdev);
2087
2088         if (hdev->flush)
2089                 hdev->flush(hdev);
2090
2091         atomic_set(&hdev->cmd_cnt, 1);
2092         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2093
2094         if (!test_bit(HCI_RAW, &hdev->flags))
2095                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2096
2097 done:
2098         hci_req_unlock(hdev);
2099         hci_dev_put(hdev);
2100         return ret;
2101 }
2102
2103 int hci_dev_reset_stat(__u16 dev)
2104 {
2105         struct hci_dev *hdev;
2106         int ret = 0;
2107
2108         hdev = hci_dev_get(dev);
2109         if (!hdev)
2110                 return -ENODEV;
2111
2112         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2113                 ret = -EBUSY;
2114                 goto done;
2115         }
2116
2117         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2118
2119 done:
2120         hci_dev_put(hdev);
2121         return ret;
2122 }
2123
2124 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2125 {
2126         struct hci_dev *hdev;
2127         struct hci_dev_req dr;
2128         int err = 0;
2129
2130         if (copy_from_user(&dr, arg, sizeof(dr)))
2131                 return -EFAULT;
2132
2133         hdev = hci_dev_get(dr.dev_id);
2134         if (!hdev)
2135                 return -ENODEV;
2136
2137         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2138                 err = -EBUSY;
2139                 goto done;
2140         }
2141
2142         if (hdev->dev_type != HCI_BREDR) {
2143                 err = -EOPNOTSUPP;
2144                 goto done;
2145         }
2146
2147         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2148                 err = -EOPNOTSUPP;
2149                 goto done;
2150         }
2151
2152         switch (cmd) {
2153         case HCISETAUTH:
2154                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2155                                    HCI_INIT_TIMEOUT);
2156                 break;
2157
2158         case HCISETENCRYPT:
2159                 if (!lmp_encrypt_capable(hdev)) {
2160                         err = -EOPNOTSUPP;
2161                         break;
2162                 }
2163
2164                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2165                         /* Auth must be enabled first */
2166                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2167                                            HCI_INIT_TIMEOUT);
2168                         if (err)
2169                                 break;
2170                 }
2171
2172                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2173                                    HCI_INIT_TIMEOUT);
2174                 break;
2175
2176         case HCISETSCAN:
2177                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2178                                    HCI_INIT_TIMEOUT);
2179                 break;
2180
2181         case HCISETLINKPOL:
2182                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2183                                    HCI_INIT_TIMEOUT);
2184                 break;
2185
2186         case HCISETLINKMODE:
2187                 hdev->link_mode = ((__u16) dr.dev_opt) &
2188                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2189                 break;
2190
2191         case HCISETPTYPE:
2192                 hdev->pkt_type = (__u16) dr.dev_opt;
2193                 break;
2194
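             /* For the two MTU ioctls, dr.dev_opt carries a pair of 16-bit
              * values: the first word is the packet count and the second the
              * MTU (e.g. on a little-endian host userspace would set
              * dr.dev_opt = (mtu << 16) | pkts).
              */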
2195         case HCISETACLMTU:
2196                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2197                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2198                 break;
2199
2200         case HCISETSCOMTU:
2201                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2202                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2203                 break;
2204
2205         default:
2206                 err = -EINVAL;
2207                 break;
2208         }
2209
2210 done:
2211         hci_dev_put(hdev);
2212         return err;
2213 }
2214
2215 int hci_get_dev_list(void __user *arg)
2216 {
2217         struct hci_dev *hdev;
2218         struct hci_dev_list_req *dl;
2219         struct hci_dev_req *dr;
2220         int n = 0, size, err;
2221         __u16 dev_num;
2222
2223         if (get_user(dev_num, (__u16 __user *) arg))
2224                 return -EFAULT;
2225
2226         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2227                 return -EINVAL;
2228
2229         size = sizeof(*dl) + dev_num * sizeof(*dr);
2230
2231         dl = kzalloc(size, GFP_KERNEL);
2232         if (!dl)
2233                 return -ENOMEM;
2234
2235         dr = dl->dev_req;
2236
2237         read_lock(&hci_dev_list_lock);
2238         list_for_each_entry(hdev, &hci_dev_list, list) {
2239                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2240                         cancel_delayed_work(&hdev->power_off);
2241
2242                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2243                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2244
2245                 (dr + n)->dev_id  = hdev->id;
2246                 (dr + n)->dev_opt = hdev->flags;
2247
2248                 if (++n >= dev_num)
2249                         break;
2250         }
2251         read_unlock(&hci_dev_list_lock);
2252
2253         dl->dev_num = n;
2254         size = sizeof(*dl) + n * sizeof(*dr);
2255
2256         err = copy_to_user(arg, dl, size);
2257         kfree(dl);
2258
2259         return err ? -EFAULT : 0;
2260 }
2261
2262 int hci_get_dev_info(void __user *arg)
2263 {
2264         struct hci_dev *hdev;
2265         struct hci_dev_info di;
2266         int err = 0;
2267
2268         if (copy_from_user(&di, arg, sizeof(di)))
2269                 return -EFAULT;
2270
2271         hdev = hci_dev_get(di.dev_id);
2272         if (!hdev)
2273                 return -ENODEV;
2274
2275         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2276                 cancel_delayed_work_sync(&hdev->power_off);
2277
2278         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2279                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2280
2281         strcpy(di.name, hdev->name);
2282         di.bdaddr   = hdev->bdaddr;
2283         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2284         di.flags    = hdev->flags;
2285         di.pkt_type = hdev->pkt_type;
2286         if (lmp_bredr_capable(hdev)) {
2287                 di.acl_mtu  = hdev->acl_mtu;
2288                 di.acl_pkts = hdev->acl_pkts;
2289                 di.sco_mtu  = hdev->sco_mtu;
2290                 di.sco_pkts = hdev->sco_pkts;
2291         } else {
2292                 di.acl_mtu  = hdev->le_mtu;
2293                 di.acl_pkts = hdev->le_pkts;
2294                 di.sco_mtu  = 0;
2295                 di.sco_pkts = 0;
2296         }
2297         di.link_policy = hdev->link_policy;
2298         di.link_mode   = hdev->link_mode;
2299
2300         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2301         memcpy(&di.features, &hdev->features, sizeof(di.features));
2302
2303         if (copy_to_user(arg, &di, sizeof(di)))
2304                 err = -EFAULT;
2305
2306         hci_dev_put(hdev);
2307
2308         return err;
2309 }
2310
2311 /* ---- Interface to HCI drivers ---- */
2312
2313 static int hci_rfkill_set_block(void *data, bool blocked)
2314 {
2315         struct hci_dev *hdev = data;
2316
2317         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2318
2319         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2320                 return -EBUSY;
2321
2322         if (blocked) {
2323                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2324                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2325                         hci_dev_do_close(hdev);
2326         } else {
2327                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2328         }
2329
2330         return 0;
2331 }
2332
2333 static const struct rfkill_ops hci_rfkill_ops = {
2334         .set_block = hci_rfkill_set_block,
2335 };
2336
2337 static void hci_power_on(struct work_struct *work)
2338 {
2339         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2340         int err;
2341
2342         BT_DBG("%s", hdev->name);
2343
2344         err = hci_dev_do_open(hdev);
2345         if (err < 0) {
2346                 mgmt_set_powered_failed(hdev, err);
2347                 return;
2348         }
2349
2350         /* During the HCI setup phase, a few error conditions are
2351          * ignored and they need to be checked now. If they are still
2352          * valid, it is important to turn the device back off.
2353          */
2354         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2355             (hdev->dev_type == HCI_BREDR &&
2356              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2357              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2358                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2359                 hci_dev_do_close(hdev);
2360         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2361                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2362                                    HCI_AUTO_OFF_TIMEOUT);
2363         }
2364
2365         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2366                 mgmt_index_added(hdev);
2367 }
2368
2369 static void hci_power_off(struct work_struct *work)
2370 {
2371         struct hci_dev *hdev = container_of(work, struct hci_dev,
2372                                             power_off.work);
2373
2374         BT_DBG("%s", hdev->name);
2375
2376         hci_dev_do_close(hdev);
2377 }
2378
2379 static void hci_discov_off(struct work_struct *work)
2380 {
2381         struct hci_dev *hdev;
2382
2383         hdev = container_of(work, struct hci_dev, discov_off.work);
2384
2385         BT_DBG("%s", hdev->name);
2386
2387         mgmt_discoverable_timeout(hdev);
2388 }
2389
2390 int hci_uuids_clear(struct hci_dev *hdev)
2391 {
2392         struct bt_uuid *uuid, *tmp;
2393
2394         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2395                 list_del(&uuid->list);
2396                 kfree(uuid);
2397         }
2398
2399         return 0;
2400 }
2401
2402 int hci_link_keys_clear(struct hci_dev *hdev)
2403 {
2404         struct list_head *p, *n;
2405
2406         list_for_each_safe(p, n, &hdev->link_keys) {
2407                 struct link_key *key;
2408
2409                 key = list_entry(p, struct link_key, list);
2410
2411                 list_del(p);
2412                 kfree(key);
2413         }
2414
2415         return 0;
2416 }
2417
2418 int hci_smp_ltks_clear(struct hci_dev *hdev)
2419 {
2420         struct smp_ltk *k, *tmp;
2421
2422         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2423                 list_del(&k->list);
2424                 kfree(k);
2425         }
2426
2427         return 0;
2428 }
2429
2430 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2431 {
2432         struct link_key *k;
2433
2434         list_for_each_entry(k, &hdev->link_keys, list)
2435                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2436                         return k;
2437
2438         return NULL;
2439 }
2440
2441 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2442                                u8 key_type, u8 old_key_type)
2443 {
2444         /* Legacy key */
2445         if (key_type < 0x03)
2446                 return true;
2447
2448         /* Debug keys are insecure so don't store them persistently */
2449         if (key_type == HCI_LK_DEBUG_COMBINATION)
2450                 return false;
2451
2452         /* Changed combination key and there's no previous one */
2453         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2454                 return false;
2455
2456         /* Security mode 3 case */
2457         if (!conn)
2458                 return true;
2459
2460         /* Neither the local nor the remote side asked for no-bonding */
2461         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2462                 return true;
2463
2464         /* Local side had dedicated bonding as requirement */
2465         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2466                 return true;
2467
2468         /* Remote side had dedicated bonding as requirement */
2469         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2470                 return true;
2471
2472         /* If none of the above criteria match, then don't store the key
2473          * persistently */
2474         return false;
2475 }
2476
2477 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
2478 {
2479         struct smp_ltk *k;
2480
2481         list_for_each_entry(k, &hdev->long_term_keys, list) {
2482                 if (k->ediv != ediv ||
2483                     memcmp(rand, k->rand, sizeof(k->rand)))
2484                         continue;
2485
2486                 return k;
2487         }
2488
2489         return NULL;
2490 }
2491
2492 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2493                                      u8 addr_type)
2494 {
2495         struct smp_ltk *k;
2496
2497         list_for_each_entry(k, &hdev->long_term_keys, list)
2498                 if (addr_type == k->bdaddr_type &&
2499                     bacmp(bdaddr, &k->bdaddr) == 0)
2500                         return k;
2501
2502         return NULL;
2503 }
2504
2505 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2506                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2507 {
2508         struct link_key *key, *old_key;
2509         u8 old_key_type;
2510         bool persistent;
2511
2512         old_key = hci_find_link_key(hdev, bdaddr);
2513         if (old_key) {
2514                 old_key_type = old_key->type;
2515                 key = old_key;
2516         } else {
2517                 old_key_type = conn ? conn->key_type : 0xff;
2518                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2519                 if (!key)
2520                         return -ENOMEM;
2521                 list_add(&key->list, &hdev->link_keys);
2522         }
2523
2524         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2525
2526         /* Some buggy controller combinations generate a changed
2527          * combination key for legacy pairing even when there's no
2528          * previous key */
2529         if (type == HCI_LK_CHANGED_COMBINATION &&
2530             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2531                 type = HCI_LK_COMBINATION;
2532                 if (conn)
2533                         conn->key_type = type;
2534         }
2535
2536         bacpy(&key->bdaddr, bdaddr);
2537         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2538         key->pin_len = pin_len;
2539
2540         if (type == HCI_LK_CHANGED_COMBINATION)
2541                 key->type = old_key_type;
2542         else
2543                 key->type = type;
2544
2545         if (!new_key)
2546                 return 0;
2547
2548         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2549
2550         mgmt_new_link_key(hdev, key, persistent);
2551
2552         if (conn)
2553                 conn->flush_key = !persistent;
2554
2555         return 0;
2556 }
2557
2558 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2559                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2560                 ediv, u8 rand[8])
2561 {
2562         struct smp_ltk *key, *old_key;
2563
2564         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2565                 return 0;
2566
2567         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2568         if (old_key)
2569                 key = old_key;
2570         else {
2571                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2572                 if (!key)
2573                         return -ENOMEM;
2574                 list_add(&key->list, &hdev->long_term_keys);
2575         }
2576
2577         bacpy(&key->bdaddr, bdaddr);
2578         key->bdaddr_type = addr_type;
2579         memcpy(key->val, tk, sizeof(key->val));
2580         key->authenticated = authenticated;
2581         key->ediv = ediv;
2582         key->enc_size = enc_size;
2583         key->type = type;
2584         memcpy(key->rand, rand, sizeof(key->rand));
2585
2586         if (!new_key)
2587                 return 0;
2588
2589         if (type & HCI_SMP_LTK)
2590                 mgmt_new_ltk(hdev, key, 1);
2591
2592         return 0;
2593 }
2594
2595 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2596 {
2597         struct link_key *key;
2598
2599         key = hci_find_link_key(hdev, bdaddr);
2600         if (!key)
2601                 return -ENOENT;
2602
2603         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2604
2605         list_del(&key->list);
2606         kfree(key);
2607
2608         return 0;
2609 }
2610
2611 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2612 {
2613         struct smp_ltk *k, *tmp;
2614
2615         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2616                 if (bacmp(bdaddr, &k->bdaddr))
2617                         continue;
2618
2619                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2620
2621                 list_del(&k->list);
2622                 kfree(k);
2623         }
2624
2625         return 0;
2626 }
2627
2628 /* HCI command timer function */
2629 static void hci_cmd_timeout(unsigned long arg)
2630 {
2631         struct hci_dev *hdev = (void *) arg;
2632
2633         if (hdev->sent_cmd) {
2634                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2635                 u16 opcode = __le16_to_cpu(sent->opcode);
2636
2637                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2638         } else {
2639                 BT_ERR("%s command tx timeout", hdev->name);
2640         }
2641
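             /* Restore the command credit so the next queued command can be
              * sent despite the missing completion event.
              */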
2642         atomic_set(&hdev->cmd_cnt, 1);
2643         queue_work(hdev->workqueue, &hdev->cmd_work);
2644 }
2645
2646 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2647                                           bdaddr_t *bdaddr)
2648 {
2649         struct oob_data *data;
2650
2651         list_for_each_entry(data, &hdev->remote_oob_data, list)
2652                 if (bacmp(bdaddr, &data->bdaddr) == 0)
2653                         return data;
2654
2655         return NULL;
2656 }
2657
2658 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2659 {
2660         struct oob_data *data;
2661
2662         data = hci_find_remote_oob_data(hdev, bdaddr);
2663         if (!data)
2664                 return -ENOENT;
2665
2666         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2667
2668         list_del(&data->list);
2669         kfree(data);
2670
2671         return 0;
2672 }
2673
2674 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2675 {
2676         struct oob_data *data, *n;
2677
2678         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2679                 list_del(&data->list);
2680                 kfree(data);
2681         }
2682
2683         return 0;
2684 }
2685
2686 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2687                             u8 *randomizer)
2688 {
2689         struct oob_data *data;
2690
2691         data = hci_find_remote_oob_data(hdev, bdaddr);
2692
2693         if (!data) {
2694                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2695                 if (!data)
2696                         return -ENOMEM;
2697
2698                 bacpy(&data->bdaddr, bdaddr);
2699                 list_add(&data->list, &hdev->remote_oob_data);
2700         }
2701
2702         memcpy(data->hash, hash, sizeof(data->hash));
2703         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2704
2705         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2706
2707         return 0;
2708 }
2709
2710 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2711                                          bdaddr_t *bdaddr, u8 type)
2712 {
2713         struct bdaddr_list *b;
2714
2715         list_for_each_entry(b, &hdev->blacklist, list) {
2716                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2717                         return b;
2718         }
2719
2720         return NULL;
2721 }
2722
2723 int hci_blacklist_clear(struct hci_dev *hdev)
2724 {
2725         struct list_head *p, *n;
2726
2727         list_for_each_safe(p, n, &hdev->blacklist) {
2728                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2729
2730                 list_del(p);
2731                 kfree(b);
2732         }
2733
2734         return 0;
2735 }
2736
2737 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2738 {
2739         struct bdaddr_list *entry;
2740
2741         if (!bacmp(bdaddr, BDADDR_ANY))
2742                 return -EBADF;
2743
2744         if (hci_blacklist_lookup(hdev, bdaddr, type))
2745                 return -EEXIST;
2746
2747         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2748         if (!entry)
2749                 return -ENOMEM;
2750
2751         bacpy(&entry->bdaddr, bdaddr);
2752         entry->bdaddr_type = type;
2753
2754         list_add(&entry->list, &hdev->blacklist);
2755
2756         return mgmt_device_blocked(hdev, bdaddr, type);
2757 }
2758
2759 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2760 {
2761         struct bdaddr_list *entry;
2762
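             /* BDADDR_ANY acts as a wildcard and clears the whole list */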
2763         if (!bacmp(bdaddr, BDADDR_ANY))
2764                 return hci_blacklist_clear(hdev);
2765
2766         entry = hci_blacklist_lookup(hdev, bdaddr, type);
2767         if (!entry)
2768                 return -ENOENT;
2769
2770         list_del(&entry->list);
2771         kfree(entry);
2772
2773         return mgmt_device_unblocked(hdev, bdaddr, type);
2774 }
2775
2776 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2777 {
2778         if (status) {
2779                 BT_ERR("Failed to start inquiry: status %d", status);
2780
2781                 hci_dev_lock(hdev);
2782                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2783                 hci_dev_unlock(hdev);
2784                 return;
2785         }
2786 }
2787
2788 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2789 {
2790         /* General inquiry access code (GIAC) */
2791         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2792         struct hci_request req;
2793         struct hci_cp_inquiry cp;
2794         int err;
2795
2796         if (status) {
2797                 BT_ERR("Failed to disable LE scanning: status %d", status);
2798                 return;
2799         }
2800
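             /* LE-only discovery finishes once scanning stops; interleaved
              * discovery carries on with a BR/EDR inquiry.
              */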
2801         switch (hdev->discovery.type) {
2802         case DISCOV_TYPE_LE:
2803                 hci_dev_lock(hdev);
2804                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2805                 hci_dev_unlock(hdev);
2806                 break;
2807
2808         case DISCOV_TYPE_INTERLEAVED:
2809                 hci_req_init(&req, hdev);
2810
2811                 memset(&cp, 0, sizeof(cp));
2812                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2813                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2814                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2815
2816                 hci_dev_lock(hdev);
2817
2818                 hci_inquiry_cache_flush(hdev);
2819
2820                 err = hci_req_run(&req, inquiry_complete);
2821                 if (err) {
2822                         BT_ERR("Inquiry request failed: err %d", err);
2823                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2824                 }
2825
2826                 hci_dev_unlock(hdev);
2827                 break;
2828         }
2829 }
2830
2831 static void le_scan_disable_work(struct work_struct *work)
2832 {
2833         struct hci_dev *hdev = container_of(work, struct hci_dev,
2834                                             le_scan_disable.work);
2835         struct hci_cp_le_set_scan_enable cp;
2836         struct hci_request req;
2837         int err;
2838
2839         BT_DBG("%s", hdev->name);
2840
2841         hci_req_init(&req, hdev);
2842
2843         memset(&cp, 0, sizeof(cp));
2844         cp.enable = LE_SCAN_DISABLE;
2845         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2846
2847         err = hci_req_run(&req, le_scan_disable_work_complete);
2848         if (err)
2849                 BT_ERR("Disable LE scanning request failed: err %d", err);
2850 }
2851
2852 /* Alloc HCI device */
2853 struct hci_dev *hci_alloc_dev(void)
2854 {
2855         struct hci_dev *hdev;
2856
2857         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2858         if (!hdev)
2859                 return NULL;
2860
2861         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2862         hdev->esco_type = (ESCO_HV1);
2863         hdev->link_mode = (HCI_LM_ACCEPT);
2864         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
2865         hdev->io_capability = 0x03;     /* No Input No Output */
2866         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2867         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2868
2869         hdev->sniff_max_interval = 800;
2870         hdev->sniff_min_interval = 80;
2871
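             /* Scan interval and window are in 0.625 ms units (96 and 48
              * give 60 ms and 30 ms); connection intervals are in 1.25 ms
              * units (0x0028 and 0x0038 give 50 ms and 70 ms).
              */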
2872         hdev->le_scan_interval = 0x0060;
2873         hdev->le_scan_window = 0x0030;
2874         hdev->le_conn_min_interval = 0x0028;
2875         hdev->le_conn_max_interval = 0x0038;
2876
2877         mutex_init(&hdev->lock);
2878         mutex_init(&hdev->req_lock);
2879
2880         INIT_LIST_HEAD(&hdev->mgmt_pending);
2881         INIT_LIST_HEAD(&hdev->blacklist);
2882         INIT_LIST_HEAD(&hdev->uuids);
2883         INIT_LIST_HEAD(&hdev->link_keys);
2884         INIT_LIST_HEAD(&hdev->long_term_keys);
2885         INIT_LIST_HEAD(&hdev->remote_oob_data);
2886         INIT_LIST_HEAD(&hdev->conn_hash.list);
2887
2888         INIT_WORK(&hdev->rx_work, hci_rx_work);
2889         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2890         INIT_WORK(&hdev->tx_work, hci_tx_work);
2891         INIT_WORK(&hdev->power_on, hci_power_on);
2892
2893         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2894         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2895         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2896
2897         skb_queue_head_init(&hdev->rx_q);
2898         skb_queue_head_init(&hdev->cmd_q);
2899         skb_queue_head_init(&hdev->raw_q);
2900
2901         init_waitqueue_head(&hdev->req_wait_q);
2902
2903         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2904
2905         hci_init_sysfs(hdev);
2906         discovery_init(hdev);
2907
2908         return hdev;
2909 }
2910 EXPORT_SYMBOL(hci_alloc_dev);
2911
2912 /* Free HCI device */
2913 void hci_free_dev(struct hci_dev *hdev)
2914 {
2915         /* Will be freed via the device release callback */
2916         put_device(&hdev->dev);
2917 }
2918 EXPORT_SYMBOL(hci_free_dev);
2919
2920 /* Register HCI device */
2921 int hci_register_dev(struct hci_dev *hdev)
2922 {
2923         int id, error;
2924
2925         if (!hdev->open || !hdev->close)
2926                 return -EINVAL;
2927
2928         /* Do not allow HCI_AMP devices to register at index 0,
2929          * so the index can be used as the AMP controller ID.
2930          */
2931         switch (hdev->dev_type) {
2932         case HCI_BREDR:
2933                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2934                 break;
2935         case HCI_AMP:
2936                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2937                 break;
2938         default:
2939                 return -EINVAL;
2940         }
2941
2942         if (id < 0)
2943                 return id;
2944
2945         sprintf(hdev->name, "hci%d", id);
2946         hdev->id = id;
2947
2948         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2949
2950         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2951                                           WQ_MEM_RECLAIM, 1, hdev->name);
2952         if (!hdev->workqueue) {
2953                 error = -ENOMEM;
2954                 goto err;
2955         }
2956
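             /* A separate request workqueue serializes synchronous request
              * processing such as power on/off, keeping it apart from the
              * RX/TX/command work running on hdev->workqueue.
              */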
2957         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2958                                               WQ_MEM_RECLAIM, 1, hdev->name);
2959         if (!hdev->req_workqueue) {
2960                 destroy_workqueue(hdev->workqueue);
2961                 error = -ENOMEM;
2962                 goto err;
2963         }
2964
2965         if (!IS_ERR_OR_NULL(bt_debugfs))
2966                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2967
2968         dev_set_name(&hdev->dev, "%s", hdev->name);
2969
2970         error = device_add(&hdev->dev);
2971         if (error < 0)
2972                 goto err_wqueue;
2973
2974         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2975                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2976                                     hdev);
2977         if (hdev->rfkill) {
2978                 if (rfkill_register(hdev->rfkill) < 0) {
2979                         rfkill_destroy(hdev->rfkill);
2980                         hdev->rfkill = NULL;
2981                 }
2982         }
2983
2984         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2985                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2986
2987         set_bit(HCI_SETUP, &hdev->dev_flags);
2988         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2989
2990         if (hdev->dev_type == HCI_BREDR) {
2991                 /* Assume BR/EDR support until proven otherwise (such as
2992          * through reading supported features during init).
2993                  */
2994                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2995         }
2996
2997         write_lock(&hci_dev_list_lock);
2998         list_add(&hdev->list, &hci_dev_list);
2999         write_unlock(&hci_dev_list_lock);
3000
3001         hci_notify(hdev, HCI_DEV_REG);
3002         hci_dev_hold(hdev);
3003
3004         queue_work(hdev->req_workqueue, &hdev->power_on);
3005
3006         return id;
3007
3008 err_wqueue:
3009         destroy_workqueue(hdev->workqueue);
3010         destroy_workqueue(hdev->req_workqueue);
3011 err:
3012         ida_simple_remove(&hci_index_ida, hdev->id);
3013
3014         return error;
3015 }
3016 EXPORT_SYMBOL(hci_register_dev);
3017
3018 /* Unregister HCI device */
3019 void hci_unregister_dev(struct hci_dev *hdev)
3020 {
3021         int i, id;
3022
3023         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3024
3025         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3026
3027         id = hdev->id;
3028
3029         write_lock(&hci_dev_list_lock);
3030         list_del(&hdev->list);
3031         write_unlock(&hci_dev_list_lock);
3032
3033         hci_dev_do_close(hdev);
3034
3035         for (i = 0; i < NUM_REASSEMBLY; i++)
3036                 kfree_skb(hdev->reassembly[i]);
3037
3038         cancel_work_sync(&hdev->power_on);
3039
3040         if (!test_bit(HCI_INIT, &hdev->flags) &&
3041             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3042                 hci_dev_lock(hdev);
3043                 mgmt_index_removed(hdev);
3044                 hci_dev_unlock(hdev);
3045         }
3046
3047         /* mgmt_index_removed should take care of emptying the
3048          * pending list */
3049         BUG_ON(!list_empty(&hdev->mgmt_pending));
3050
3051         hci_notify(hdev, HCI_DEV_UNREG);
3052
3053         if (hdev->rfkill) {
3054                 rfkill_unregister(hdev->rfkill);
3055                 rfkill_destroy(hdev->rfkill);
3056         }
3057
3058         device_del(&hdev->dev);
3059
3060         debugfs_remove_recursive(hdev->debugfs);
3061
3062         destroy_workqueue(hdev->workqueue);
3063         destroy_workqueue(hdev->req_workqueue);
3064
3065         hci_dev_lock(hdev);
3066         hci_blacklist_clear(hdev);
3067         hci_uuids_clear(hdev);
3068         hci_link_keys_clear(hdev);
3069         hci_smp_ltks_clear(hdev);
3070         hci_remote_oob_data_clear(hdev);
3071         hci_dev_unlock(hdev);
3072
3073         hci_dev_put(hdev);
3074
3075         ida_simple_remove(&hci_index_ida, id);
3076 }
3077 EXPORT_SYMBOL(hci_unregister_dev);
3078
3079 /* Suspend HCI device */
3080 int hci_suspend_dev(struct hci_dev *hdev)
3081 {
3082         hci_notify(hdev, HCI_DEV_SUSPEND);
3083         return 0;
3084 }
3085 EXPORT_SYMBOL(hci_suspend_dev);
3086
3087 /* Resume HCI device */
3088 int hci_resume_dev(struct hci_dev *hdev)
3089 {
3090         hci_notify(hdev, HCI_DEV_RESUME);
3091         return 0;
3092 }
3093 EXPORT_SYMBOL(hci_resume_dev);
3094
3095 /* Receive frame from HCI drivers */
3096 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3097 {
3098         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3099                       !test_bit(HCI_INIT, &hdev->flags))) {
3100                 kfree_skb(skb);
3101                 return -ENXIO;
3102         }
3103
3104         /* Incoming skb */
3105         bt_cb(skb)->incoming = 1;
3106
3107         /* Time stamp */
3108         __net_timestamp(skb);
3109
3110         skb_queue_tail(&hdev->rx_q, skb);
3111         queue_work(hdev->workqueue, &hdev->rx_work);
3112
3113         return 0;
3114 }
3115 EXPORT_SYMBOL(hci_recv_frame);
3116
3117 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3118                           int count, __u8 index)
3119 {
3120         int len = 0;
3121         int hlen = 0;
3122         int remain = count;
3123         struct sk_buff *skb;
3124         struct bt_skb_cb *scb;
3125
3126         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3127             index >= NUM_REASSEMBLY)
3128                 return -EILSEQ;
3129
3130         skb = hdev->reassembly[index];
3131
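             /* No partial frame in progress: allocate an skb large enough
              * for the biggest packet of this type and expect its header
              * first.
              */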
3132         if (!skb) {
3133                 switch (type) {
3134                 case HCI_ACLDATA_PKT:
3135                         len = HCI_MAX_FRAME_SIZE;
3136                         hlen = HCI_ACL_HDR_SIZE;
3137                         break;
3138                 case HCI_EVENT_PKT:
3139                         len = HCI_MAX_EVENT_SIZE;
3140                         hlen = HCI_EVENT_HDR_SIZE;
3141                         break;
3142                 case HCI_SCODATA_PKT:
3143                         len = HCI_MAX_SCO_SIZE;
3144                         hlen = HCI_SCO_HDR_SIZE;
3145                         break;
3146                 }
3147
3148                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3149                 if (!skb)
3150                         return -ENOMEM;
3151
3152                 scb = (void *) skb->cb;
3153                 scb->expect = hlen;
3154                 scb->pkt_type = type;
3155
3156                 hdev->reassembly[index] = skb;
3157         }
3158
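             /* Consume input bytes: fill the header first, then the payload
              * length parsed from that header; whatever is left over is
              * returned to the caller.
              */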
3159         while (count) {
3160                 scb = (void *) skb->cb;
3161                 len = min_t(uint, scb->expect, count);
3162
3163                 memcpy(skb_put(skb, len), data, len);
3164
3165                 count -= len;
3166                 data += len;
3167                 scb->expect -= len;
3168                 remain = count;
3169
3170                 switch (type) {
3171                 case HCI_EVENT_PKT:
3172                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3173                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3174                                 scb->expect = h->plen;
3175
3176                                 if (skb_tailroom(skb) < scb->expect) {
3177                                         kfree_skb(skb);
3178                                         hdev->reassembly[index] = NULL;
3179                                         return -ENOMEM;
3180                                 }
3181                         }
3182                         break;
3183
3184                 case HCI_ACLDATA_PKT:
3185                 if (skb->len == HCI_ACL_HDR_SIZE) {
3186                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3187                                 scb->expect = __le16_to_cpu(h->dlen);
3188
3189                                 if (skb_tailroom(skb) < scb->expect) {
3190                                         kfree_skb(skb);
3191                                         hdev->reassembly[index] = NULL;
3192                                         return -ENOMEM;
3193                                 }
3194                         }
3195                         break;
3196
3197                 case HCI_SCODATA_PKT:
3198                         if (skb->len == HCI_SCO_HDR_SIZE) {
3199                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3200                                 scb->expect = h->dlen;
3201
3202                                 if (skb_tailroom(skb) < scb->expect) {
3203                                         kfree_skb(skb);
3204                                         hdev->reassembly[index] = NULL;
3205                                         return -ENOMEM;
3206                                 }
3207                         }
3208                         break;
3209                 }
3210
3211                 if (scb->expect == 0) {
3212                         /* Complete frame */
3213
3214                         bt_cb(skb)->pkt_type = type;
3215                         hci_recv_frame(hdev, skb);
3216
3217                         hdev->reassembly[index] = NULL;
3218                         return remain;
3219                 }
3220         }
3221
3222         return remain;
3223 }
3224
3225 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3226 {
3227         int rem = 0;
3228
3229         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3230                 return -EILSEQ;
3231
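             /* Packet type values start at 1, so type - 1 gives each packet
              * type its own reassembly slot; slot 0 stays reserved for the
              * stream parser (STREAM_REASSEMBLY).
              */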
3232         while (count) {
3233                 rem = hci_reassembly(hdev, type, data, count, type - 1);
3234                 if (rem < 0)
3235                         return rem;
3236
3237                 data += (count - rem);
3238                 count = rem;
3239         }
3240
3241         return rem;
3242 }
3243 EXPORT_SYMBOL(hci_recv_fragment);
3244
3245 #define STREAM_REASSEMBLY 0
3246
3247 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3248 {
3249         int type;
3250         int rem = 0;
3251
3252         while (count) {
3253                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3254
3255                 if (!skb) {
3256                         struct { char type; } *pkt;
3257
3258                         /* Start of the frame */
3259                         pkt = data;
3260                         type = pkt->type;
3261
3262                         data++;
3263                         count--;
3264                 } else
3265                         type = bt_cb(skb)->pkt_type;
3266
3267                 rem = hci_reassembly(hdev, type, data, count,
3268                                      STREAM_REASSEMBLY);
3269                 if (rem < 0)
3270                         return rem;
3271
3272                 data += (count - rem);
3273                 count = rem;
3274         }
3275
3276         return rem;
3277 }
3278 EXPORT_SYMBOL(hci_recv_stream_fragment);
3279
3280 /* ---- Interface to upper protocols ---- */
3281
3282 int hci_register_cb(struct hci_cb *cb)
3283 {
3284         BT_DBG("%p name %s", cb, cb->name);
3285
3286         write_lock(&hci_cb_list_lock);
3287         list_add(&cb->list, &hci_cb_list);
3288         write_unlock(&hci_cb_list_lock);
3289
3290         return 0;
3291 }
3292 EXPORT_SYMBOL(hci_register_cb);
3293
3294 int hci_unregister_cb(struct hci_cb *cb)
3295 {
3296         BT_DBG("%p name %s", cb, cb->name);
3297
3298         write_lock(&hci_cb_list_lock);
3299         list_del(&cb->list);
3300         write_unlock(&hci_cb_list_lock);
3301
3302         return 0;
3303 }
3304 EXPORT_SYMBOL(hci_unregister_cb);
3305
3306 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3307 {
3308         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3309
3310         /* Time stamp */
3311         __net_timestamp(skb);
3312
3313         /* Send copy to monitor */
3314         hci_send_to_monitor(hdev, skb);
3315
3316         if (atomic_read(&hdev->promisc)) {
3317                 /* Send copy to the sockets */
3318                 hci_send_to_sock(hdev, skb);
3319         }
3320
3321         /* Get rid of the skb owner prior to sending to the driver. */
3322         skb_orphan(skb);
3323
3324         if (hdev->send(hdev, skb) < 0)
3325                 BT_ERR("%s sending frame failed", hdev->name);
3326 }
3327
3328 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3329 {
3330         skb_queue_head_init(&req->cmd_q);
3331         req->hdev = hdev;
3332         req->err = 0;
3333 }
3334
3335 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3336 {
3337         struct hci_dev *hdev = req->hdev;
3338         struct sk_buff *skb;
3339         unsigned long flags;
3340
3341         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3342
3343         /* If an error occurred during request building, remove all HCI
3344          * commands queued on the HCI request queue.
3345          */
3346         if (req->err) {
3347                 skb_queue_purge(&req->cmd_q);
3348                 return req->err;
3349         }
3350
3351         /* Do not allow empty requests */
3352         if (skb_queue_empty(&req->cmd_q))
3353                 return -ENODATA;
3354
3355         skb = skb_peek_tail(&req->cmd_q);
3356         bt_cb(skb)->req.complete = complete;
3357
3358         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3359         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3360         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3361
3362         queue_work(hdev->workqueue, &hdev->cmd_work);
3363
3364         return 0;
3365 }
3366
3367 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3368                                        u32 plen, const void *param)
3369 {
3370         int len = HCI_COMMAND_HDR_SIZE + plen;
3371         struct hci_command_hdr *hdr;
3372         struct sk_buff *skb;
3373
3374         skb = bt_skb_alloc(len, GFP_ATOMIC);
3375         if (!skb)
3376                 return NULL;
3377
3378         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3379         hdr->opcode = cpu_to_le16(opcode);
3380         hdr->plen   = plen;
3381
3382         if (plen)
3383                 memcpy(skb_put(skb, plen), param, plen);
3384
3385         BT_DBG("skb len %d", skb->len);
3386
3387         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3388
3389         return skb;
3390 }
3391
3392 /* Send HCI command */
3393 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3394                  const void *param)
3395 {
3396         struct sk_buff *skb;
3397
3398         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3399
3400         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3401         if (!skb) {
3402                 BT_ERR("%s no memory for command", hdev->name);
3403                 return -ENOMEM;
3404         }
3405
3406         /* Stand-alone HCI commands must be flagged as
3407          * single-command requests.
3408          */
3409         bt_cb(skb)->req.start = true;
3410
3411         skb_queue_tail(&hdev->cmd_q, skb);
3412         queue_work(hdev->workqueue, &hdev->cmd_work);
3413
3414         return 0;
3415 }
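
/* Example (editorial): a stand-alone command is a one-shot request; a
 * hypothetical caller could reset the controller like this and pick up
 * the result later from the Command Complete event.
 */
static int example_reset(struct hci_dev *hdev)
{
        return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}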
3416
3417 /* Queue a command to an asynchronous HCI request */
3418 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3419                     const void *param, u8 event)
3420 {
3421         struct hci_dev *hdev = req->hdev;
3422         struct sk_buff *skb;
3423
3424         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3425
3426         /* If an error occurred during request building, there is no point in
3427          * queueing the HCI command. We can simply return.
3428          */
3429         if (req->err)
3430                 return;
3431
3432         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3433         if (!skb) {
3434                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3435                        hdev->name, opcode);
3436                 req->err = -ENOMEM;
3437                 return;
3438         }
3439
3440         if (skb_queue_empty(&req->cmd_q))
3441                 bt_cb(skb)->req.start = true;
3442
3443         bt_cb(skb)->req.event = event;
3444
3445         skb_queue_tail(&req->cmd_q, skb);
3446 }
3447
3448 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3449                  const void *param)
3450 {
3451         hci_req_add_ev(req, opcode, plen, param, 0);
3452 }
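
/* Example (editorial): the usual request pattern is init -> add one or
 * more commands -> run with a completion callback, which fires once the
 * last command in the request completes. The function names here are
 * made up for illustration.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_enable_scan(struct hci_dev *hdev)
{
        struct hci_request req;
        u8 scan = SCAN_PAGE | SCAN_INQUIRY;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return hci_req_run(&req, example_req_complete);
}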
3453
3454 /* Get data from the previously sent command */
3455 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3456 {
3457         struct hci_command_hdr *hdr;
3458
3459         if (!hdev->sent_cmd)
3460                 return NULL;
3461
3462         hdr = (void *) hdev->sent_cmd->data;
3463
3464         if (hdr->opcode != cpu_to_le16(opcode))
3465                 return NULL;
3466
3467         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3468
3469         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3470 }
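
/* Example (editorial): event handlers use hci_sent_cmd_data() to match
 * a Command Complete event against the parameters the command was sent
 * with. Sketch modelled on the handlers in hci_event.c; this particular
 * handler is hypothetical.
 */
static void example_cc_write_scan_enable(struct hci_dev *hdev, u8 status)
{
        u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

        if (!sent)
                return;

        BT_DBG("%s scan 0x%2.2x status 0x%2.2x", hdev->name, *sent, status);
}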
3471
3472 /* Send ACL data */
3473 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3474 {
3475         struct hci_acl_hdr *hdr;
3476         int len = skb->len;
3477
3478         skb_push(skb, HCI_ACL_HDR_SIZE);
3479         skb_reset_transport_header(skb);
3480         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3481         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3482         hdr->dlen   = cpu_to_le16(len);
3483 }
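
/* Example (editorial): hci_handle_pack() folds the 12-bit connection
 * handle and the 4-bit packet boundary/broadcast flags into the single
 * 16-bit header field. For handle 0x002a with ACL_START (0x02):
 * 0x002a | (0x02 << 12) = 0x202a, stored little endian on the wire.
 */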
3484
3485 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3486                           struct sk_buff *skb, __u16 flags)
3487 {
3488         struct hci_conn *conn = chan->conn;
3489         struct hci_dev *hdev = conn->hdev;
3490         struct sk_buff *list;
3491
3492         skb->len = skb_headlen(skb);
3493         skb->data_len = 0;
3494
3495         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3496
3497         switch (hdev->dev_type) {
3498         case HCI_BREDR:
3499                 hci_add_acl_hdr(skb, conn->handle, flags);
3500                 break;
3501         case HCI_AMP:
3502                 hci_add_acl_hdr(skb, chan->handle, flags);
3503                 break;
3504         default:
3505                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3506                 return;
3507         }
3508
3509         list = skb_shinfo(skb)->frag_list;
3510         if (!list) {
3511                 /* Non-fragmented */
3512                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3513
3514                 skb_queue_tail(queue, skb);
3515         } else {
3516                 /* Fragmented */
3517                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3518
3519                 skb_shinfo(skb)->frag_list = NULL;
3520
3521                 /* Queue all fragments atomically */
3522                 spin_lock(&queue->lock);
3523
3524                 __skb_queue_tail(queue, skb);
3525
3526                 flags &= ~ACL_START;
3527                 flags |= ACL_CONT;
3528                 do {
3529                         skb = list; list = list->next;
3530
3531                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3532                         hci_add_acl_hdr(skb, conn->handle, flags);
3533
3534                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3535
3536                         __skb_queue_tail(queue, skb);
3537                 } while (list);
3538
3539                 spin_unlock(&queue->lock);
3540         }
3541 }
3542
3543 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3544 {
3545         struct hci_dev *hdev = chan->conn->hdev;
3546
3547         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3548
3549         hci_queue_acl(chan, &chan->data_q, skb, flags);
3550
3551         queue_work(hdev->workqueue, &hdev->tx_work);
3552 }
3553
3554 /* Send SCO data */
3555 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3556 {
3557         struct hci_dev *hdev = conn->hdev;
3558         struct hci_sco_hdr hdr;
3559
3560         BT_DBG("%s len %d", hdev->name, skb->len);
3561
3562         hdr.handle = cpu_to_le16(conn->handle);
3563         hdr.dlen   = skb->len;
3564
3565         skb_push(skb, HCI_SCO_HDR_SIZE);
3566         skb_reset_transport_header(skb);
3567         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3568
3569         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3570
3571         skb_queue_tail(&conn->data_q, skb);
3572         queue_work(hdev->workqueue, &hdev->tx_work);
3573 }
3574
3575 /* ---- HCI TX task (outgoing data) ---- */
3576
3577 /* HCI Connection scheduler */
3578 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3579                                      int *quote)
3580 {
3581         struct hci_conn_hash *h = &hdev->conn_hash;
3582         struct hci_conn *conn = NULL, *c;
3583         unsigned int num = 0, min = ~0;
3584
3585         /* We don't have to lock the device here. Connections are always
3586          * added and removed with the TX task disabled. */
3587
3588         rcu_read_lock();
3589
3590         list_for_each_entry_rcu(c, &h->list, list) {
3591                 if (c->type != type || skb_queue_empty(&c->data_q))
3592                         continue;
3593
3594                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3595                         continue;
3596
3597                 num++;
3598
3599                 if (c->sent < min) {
3600                         min  = c->sent;
3601                         conn = c;
3602                 }
3603
3604                 if (hci_conn_num(hdev, type) == num)
3605                         break;
3606         }
3607
3608         rcu_read_unlock();
3609
3610         if (conn) {
3611                 int cnt, q;
3612
3613                 switch (conn->type) {
3614                 case ACL_LINK:
3615                         cnt = hdev->acl_cnt;
3616                         break;
3617                 case SCO_LINK:
3618                 case ESCO_LINK:
3619                         cnt = hdev->sco_cnt;
3620                         break;
3621                 case LE_LINK:
3622                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3623                         break;
3624                 default:
3625                         cnt = 0;
3626                         BT_ERR("Unknown link type");
3627                 }
3628
3629                 q = cnt / num;
3630                 *quote = q ? q : 1;
3631         } else
3632                 *quote = 0;
3633
3634         BT_DBG("conn %p quote %d", conn, *quote);
3635         return conn;
3636 }
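
/* Example (editorial): the quote is a fair share of the controller's
 * free buffers among busy connections of the same type. With
 * hdev->acl_cnt == 8 and three ACL connections holding queued data, the
 * least-recently-served one gets 8 / 3 = 2 packets this round; a picked
 * connection is always granted at least 1.
 */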
3637
3638 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3639 {
3640         struct hci_conn_hash *h = &hdev->conn_hash;
3641         struct hci_conn *c;
3642
3643         BT_ERR("%s link tx timeout", hdev->name);
3644
3645         rcu_read_lock();
3646
3647         /* Kill stalled connections */
3648         list_for_each_entry_rcu(c, &h->list, list) {
3649                 if (c->type == type && c->sent) {
3650                         BT_ERR("%s killing stalled connection %pMR",
3651                                hdev->name, &c->dst);
3652                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3653                 }
3654         }
3655
3656         rcu_read_unlock();
3657 }
3658
3659 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3660                                       int *quote)
3661 {
3662         struct hci_conn_hash *h = &hdev->conn_hash;
3663         struct hci_chan *chan = NULL;
3664         unsigned int num = 0, min = ~0, cur_prio = 0;
3665         struct hci_conn *conn;
3666         int cnt, q, conn_num = 0;
3667
3668         BT_DBG("%s", hdev->name);
3669
3670         rcu_read_lock();
3671
3672         list_for_each_entry_rcu(conn, &h->list, list) {
3673                 struct hci_chan *tmp;
3674
3675                 if (conn->type != type)
3676                         continue;
3677
3678                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3679                         continue;
3680
3681                 conn_num++;
3682
3683                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3684                         struct sk_buff *skb;
3685
3686                         if (skb_queue_empty(&tmp->data_q))
3687                                 continue;
3688
3689                         skb = skb_peek(&tmp->data_q);
3690                         if (skb->priority < cur_prio)
3691                                 continue;
3692
3693                         if (skb->priority > cur_prio) {
3694                                 num = 0;
3695                                 min = ~0;
3696                                 cur_prio = skb->priority;
3697                         }
3698
3699                         num++;
3700
3701                         if (conn->sent < min) {
3702                                 min  = conn->sent;
3703                                 chan = tmp;
3704                         }
3705                 }
3706
3707                 if (hci_conn_num(hdev, type) == conn_num)
3708                         break;
3709         }
3710
3711         rcu_read_unlock();
3712
3713         if (!chan)
3714                 return NULL;
3715
3716         switch (chan->conn->type) {
3717         case ACL_LINK:
3718                 cnt = hdev->acl_cnt;
3719                 break;
3720         case AMP_LINK:
3721                 cnt = hdev->block_cnt;
3722                 break;
3723         case SCO_LINK:
3724         case ESCO_LINK:
3725                 cnt = hdev->sco_cnt;
3726                 break;
3727         case LE_LINK:
3728                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3729                 break;
3730         default:
3731                 cnt = 0;
3732                 BT_ERR("Unknown link type");
3733         }
3734
3735         q = cnt / num;
3736         *quote = q ? q : 1;
3737         BT_DBG("chan %p quote %d", chan, *quote);
3738         return chan;
3739 }
3740
3741 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3742 {
3743         struct hci_conn_hash *h = &hdev->conn_hash;
3744         struct hci_conn *conn;
3745         int num = 0;
3746
3747         BT_DBG("%s", hdev->name);
3748
3749         rcu_read_lock();
3750
3751         list_for_each_entry_rcu(conn, &h->list, list) {
3752                 struct hci_chan *chan;
3753
3754                 if (conn->type != type)
3755                         continue;
3756
3757                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3758                         continue;
3759
3760                 num++;
3761
3762                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3763                         struct sk_buff *skb;
3764
3765                         if (chan->sent) {
3766                                 chan->sent = 0;
3767                                 continue;
3768                         }
3769
3770                         if (skb_queue_empty(&chan->data_q))
3771                                 continue;
3772
3773                         skb = skb_peek(&chan->data_q);
3774                         if (skb->priority >= HCI_PRIO_MAX - 1)
3775                                 continue;
3776
3777                         skb->priority = HCI_PRIO_MAX - 1;
3778
3779                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3780                                skb->priority);
3781                 }
3782
3783                 if (hci_conn_num(hdev, type) == num)
3784                         break;
3785         }
3786
3787         rcu_read_unlock();
3788
3789 }
3790
3791 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3792 {
3793         /* Calculate count of blocks used by this packet */
3794         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3795 }
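
/* Example (editorial): with a hypothetical block_len of 64 octets, an
 * ACL frame of 339 octets (335 payload + 4 header) occupies
 * DIV_ROUND_UP(339 - 4, 64) = 6 controller data blocks.
 */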
3796
3797 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3798 {
3799         if (!test_bit(HCI_RAW, &hdev->flags)) {
3800                 /* ACL tx timeout must be longer than maximum
3801                  * link supervision timeout (40.9 seconds) */
3802                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3803                                        HCI_ACL_TX_TIMEOUT))
3804                         hci_link_tx_to(hdev, ACL_LINK);
3805         }
3806 }
3807
3808 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3809 {
3810         unsigned int cnt = hdev->acl_cnt;
3811         struct hci_chan *chan;
3812         struct sk_buff *skb;
3813         int quote;
3814
3815         __check_timeout(hdev, cnt);
3816
3817         while (hdev->acl_cnt &&
3818                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3819                 u32 priority = (skb_peek(&chan->data_q))->priority;
3820                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3821                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3822                                skb->len, skb->priority);
3823
3824                         /* Stop if priority has changed */
3825                         if (skb->priority < priority)
3826                                 break;
3827
3828                         skb = skb_dequeue(&chan->data_q);
3829
3830                         hci_conn_enter_active_mode(chan->conn,
3831                                                    bt_cb(skb)->force_active);
3832
3833                         hci_send_frame(hdev, skb);
3834                         hdev->acl_last_tx = jiffies;
3835
3836                         hdev->acl_cnt--;
3837                         chan->sent++;
3838                         chan->conn->sent++;
3839                 }
3840         }
3841
3842         if (cnt != hdev->acl_cnt)
3843                 hci_prio_recalculate(hdev, ACL_LINK);
3844 }
3845
3846 static void hci_sched_acl_blk(struct hci_dev *hdev)
3847 {
3848         unsigned int cnt = hdev->block_cnt;
3849         struct hci_chan *chan;
3850         struct sk_buff *skb;
3851         int quote;
3852         u8 type;
3853
3854         __check_timeout(hdev, cnt);
3855
3856         BT_DBG("%s", hdev->name);
3857
3858         if (hdev->dev_type == HCI_AMP)
3859                 type = AMP_LINK;
3860         else
3861                 type = ACL_LINK;
3862
3863         while (hdev->block_cnt > 0 &&
3864                (chan = hci_chan_sent(hdev, type, &quote))) {
3865                 u32 priority = (skb_peek(&chan->data_q))->priority;
3866                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3867                         int blocks;
3868
3869                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3870                                skb->len, skb->priority);
3871
3872                         /* Stop if priority has changed */
3873                         if (skb->priority < priority)
3874                                 break;
3875
3876                         skb = skb_dequeue(&chan->data_q);
3877
3878                         blocks = __get_blocks(hdev, skb);
3879                         if (blocks > hdev->block_cnt)
3880                                 return;
3881
3882                         hci_conn_enter_active_mode(chan->conn,
3883                                                    bt_cb(skb)->force_active);
3884
3885                         hci_send_frame(hdev, skb);
3886                         hdev->acl_last_tx = jiffies;
3887
3888                         hdev->block_cnt -= blocks;
3889                         quote -= blocks;
3890
3891                         chan->sent += blocks;
3892                         chan->conn->sent += blocks;
3893                 }
3894         }
3895
3896         if (cnt != hdev->block_cnt)
3897                 hci_prio_recalculate(hdev, type);
3898 }
3899
3900 static void hci_sched_acl(struct hci_dev *hdev)
3901 {
3902         BT_DBG("%s", hdev->name);
3903
3904         /* Nothing to schedule if a BR/EDR controller has no ACL links */
3905         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3906                 return;
3907
3908         /* Nothing to schedule if an AMP controller has no AMP links */
3909         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3910                 return;
3911
3912         switch (hdev->flow_ctl_mode) {
3913         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3914                 hci_sched_acl_pkt(hdev);
3915                 break;
3916
3917         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3918                 hci_sched_acl_blk(hdev);
3919                 break;
3920         }
3921 }
3922
3923 /* Schedule SCO */
3924 static void hci_sched_sco(struct hci_dev *hdev)
3925 {
3926         struct hci_conn *conn;
3927         struct sk_buff *skb;
3928         int quote;
3929
3930         BT_DBG("%s", hdev->name);
3931
3932         if (!hci_conn_num(hdev, SCO_LINK))
3933                 return;
3934
3935         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3936                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3937                         BT_DBG("skb %p len %d", skb, skb->len);
3938                         hci_send_frame(hdev, skb);
3939
3940                         conn->sent++;
3941                         if (conn->sent == ~0)
3942                                 conn->sent = 0;
3943                 }
3944         }
3945 }
3946
3947 static void hci_sched_esco(struct hci_dev *hdev)
3948 {
3949         struct hci_conn *conn;
3950         struct sk_buff *skb;
3951         int quote;
3952
3953         BT_DBG("%s", hdev->name);
3954
3955         if (!hci_conn_num(hdev, ESCO_LINK))
3956                 return;
3957
3958         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3959                                                      &quote))) {
3960                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3961                         BT_DBG("skb %p len %d", skb, skb->len);
3962                         hci_send_frame(hdev, skb);
3963
3964                         conn->sent++;
3965                         if (conn->sent == ~0)
3966                                 conn->sent = 0;
3967                 }
3968         }
3969 }
3970
3971 static void hci_sched_le(struct hci_dev *hdev)
3972 {
3973         struct hci_chan *chan;
3974         struct sk_buff *skb;
3975         int quote, cnt, tmp;
3976
3977         BT_DBG("%s", hdev->name);
3978
3979         if (!hci_conn_num(hdev, LE_LINK))
3980                 return;
3981
3982         if (!test_bit(HCI_RAW, &hdev->flags)) {
3983                 /* LE tx timeout must be longer than maximum
3984                  * link supervision timeout (40.9 seconds) */
3985                 if (!hdev->le_cnt && hdev->le_pkts &&
3986                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3987                         hci_link_tx_to(hdev, LE_LINK);
3988         }
3989
3990         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3991         tmp = cnt;
3992         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3993                 u32 priority = (skb_peek(&chan->data_q))->priority;
3994                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3995                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3996                                skb->len, skb->priority);
3997
3998                         /* Stop if priority has changed */
3999                         if (skb->priority < priority)
4000                                 break;
4001
4002                         skb = skb_dequeue(&chan->data_q);
4003
4004                         hci_send_frame(hdev, skb);
4005                         hdev->le_last_tx = jiffies;
4006
4007                         cnt--;
4008                         chan->sent++;
4009                         chan->conn->sent++;
4010                 }
4011         }
4012
4013         if (hdev->le_pkts)
4014                 hdev->le_cnt = cnt;
4015         else
4016                 hdev->acl_cnt = cnt;
4017
4018         if (cnt != tmp)
4019                 hci_prio_recalculate(hdev, LE_LINK);
4020 }
4021
4022 static void hci_tx_work(struct work_struct *work)
4023 {
4024         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4025         struct sk_buff *skb;
4026
4027         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4028                hdev->sco_cnt, hdev->le_cnt);
4029
4030         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4031                 /* Schedule queues and send pending frames to the HCI driver */
4032                 hci_sched_acl(hdev);
4033                 hci_sched_sco(hdev);
4034                 hci_sched_esco(hdev);
4035                 hci_sched_le(hdev);
4036         }
4037
4038         /* Send next queued raw (unknown type) packet */
4039         while ((skb = skb_dequeue(&hdev->raw_q)))
4040                 hci_send_frame(hdev, skb);
4041 }
4042
4043 /* ----- HCI RX task (incoming data processing) ----- */
4044
4045 /* ACL data packet */
4046 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4047 {
4048         struct hci_acl_hdr *hdr = (void *) skb->data;
4049         struct hci_conn *conn;
4050         __u16 handle, flags;
4051
4052         skb_pull(skb, HCI_ACL_HDR_SIZE);
4053
4054         handle = __le16_to_cpu(hdr->handle);
4055         flags  = hci_flags(handle);
4056         handle = hci_handle(handle);
4057
4058         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4059                handle, flags);
4060
4061         hdev->stat.acl_rx++;
4062
4063         hci_dev_lock(hdev);
4064         conn = hci_conn_hash_lookup_handle(hdev, handle);
4065         hci_dev_unlock(hdev);
4066
4067         if (conn) {
4068                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4069
4070                 /* Send to upper protocol */
4071                 l2cap_recv_acldata(conn, skb, flags);
4072                 return;
4073         } else {
4074                 BT_ERR("%s ACL packet for unknown connection handle %d",
4075                        hdev->name, handle);
4076         }
4077
4078         kfree_skb(skb);
4079 }
4080
4081 /* SCO data packet */
4082 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4083 {
4084         struct hci_sco_hdr *hdr = (void *) skb->data;
4085         struct hci_conn *conn;
4086         __u16 handle;
4087
4088         skb_pull(skb, HCI_SCO_HDR_SIZE);
4089
4090         handle = __le16_to_cpu(hdr->handle);
4091
4092         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4093
4094         hdev->stat.sco_rx++;
4095
4096         hci_dev_lock(hdev);
4097         conn = hci_conn_hash_lookup_handle(hdev, handle);
4098         hci_dev_unlock(hdev);
4099
4100         if (conn) {
4101                 /* Send to upper protocol */
4102                 sco_recv_scodata(conn, skb);
4103                 return;
4104         } else {
4105                 BT_ERR("%s SCO packet for unknown connection handle %d",
4106                        hdev->name, handle);
4107         }
4108
4109         kfree_skb(skb);
4110 }
4111
4112 static bool hci_req_is_complete(struct hci_dev *hdev)
4113 {
4114         struct sk_buff *skb;
4115
4116         skb = skb_peek(&hdev->cmd_q);
4117         if (!skb)
4118                 return true;
4119
4120         return bt_cb(skb)->req.start;
4121 }
4122
4123 static void hci_resend_last(struct hci_dev *hdev)
4124 {
4125         struct hci_command_hdr *sent;
4126         struct sk_buff *skb;
4127         u16 opcode;
4128
4129         if (!hdev->sent_cmd)
4130                 return;
4131
4132         sent = (void *) hdev->sent_cmd->data;
4133         opcode = __le16_to_cpu(sent->opcode);
4134         if (opcode == HCI_OP_RESET)
4135                 return;
4136
4137         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4138         if (!skb)
4139                 return;
4140
4141         skb_queue_head(&hdev->cmd_q, skb);
4142         queue_work(hdev->workqueue, &hdev->cmd_work);
4143 }
4144
4145 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4146 {
4147         hci_req_complete_t req_complete = NULL;
4148         struct sk_buff *skb;
4149         unsigned long flags;
4150
4151         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4152
4153         /* If the completed command doesn't match the last one that was
4154          * sent, we need to handle it specially.
4155          */
4156         if (!hci_sent_cmd_data(hdev, opcode)) {
4157                 /* Some CSR based controllers generate a spontaneous
4158                  * reset complete event during init and any pending
4159                  * command will never be completed. In such a case we
4160                  * need to resend whatever was the last sent
4161                  * command.
4162                  */
4163                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4164                         hci_resend_last(hdev);
4165
4166                 return;
4167         }
4168
4169         /* If the command succeeded and there are still more commands in
4170          * this request, the request is not yet complete.
4171          */
4172         if (!status && !hci_req_is_complete(hdev))
4173                 return;
4174
4175         /* If this was the last command in a request, the complete
4176          * callback would be found in hdev->sent_cmd instead of the
4177          * command queue (hdev->cmd_q).
4178          */
4179         if (hdev->sent_cmd) {
4180                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4181
4182                 if (req_complete) {
4183                         /* We must set the complete callback to NULL to
4184                          * avoid calling the callback more than once if
4185                          * this function gets called again.
4186                          */
4187                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4188
4189                         goto call_complete;
4190                 }
4191         }
4192
4193         /* Remove all pending commands belonging to this request */
4194         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4195         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4196                 if (bt_cb(skb)->req.start) {
4197                         __skb_queue_head(&hdev->cmd_q, skb);
4198                         break;
4199                 }
4200
4201                 req_complete = bt_cb(skb)->req.complete;
4202                 kfree_skb(skb);
4203         }
4204         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4205
4206 call_complete:
4207         if (req_complete)
4208                 req_complete(hdev, status);
4209 }
4210
4211 static void hci_rx_work(struct work_struct *work)
4212 {
4213         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4214         struct sk_buff *skb;
4215
4216         BT_DBG("%s", hdev->name);
4217
4218         while ((skb = skb_dequeue(&hdev->rx_q))) {
4219                 /* Send copy to monitor */
4220                 hci_send_to_monitor(hdev, skb);
4221
4222                 if (atomic_read(&hdev->promisc)) {
4223                         /* Send copy to the sockets */
4224                         hci_send_to_sock(hdev, skb);
4225                 }
4226
4227                 if (test_bit(HCI_RAW, &hdev->flags) ||
4228                     test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4229                         kfree_skb(skb);
4230                         continue;
4231                 }
4232
4233                 if (test_bit(HCI_INIT, &hdev->flags)) {
4234                         /* Don't process data packets in this state. */
4235                         switch (bt_cb(skb)->pkt_type) {
4236                         case HCI_ACLDATA_PKT:
4237                         case HCI_SCODATA_PKT:
4238                                 kfree_skb(skb);
4239                                 continue;
4240                         }
4241                 }
4242
4243                 /* Process frame */
4244                 switch (bt_cb(skb)->pkt_type) {
4245                 case HCI_EVENT_PKT:
4246                         BT_DBG("%s Event packet", hdev->name);
4247                         hci_event_packet(hdev, skb);
4248                         break;
4249
4250                 case HCI_ACLDATA_PKT:
4251                         BT_DBG("%s ACL data packet", hdev->name);
4252                         hci_acldata_packet(hdev, skb);
4253                         break;
4254
4255                 case HCI_SCODATA_PKT:
4256                         BT_DBG("%s SCO data packet", hdev->name);
4257                         hci_scodata_packet(hdev, skb);
4258                         break;
4259
4260                 default:
4261                         kfree_skb(skb);
4262                         break;
4263                 }
4264         }
4265 }
4266
4267 static void hci_cmd_work(struct work_struct *work)
4268 {
4269         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4270         struct sk_buff *skb;
4271
4272         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4273                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4274
4275         /* Send queued commands */
4276         if (atomic_read(&hdev->cmd_cnt)) {
4277                 skb = skb_dequeue(&hdev->cmd_q);
4278                 if (!skb)
4279                         return;
4280
4281                 kfree_skb(hdev->sent_cmd);
4282
4283                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4284                 if (hdev->sent_cmd) {
4285                         atomic_dec(&hdev->cmd_cnt);
4286                         hci_send_frame(hdev, skb);
4287                         if (test_bit(HCI_RESET, &hdev->flags))
4288                                 del_timer(&hdev->cmd_timer);
4289                         else
4290                                 mod_timer(&hdev->cmd_timer,
4291                                           jiffies + HCI_CMD_TIMEOUT);
4292                 } else {
4293                         skb_queue_head(&hdev->cmd_q, skb);
4294                         queue_work(hdev->workqueue, &hdev->cmd_work);
4295                 }
4296         }
4297 }