/* net/bluetooth/hci_core.c */

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
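
/* Usage sketch (illustrative only): a request-builder callback queues
 * commands on the struct hci_request it is given, and a caller runs it
 * synchronously under the request lock, e.g.:
 *
 *     static void my_read_bd_addr_req(struct hci_request *req,
 *                                     unsigned long opt)
 *     {
 *             hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
 *     }
 *
 *     err = __hci_req_sync(hdev, my_read_bd_addr_req, 0, HCI_INIT_TIMEOUT);
 *
 * my_read_bd_addr_req is a hypothetical example; hci_reset_req below is
 * a real instance of the same pattern.
 */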

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_request init_req;
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        hci_req_init(&init_req, hdev);

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                if (skb_queue_empty(&init_req.cmd_q))
                        bt_cb(skb)->req.start = true;

                skb_queue_tail(&init_req.cmd_q, skb);
        }
        skb_queue_purge(&hdev->driver_init);

        hci_req_run(&init_req, NULL);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 0x01;
        hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

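/* Pick the Write Inquiry Mode parameter for this controller:
 * 0x00 = standard inquiry results, 0x01 = inquiry results with RSSI,
 * 0x02 = inquiry results with RSSI or extended inquiry results.
 */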
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

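        /* Quirk entries for controllers that support Inquiry Result with
         * RSSI without advertising it in their feature bits, matched by
         * HCI revision and LMP subversion (manufacturer IDs 11, 15 and 31
         * are believed to be Silicon Wave, Broadcom and AVM Berlin).
         */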
        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * an event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }
}

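/* Three-stage controller init: stage one (hci_init1_req) runs any
 * driver-queued commands, resets the controller and reads basic
 * transport parameters; stage two (hci_init2_req) performs BR/EDR and
 * LE setup including the event mask; stage three (hci_init3_req)
 * configures link policy and LE host support.
 */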
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

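/* Re-insert @ie into the resolve list so that the list stays sorted by
 * ascending |RSSI| behind any entries whose name resolution is already
 * pending; remote names are thus resolved for the strongest signals
 * first.
 */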
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

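/* Update (or create) the cache entry for @data. Returns true if the
 * remote name is already known, i.e. no remote name request is needed,
 * and false otherwise (including on allocation failure).
 */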
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

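/* Copy up to @num cached entries into @buf as struct inquiry_info
 * records. Called under hci_dev_lock() and must not sleep, which is
 * why the caller allocates the buffer beforehand.
 */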
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

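/* HCIINQUIRY ioctl handler. A rough userspace sketch of the expected
 * call (illustrative only; assumes the usual BlueZ hci.h definitions
 * and elides error handling):
 *
 *     struct {
 *             struct hci_inquiry_req ir;
 *             struct inquiry_info    info[8];
 *     } buf = { { 0 } };
 *     int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *     buf.ir.dev_id  = 0;                    // hci0
 *     buf.ir.flags   = IREQ_CACHE_FLUSH;
 *     buf.ir.lap[0]  = 0x33;                 // GIAC 0x9e8b33
 *     buf.ir.lap[1]  = 0x8b;
 *     buf.ir.lap[2]  = 0x9e;
 *     buf.ir.length  = 8;                    // inquiry length
 *     buf.ir.num_rsp = 8;
 *     ioctl(dd, HCIINQUIRY, &buf);
 *
 * On return, buf.ir.num_rsp holds the number of info[] entries that
 * were filled in behind the request header.
 */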
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses we use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary
         * buffer here and copy it to user space afterwards.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

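/* Build LE advertising data at @ptr as a sequence of (length, AD type,
 * payload) structures: Flags, TX Power and a possibly shortened Local
 * Name. Returns the number of bytes written, which never exceeds
 * HCI_MAX_AD_LENGTH.
 */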
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
         * enable_hs is not set.
         */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                ret = __hci_init(hdev);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled.
         */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0;
        hdev->sco_cnt = 0;
        hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}
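
/* Userspace sketch for one of the ioctls above (illustrative only, no
 * error handling): enabling page and inquiry scan via HCISETSCAN, as
 * "hciconfig hci0 piscan" does:
 *
 *     struct hci_dev_req dr = {
 *             .dev_id  = 0,
 *             .dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *     };
 *     int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *     ioctl(dd, HCISETSCAN, (unsigned long) &dr);
 */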

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

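/* Decide whether a new link key should be stored persistently by user
 * space or flushed when the connection drops: debug keys and changed
 * combination keys without a previous key are never persistent, while
 * legacy keys and keys created with some form of bonding are.
 */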
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither the local nor the remote side requested no-bonding */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

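/* Store an SMP short-term or long-term key. Only long-term keys
 * (HCI_SMP_LTK) are reported up to the management interface; STKs are
 * kept kernel-internal.
 */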
1629 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1630                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1631                 ediv, u8 rand[8])
1632 {
1633         struct smp_ltk *key, *old_key;
1634
1635         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1636                 return 0;
1637
1638         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1639         if (old_key)
1640                 key = old_key;
1641         else {
1642                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1643                 if (!key)
1644                         return -ENOMEM;
1645                 list_add(&key->list, &hdev->long_term_keys);
1646         }
1647
1648         bacpy(&key->bdaddr, bdaddr);
1649         key->bdaddr_type = addr_type;
1650         memcpy(key->val, tk, sizeof(key->val));
1651         key->authenticated = authenticated;
1652         key->ediv = ediv;
1653         key->enc_size = enc_size;
1654         key->type = type;
1655         memcpy(key->rand, rand, sizeof(key->rand));
1656
1657         if (!new_key)
1658                 return 0;
1659
1660         if (type & HCI_SMP_LTK)
1661                 mgmt_new_ltk(hdev, key, 1);
1662
1663         return 0;
1664 }
1665
1666 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1667 {
1668         struct link_key *key;
1669
1670         key = hci_find_link_key(hdev, bdaddr);
1671         if (!key)
1672                 return -ENOENT;
1673
1674         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1675
1676         list_del(&key->list);
1677         kfree(key);
1678
1679         return 0;
1680 }
1681
1682 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1683 {
1684         struct smp_ltk *k, *tmp;
1685
1686         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1687                 if (bacmp(bdaddr, &k->bdaddr))
1688                         continue;
1689
1690                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1691
1692                 list_del(&k->list);
1693                 kfree(k);
1694         }
1695
1696         return 0;
1697 }
1698
1699 /* HCI command timer function */
1700 static void hci_cmd_timeout(unsigned long arg)
1701 {
1702         struct hci_dev *hdev = (void *) arg;
1703
1704         if (hdev->sent_cmd) {
1705                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1706                 u16 opcode = __le16_to_cpu(sent->opcode);
1707
1708                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1709         } else {
1710                 BT_ERR("%s command tx timeout", hdev->name);
1711         }
1712
1713         atomic_set(&hdev->cmd_cnt, 1);
1714         queue_work(hdev->workqueue, &hdev->cmd_work);
1715 }
1716
1717 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1718                                           bdaddr_t *bdaddr)
1719 {
1720         struct oob_data *data;
1721
1722         list_for_each_entry(data, &hdev->remote_oob_data, list)
1723                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1724                         return data;
1725
1726         return NULL;
1727 }
1728
1729 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1730 {
1731         struct oob_data *data;
1732
1733         data = hci_find_remote_oob_data(hdev, bdaddr);
1734         if (!data)
1735                 return -ENOENT;
1736
1737         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1738
1739         list_del(&data->list);
1740         kfree(data);
1741
1742         return 0;
1743 }
1744
1745 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1746 {
1747         struct oob_data *data, *n;
1748
1749         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1750                 list_del(&data->list);
1751                 kfree(data);
1752         }
1753
1754         return 0;
1755 }
1756
1757 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1758                             u8 *randomizer)
1759 {
1760         struct oob_data *data;
1761
1762         data = hci_find_remote_oob_data(hdev, bdaddr);
1763
1764         if (!data) {
1765                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1766                 if (!data)
1767                         return -ENOMEM;
1768
1769                 bacpy(&data->bdaddr, bdaddr);
1770                 list_add(&data->list, &hdev->remote_oob_data);
1771         }
1772
1773         memcpy(data->hash, hash, sizeof(data->hash));
1774         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1775
1776         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1777
1778         return 0;
1779 }
1780
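/* Illustrative sketch (an assumption): storing the Simple Pairing hash and
 * randomizer received out of band, e.g. over NFC. Callers such as mgmt take
 * hdev->lock around list updates like this one.
 */
static int __maybe_unused example_store_oob(struct hci_dev *hdev,
					    bdaddr_t *bdaddr, u8 hash[16],
					    u8 randomizer[16])
{
	int err;

	hci_dev_lock(hdev);
	err = hci_add_remote_oob_data(hdev, bdaddr, hash, randomizer);
	hci_dev_unlock(hdev);

	return err;
}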
1781 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1782 {
1783         struct bdaddr_list *b;
1784
1785         list_for_each_entry(b, &hdev->blacklist, list)
1786                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1787                         return b;
1788
1789         return NULL;
1790 }
1791
1792 int hci_blacklist_clear(struct hci_dev *hdev)
1793 {
1794         struct list_head *p, *n;
1795
1796         list_for_each_safe(p, n, &hdev->blacklist) {
1797                 struct bdaddr_list *b;
1798
1799                 b = list_entry(p, struct bdaddr_list, list);
1800
1801                 list_del(p);
1802                 kfree(b);
1803         }
1804
1805         return 0;
1806 }
1807
1808 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1809 {
1810         struct bdaddr_list *entry;
1811
1812         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1813                 return -EBADF;
1814
1815         if (hci_blacklist_lookup(hdev, bdaddr))
1816                 return -EEXIST;
1817
1818         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1819         if (!entry)
1820                 return -ENOMEM;
1821
1822         bacpy(&entry->bdaddr, bdaddr);
1823
1824         list_add(&entry->list, &hdev->blacklist);
1825
1826         return mgmt_device_blocked(hdev, bdaddr, type);
1827 }
1828
1829 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1830 {
1831         struct bdaddr_list *entry;
1832
1833         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1834                 return hci_blacklist_clear(hdev);
1835
1836         entry = hci_blacklist_lookup(hdev, bdaddr);
1837         if (!entry)
1838                 return -ENOENT;
1839
1840         list_del(&entry->list);
1841         kfree(entry);
1842
1843         return mgmt_device_unblocked(hdev, bdaddr, type);
1844 }
1845
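/* Illustrative sketch (an assumption): blocking a BR/EDR device. As with the
 * other list helpers, callers are expected to hold hdev->lock.
 */
static int __maybe_unused example_block_device(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}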
1846 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1847 {
1848         struct le_scan_params *param = (struct le_scan_params *) opt;
1849         struct hci_cp_le_set_scan_param cp;
1850
1851         memset(&cp, 0, sizeof(cp));
1852         cp.type = param->type;
1853         cp.interval = cpu_to_le16(param->interval);
1854         cp.window = cpu_to_le16(param->window);
1855
1856         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1857 }
1858
1859 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1860 {
1861         struct hci_cp_le_set_scan_enable cp;
1862
1863         memset(&cp, 0, sizeof(cp));
1864         cp.enable = 1;
1865         cp.filter_dup = 1;
1866
1867         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1868 }
1869
1870 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1871                           u16 window, int timeout)
1872 {
1873         long timeo = msecs_to_jiffies(3000);
1874         struct le_scan_params param;
1875         int err;
1876
1877         BT_DBG("%s", hdev->name);
1878
1879         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1880                 return -EINPROGRESS;
1881
1882         param.type = type;
1883         param.interval = interval;
1884         param.window = window;
1885
1886         hci_req_lock(hdev);
1887
1888         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1889                              timeo);
1890         if (!err)
1891                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
1892
1893         hci_req_unlock(hdev);
1894
1895         if (err < 0)
1896                 return err;
1897
1898         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1899                            msecs_to_jiffies(timeout));
1900
1901         return 0;
1902 }
1903
1904 int hci_cancel_le_scan(struct hci_dev *hdev)
1905 {
1906         BT_DBG("%s", hdev->name);
1907
1908         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1909                 return -EALREADY;
1910
1911         if (cancel_delayed_work(&hdev->le_scan_disable)) {
1912                 struct hci_cp_le_set_scan_enable cp;
1913
1914                 /* Send HCI command to disable LE Scan */
1915                 memset(&cp, 0, sizeof(cp));
1916                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1917         }
1918
1919         return 0;
1920 }
1921
1922 static void le_scan_disable_work(struct work_struct *work)
1923 {
1924         struct hci_dev *hdev = container_of(work, struct hci_dev,
1925                                             le_scan_disable.work);
1926         struct hci_cp_le_set_scan_enable cp;
1927
1928         BT_DBG("%s", hdev->name);
1929
1930         memset(&cp, 0, sizeof(cp));
1931
1932         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1933 }
1934
1935 static void le_scan_work(struct work_struct *work)
1936 {
1937         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1938         struct le_scan_params *param = &hdev->le_scan_params;
1939
1940         BT_DBG("%s", hdev->name);
1941
1942         hci_do_le_scan(hdev, param->type, param->interval, param->window,
1943                        param->timeout);
1944 }
1945
1946 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1947                 int timeout)
1948 {
1949         struct le_scan_params *param = &hdev->le_scan_params;
1950
1951         BT_DBG("%s", hdev->name);
1952
1953         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1954                 return -ENOTSUPP;
1955
1956         if (work_busy(&hdev->le_scan))
1957                 return -EINPROGRESS;
1958
1959         param->type = type;
1960         param->interval = interval;
1961         param->window = window;
1962         param->timeout = timeout;
1963
1964         queue_work(system_long_wq, &hdev->le_scan);
1965
1966         return 0;
1967 }
1968
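/* Example parameters (hypothetical values): interval and window are in
 * 0.625 ms units and timeout is in milliseconds, so
 *
 *	hci_le_scan(hdev, 0x01, 0x0060, 0x0012, 10240);
 *
 * requests an active scan with an 11.25 ms window every 60 ms, stopped again
 * by the le_scan_disable work roughly 10.24 seconds later.
 */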
1969 /* Alloc HCI device */
1970 struct hci_dev *hci_alloc_dev(void)
1971 {
1972         struct hci_dev *hdev;
1973
1974         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1975         if (!hdev)
1976                 return NULL;
1977
1978         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1979         hdev->esco_type = (ESCO_HV1);
1980         hdev->link_mode = (HCI_LM_ACCEPT);
1981         hdev->io_capability = 0x03; /* No Input No Output */
1982         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1983         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1984
1985         hdev->sniff_max_interval = 800;
1986         hdev->sniff_min_interval = 80;
1987
1988         mutex_init(&hdev->lock);
1989         mutex_init(&hdev->req_lock);
1990
1991         INIT_LIST_HEAD(&hdev->mgmt_pending);
1992         INIT_LIST_HEAD(&hdev->blacklist);
1993         INIT_LIST_HEAD(&hdev->uuids);
1994         INIT_LIST_HEAD(&hdev->link_keys);
1995         INIT_LIST_HEAD(&hdev->long_term_keys);
1996         INIT_LIST_HEAD(&hdev->remote_oob_data);
1997         INIT_LIST_HEAD(&hdev->conn_hash.list);
1998
1999         INIT_WORK(&hdev->rx_work, hci_rx_work);
2000         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2001         INIT_WORK(&hdev->tx_work, hci_tx_work);
2002         INIT_WORK(&hdev->power_on, hci_power_on);
2003         INIT_WORK(&hdev->le_scan, le_scan_work);
2004
2005         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2006         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2007         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2008
2009         skb_queue_head_init(&hdev->driver_init);
2010         skb_queue_head_init(&hdev->rx_q);
2011         skb_queue_head_init(&hdev->cmd_q);
2012         skb_queue_head_init(&hdev->raw_q);
2013
2014         init_waitqueue_head(&hdev->req_wait_q);
2015
2016         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2017
2018         hci_init_sysfs(hdev);
2019         discovery_init(hdev);
2020
2021         return hdev;
2022 }
2023 EXPORT_SYMBOL(hci_alloc_dev);
2024
2025 /* Free HCI device */
2026 void hci_free_dev(struct hci_dev *hdev)
2027 {
2028         skb_queue_purge(&hdev->driver_init);
2029
2030         /* will free via device release */
2031         put_device(&hdev->dev);
2032 }
2033 EXPORT_SYMBOL(hci_free_dev);
2034
2035 /* Register HCI device */
2036 int hci_register_dev(struct hci_dev *hdev)
2037 {
2038         int id, error;
2039
2040         if (!hdev->open || !hdev->close)
2041                 return -EINVAL;
2042
2043         /* Do not allow HCI_AMP devices to register at index 0,
2044          * so the index can be used as the AMP controller ID.
2045          */
2046         switch (hdev->dev_type) {
2047         case HCI_BREDR:
2048                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2049                 break;
2050         case HCI_AMP:
2051                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2052                 break;
2053         default:
2054                 return -EINVAL;
2055         }
2056
2057         if (id < 0)
2058                 return id;
2059
2060         sprintf(hdev->name, "hci%d", id);
2061         hdev->id = id;
2062
2063         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2064
2065         write_lock(&hci_dev_list_lock);
2066         list_add(&hdev->list, &hci_dev_list);
2067         write_unlock(&hci_dev_list_lock);
2068
2069         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2070                                           WQ_MEM_RECLAIM, 1);
2071         if (!hdev->workqueue) {
2072                 error = -ENOMEM;
2073                 goto err;
2074         }
2075
2076         hdev->req_workqueue = alloc_workqueue(hdev->name,
2077                                               WQ_HIGHPRI | WQ_UNBOUND |
2078                                               WQ_MEM_RECLAIM, 1);
2079         if (!hdev->req_workqueue) {
2080                 destroy_workqueue(hdev->workqueue);
2081                 error = -ENOMEM;
2082                 goto err;
2083         }
2084
2085         error = hci_add_sysfs(hdev);
2086         if (error < 0)
2087                 goto err_wqueue;
2088
2089         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2090                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2091                                     hdev);
2092         if (hdev->rfkill) {
2093                 if (rfkill_register(hdev->rfkill) < 0) {
2094                         rfkill_destroy(hdev->rfkill);
2095                         hdev->rfkill = NULL;
2096                 }
2097         }
2098
2099         set_bit(HCI_SETUP, &hdev->dev_flags);
2100
2101         if (hdev->dev_type != HCI_AMP)
2102                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2103
2104         hci_notify(hdev, HCI_DEV_REG);
2105         hci_dev_hold(hdev);
2106
2107         queue_work(hdev->req_workqueue, &hdev->power_on);
2108
2109         return id;
2110
2111 err_wqueue:
2112         destroy_workqueue(hdev->workqueue);
2113         destroy_workqueue(hdev->req_workqueue);
2114 err:
2115         ida_simple_remove(&hci_index_ida, hdev->id);
2116         write_lock(&hci_dev_list_lock);
2117         list_del(&hdev->list);
2118         write_unlock(&hci_dev_list_lock);
2119
2120         return error;
2121 }
2122 EXPORT_SYMBOL(hci_register_dev);
2123
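/* Illustrative driver-side sketch (an assumption, not part of this file):
 * the usual allocate/configure/register sequence a transport driver
 * performs. The example_* callbacks are hypothetical stubs.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static int __maybe_unused example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}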
2124 /* Unregister HCI device */
2125 void hci_unregister_dev(struct hci_dev *hdev)
2126 {
2127         int i, id;
2128
2129         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2130
2131         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2132
2133         id = hdev->id;
2134
2135         write_lock(&hci_dev_list_lock);
2136         list_del(&hdev->list);
2137         write_unlock(&hci_dev_list_lock);
2138
2139         hci_dev_do_close(hdev);
2140
2141         for (i = 0; i < NUM_REASSEMBLY; i++)
2142                 kfree_skb(hdev->reassembly[i]);
2143
2144         cancel_work_sync(&hdev->power_on);
2145
2146         if (!test_bit(HCI_INIT, &hdev->flags) &&
2147             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2148                 hci_dev_lock(hdev);
2149                 mgmt_index_removed(hdev);
2150                 hci_dev_unlock(hdev);
2151         }
2152
2153         /* mgmt_index_removed should take care of emptying the
2154          * pending list */
2155         BUG_ON(!list_empty(&hdev->mgmt_pending));
2156
2157         hci_notify(hdev, HCI_DEV_UNREG);
2158
2159         if (hdev->rfkill) {
2160                 rfkill_unregister(hdev->rfkill);
2161                 rfkill_destroy(hdev->rfkill);
2162         }
2163
2164         hci_del_sysfs(hdev);
2165
2166         destroy_workqueue(hdev->workqueue);
2167         destroy_workqueue(hdev->req_workqueue);
2168
2169         hci_dev_lock(hdev);
2170         hci_blacklist_clear(hdev);
2171         hci_uuids_clear(hdev);
2172         hci_link_keys_clear(hdev);
2173         hci_smp_ltks_clear(hdev);
2174         hci_remote_oob_data_clear(hdev);
2175         hci_dev_unlock(hdev);
2176
2177         hci_dev_put(hdev);
2178
2179         ida_simple_remove(&hci_index_ida, id);
2180 }
2181 EXPORT_SYMBOL(hci_unregister_dev);
2182
2183 /* Suspend HCI device */
2184 int hci_suspend_dev(struct hci_dev *hdev)
2185 {
2186         hci_notify(hdev, HCI_DEV_SUSPEND);
2187         return 0;
2188 }
2189 EXPORT_SYMBOL(hci_suspend_dev);
2190
2191 /* Resume HCI device */
2192 int hci_resume_dev(struct hci_dev *hdev)
2193 {
2194         hci_notify(hdev, HCI_DEV_RESUME);
2195         return 0;
2196 }
2197 EXPORT_SYMBOL(hci_resume_dev);
2198
2199 /* Receive frame from HCI drivers */
2200 int hci_recv_frame(struct sk_buff *skb)
2201 {
2202         struct hci_dev *hdev = (struct hci_dev *) skb->dev;

2203         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2204                       !test_bit(HCI_INIT, &hdev->flags))) {
2205                 kfree_skb(skb);
2206                 return -ENXIO;
2207         }
2208
2209         /* Incoming skb */
2210         bt_cb(skb)->incoming = 1;
2211
2212         /* Time stamp */
2213         __net_timestamp(skb);
2214
2215         skb_queue_tail(&hdev->rx_q, skb);
2216         queue_work(hdev->workqueue, &hdev->rx_work);
2217
2218         return 0;
2219 }
2220 EXPORT_SYMBOL(hci_recv_frame);
2221
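/* Illustrative sketch (an assumption): a driver hands a complete packet to
 * the core by tagging it with its type and owning device first.
 */
static int __maybe_unused example_deliver_event(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(skb);
}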
2222 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2223                           int count, __u8 index)
2224 {
2225         int len = 0;
2226         int hlen = 0;
2227         int remain = count;
2228         struct sk_buff *skb;
2229         struct bt_skb_cb *scb;
2230
2231         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2232             index >= NUM_REASSEMBLY)
2233                 return -EILSEQ;
2234
2235         skb = hdev->reassembly[index];
2236
2237         if (!skb) {
2238                 switch (type) {
2239                 case HCI_ACLDATA_PKT:
2240                         len = HCI_MAX_FRAME_SIZE;
2241                         hlen = HCI_ACL_HDR_SIZE;
2242                         break;
2243                 case HCI_EVENT_PKT:
2244                         len = HCI_MAX_EVENT_SIZE;
2245                         hlen = HCI_EVENT_HDR_SIZE;
2246                         break;
2247                 case HCI_SCODATA_PKT:
2248                         len = HCI_MAX_SCO_SIZE;
2249                         hlen = HCI_SCO_HDR_SIZE;
2250                         break;
2251                 }
2252
2253                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2254                 if (!skb)
2255                         return -ENOMEM;
2256
2257                 scb = (void *) skb->cb;
2258                 scb->expect = hlen;
2259                 scb->pkt_type = type;
2260
2261                 skb->dev = (void *) hdev;
2262                 hdev->reassembly[index] = skb;
2263         }
2264
2265         while (count) {
2266                 scb = (void *) skb->cb;
2267                 len = min_t(uint, scb->expect, count);
2268
2269                 memcpy(skb_put(skb, len), data, len);
2270
2271                 count -= len;
2272                 data += len;
2273                 scb->expect -= len;
2274                 remain = count;
2275
2276                 switch (type) {
2277                 case HCI_EVENT_PKT:
2278                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2279                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2280                                 scb->expect = h->plen;
2281
2282                                 if (skb_tailroom(skb) < scb->expect) {
2283                                         kfree_skb(skb);
2284                                         hdev->reassembly[index] = NULL;
2285                                         return -ENOMEM;
2286                                 }
2287                         }
2288                         break;
2289
2290                 case HCI_ACLDATA_PKT:
2291                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2292                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2293                                 scb->expect = __le16_to_cpu(h->dlen);
2294
2295                                 if (skb_tailroom(skb) < scb->expect) {
2296                                         kfree_skb(skb);
2297                                         hdev->reassembly[index] = NULL;
2298                                         return -ENOMEM;
2299                                 }
2300                         }
2301                         break;
2302
2303                 case HCI_SCODATA_PKT:
2304                         if (skb->len == HCI_SCO_HDR_SIZE) {
2305                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2306                                 scb->expect = h->dlen;
2307
2308                                 if (skb_tailroom(skb) < scb->expect) {
2309                                         kfree_skb(skb);
2310                                         hdev->reassembly[index] = NULL;
2311                                         return -ENOMEM;
2312                                 }
2313                         }
2314                         break;
2315                 }
2316
2317                 if (scb->expect == 0) {
2318                         /* Complete frame */
2319
2320                         bt_cb(skb)->pkt_type = type;
2321                         hci_recv_frame(skb);
2322
2323                         hdev->reassembly[index] = NULL;
2324                         return remain;
2325                 }
2326         }
2327
2328         return remain;
2329 }
2330
2331 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2332 {
2333         int rem = 0;
2334
2335         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2336                 return -EILSEQ;
2337
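        /* Packet types start at HCI_COMMAND_PKT (1), so type - 1 gives each
         * type its own hdev->reassembly[] slot; slot 0 is reused by the
         * stream reassembler below.
         */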
2338         while (count) {
2339                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2340                 if (rem < 0)
2341                         return rem;
2342
2343                 data += (count - rem);
2344                 count = rem;
2345         }
2346
2347         return rem;
2348 }
2349 EXPORT_SYMBOL(hci_recv_fragment);
2350
2351 #define STREAM_REASSEMBLY 0
2352
2353 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2354 {
2355         int type;
2356         int rem = 0;
2357
2358         while (count) {
2359                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2360
2361                 if (!skb) {
2362                         struct { char type; } *pkt;
2363
2364                         /* Start of the frame */
2365                         pkt = data;
2366                         type = pkt->type;
2367
2368                         data++;
2369                         count--;
2370                 } else {
2371                         type = bt_cb(skb)->pkt_type;
                }
2372
2373                 rem = hci_reassembly(hdev, type, data, count,
2374                                      STREAM_REASSEMBLY);
2375                 if (rem < 0)
2376                         return rem;
2377
2378                 data += (count - rem);
2379                 count = rem;
2380         }
2381
2382         return rem;
2383 }
2384 EXPORT_SYMBOL(hci_recv_stream_fragment);
2385
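/* Illustrative sketch (an assumption): a UART-style driver can push raw
 * bytes, H:4 packet-type prefix included, straight into the stream
 * reassembler and let it recover packet boundaries.
 */
static void __maybe_unused example_uart_rx(struct hci_dev *hdev, void *buf,
					   int count)
{
	if (hci_recv_stream_fragment(hdev, buf, count) < 0)
		BT_ERR("%s corrupted stream", hdev->name);
}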
2386 /* ---- Interface to upper protocols ---- */
2387
2388 int hci_register_cb(struct hci_cb *cb)
2389 {
2390         BT_DBG("%p name %s", cb, cb->name);
2391
2392         write_lock(&hci_cb_list_lock);
2393         list_add(&cb->list, &hci_cb_list);
2394         write_unlock(&hci_cb_list_lock);
2395
2396         return 0;
2397 }
2398 EXPORT_SYMBOL(hci_register_cb);
2399
2400 int hci_unregister_cb(struct hci_cb *cb)
2401 {
2402         BT_DBG("%p name %s", cb, cb->name);
2403
2404         write_lock(&hci_cb_list_lock);
2405         list_del(&cb->list);
2406         write_unlock(&hci_cb_list_lock);
2407
2408         return 0;
2409 }
2410 EXPORT_SYMBOL(hci_unregister_cb);
2411
2412 static int hci_send_frame(struct sk_buff *skb)
2413 {
2414         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2415
2416         if (!hdev) {
2417                 kfree_skb(skb);
2418                 return -ENODEV;
2419         }
2420
2421         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2422
2423         /* Time stamp */
2424         __net_timestamp(skb);
2425
2426         /* Send copy to monitor */
2427         hci_send_to_monitor(hdev, skb);
2428
2429         if (atomic_read(&hdev->promisc)) {
2430                 /* Send copy to the sockets */
2431                 hci_send_to_sock(hdev, skb);
2432         }
2433
2434         /* Get rid of skb owner, prior to sending to the driver. */
2435         skb_orphan(skb);
2436
2437         return hdev->send(skb);
2438 }
2439
2440 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2441 {
2442         skb_queue_head_init(&req->cmd_q);
2443         req->hdev = hdev;
2444         req->err = 0;
2445 }
2446
2447 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2448 {
2449         struct hci_dev *hdev = req->hdev;
2450         struct sk_buff *skb;
2451         unsigned long flags;
2452
2453         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2454
2455         /* If an error occurred during request building, remove all HCI
2456          * commands queued on the HCI request queue.
2457          */
2458         if (req->err) {
2459                 skb_queue_purge(&req->cmd_q);
2460                 return req->err;
2461         }
2462
2463         /* Do not allow empty requests */
2464         if (skb_queue_empty(&req->cmd_q))
2465                 return -ENODATA;
2466
2467         skb = skb_peek_tail(&req->cmd_q);
2468         bt_cb(skb)->req.complete = complete;
2469
2470         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2471         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2472         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2473
2474         queue_work(hdev->workqueue, &hdev->cmd_work);
2475
2476         return 0;
2477 }
2478
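/* Illustrative sketch (an assumption): building a request and running it
 * asynchronously. Both the opcode choice and the callback are hypothetical.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s example request done, status 0x%2.2x", hdev->name, status);
}

static int __maybe_unused example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	__le16 policy = cpu_to_le16(0x0005);

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(policy),
		    &policy);

	return hci_req_run(&req, example_req_complete);
}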
2479 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2480                                        u32 plen, void *param)
2481 {
2482         int len = HCI_COMMAND_HDR_SIZE + plen;
2483         struct hci_command_hdr *hdr;
2484         struct sk_buff *skb;
2485
2486         skb = bt_skb_alloc(len, GFP_ATOMIC);
2487         if (!skb)
2488                 return NULL;
2489
2490         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2491         hdr->opcode = cpu_to_le16(opcode);
2492         hdr->plen   = plen;
2493
2494         if (plen)
2495                 memcpy(skb_put(skb, plen), param, plen);
2496
2497         BT_DBG("skb len %d", skb->len);
2498
2499         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2500         skb->dev = (void *) hdev;
2501
2502         return skb;
2503 }
2504
2505 /* Send HCI command */
2506 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2507 {
2508         struct sk_buff *skb;
2509
2510         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2511
2512         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2513         if (!skb) {
2514                 BT_ERR("%s no memory for command", hdev->name);
2515                 return -ENOMEM;
2516         }
2517
2518         /* Stand-alone HCI commands must be flagged as
2519          * single-command requests.
2520          */
2521         bt_cb(skb)->req.start = true;
2522
2523         skb_queue_tail(&hdev->cmd_q, skb);
2524         queue_work(hdev->workqueue, &hdev->cmd_work);
2525
2526         return 0;
2527 }
2528
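/* Illustrative one-liner (an assumption): a stand-alone command, here
 * reading the local version information.
 */
static int __maybe_unused example_read_version(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}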
2529 /* Queue a command to an asynchronous HCI request */
2530 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2531 {
2532         struct hci_dev *hdev = req->hdev;
2533         struct sk_buff *skb;
2534
2535         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2536
2537         /* If an error occurred during request building, there is no point in
2538          * queueing the HCI command. We can simply return.
2539          */
2540         if (req->err)
2541                 return;
2542
2543         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2544         if (!skb) {
2545                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2546                        hdev->name, opcode);
2547                 req->err = -ENOMEM;
2548                 return;
2549         }
2550
2551         if (skb_queue_empty(&req->cmd_q))
2552                 bt_cb(skb)->req.start = true;
2553
2554         skb_queue_tail(&req->cmd_q, skb);
2555 }
2556
2557 /* Get data from the previously sent command */
2558 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2559 {
2560         struct hci_command_hdr *hdr;
2561
2562         if (!hdev->sent_cmd)
2563                 return NULL;
2564
2565         hdr = (void *) hdev->sent_cmd->data;
2566
2567         if (hdr->opcode != cpu_to_le16(opcode))
2568                 return NULL;
2569
2570         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2571
2572         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2573 }
2574
2575 /* Send ACL data */
2576 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2577 {
2578         struct hci_acl_hdr *hdr;
2579         int len = skb->len;
2580
2581         skb_push(skb, HCI_ACL_HDR_SIZE);
2582         skb_reset_transport_header(skb);
2583         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2584         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2585         hdr->dlen   = cpu_to_le16(len);
2586 }
2587
2588 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2589                           struct sk_buff *skb, __u16 flags)
2590 {
2591         struct hci_conn *conn = chan->conn;
2592         struct hci_dev *hdev = conn->hdev;
2593         struct sk_buff *list;
2594
2595         skb->len = skb_headlen(skb);
2596         skb->data_len = 0;
2597
2598         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2599
2600         switch (hdev->dev_type) {
2601         case HCI_BREDR:
2602                 hci_add_acl_hdr(skb, conn->handle, flags);
2603                 break;
2604         case HCI_AMP:
2605                 hci_add_acl_hdr(skb, chan->handle, flags);
2606                 break;
2607         default:
2608                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2609                 return;
2610         }
2611
2612         list = skb_shinfo(skb)->frag_list;
2613         if (!list) {
2614                 /* Non-fragmented */
2615                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2616
2617                 skb_queue_tail(queue, skb);
2618         } else {
2619                 /* Fragmented */
2620                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2621
2622                 skb_shinfo(skb)->frag_list = NULL;
2623
2624                 /* Queue all fragments atomically */
2625                 spin_lock(&queue->lock);
2626
2627                 __skb_queue_tail(queue, skb);
2628
2629                 flags &= ~ACL_START;
2630                 flags |= ACL_CONT;
2631                 do {
2632                         skb = list;
                        list = list->next;
2633
2634                         skb->dev = (void *) hdev;
2635                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2636                         hci_add_acl_hdr(skb, conn->handle, flags);
2637
2638                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2639
2640                         __skb_queue_tail(queue, skb);
2641                 } while (list);
2642
2643                 spin_unlock(&queue->lock);
2644         }
2645 }
2646
2647 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2648 {
2649         struct hci_dev *hdev = chan->conn->hdev;
2650
2651         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2652
2653         skb->dev = (void *) hdev;
2654
2655         hci_queue_acl(chan, &chan->data_q, skb, flags);
2656
2657         queue_work(hdev->workqueue, &hdev->tx_work);
2658 }
2659
2660 /* Send SCO data */
2661 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2662 {
2663         struct hci_dev *hdev = conn->hdev;
2664         struct hci_sco_hdr hdr;
2665
2666         BT_DBG("%s len %d", hdev->name, skb->len);
2667
2668         hdr.handle = cpu_to_le16(conn->handle);
2669         hdr.dlen   = skb->len;
2670
2671         skb_push(skb, HCI_SCO_HDR_SIZE);
2672         skb_reset_transport_header(skb);
2673         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2674
2675         skb->dev = (void *) hdev;
2676         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2677
2678         skb_queue_tail(&conn->data_q, skb);
2679         queue_work(hdev->workqueue, &hdev->tx_work);
2680 }
2681
2682 /* ---- HCI TX task (outgoing data) ---- */
2683
2684 /* HCI Connection scheduler */
2685 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2686                                      int *quote)
2687 {
2688         struct hci_conn_hash *h = &hdev->conn_hash;
2689         struct hci_conn *conn = NULL, *c;
2690         unsigned int num = 0, min = ~0;
2691
2692         /* We don't have to lock device here. Connections are always
2693          * added and removed with TX task disabled. */
2694
2695         rcu_read_lock();
2696
2697         list_for_each_entry_rcu(c, &h->list, list) {
2698                 if (c->type != type || skb_queue_empty(&c->data_q))
2699                         continue;
2700
2701                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2702                         continue;
2703
2704                 num++;
2705
2706                 if (c->sent < min) {
2707                         min  = c->sent;
2708                         conn = c;
2709                 }
2710
2711                 if (hci_conn_num(hdev, type) == num)
2712                         break;
2713         }
2714
2715         rcu_read_unlock();
2716
2717         if (conn) {
2718                 int cnt, q;
2719
2720                 switch (conn->type) {
2721                 case ACL_LINK:
2722                         cnt = hdev->acl_cnt;
2723                         break;
2724                 case SCO_LINK:
2725                 case ESCO_LINK:
2726                         cnt = hdev->sco_cnt;
2727                         break;
2728                 case LE_LINK:
2729                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2730                         break;
2731                 default:
2732                         cnt = 0;
2733                         BT_ERR("Unknown link type");
2734                 }
2735
2736                 q = cnt / num;
2737                 *quote = q ? q : 1;
2738         } else {
2739                 *quote = 0;
        }
2740
2741         BT_DBG("conn %p quote %d", conn, *quote);
2742         return conn;
2743 }
2744
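/*
 * The quote computed by hci_low_sent() gives simple fair scheduling: the
 * available controller buffers (cnt) are divided across the 'num'
 * connections that have data queued, and the connection with the fewest
 * packets in flight (lowest ->sent) is served first.
 */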
2745 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2746 {
2747         struct hci_conn_hash *h = &hdev->conn_hash;
2748         struct hci_conn *c;
2749
2750         BT_ERR("%s link tx timeout", hdev->name);
2751
2752         rcu_read_lock();
2753
2754         /* Kill stalled connections */
2755         list_for_each_entry_rcu(c, &h->list, list) {
2756                 if (c->type == type && c->sent) {
2757                         BT_ERR("%s killing stalled connection %pMR",
2758                                hdev->name, &c->dst);
2759                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2760                 }
2761         }
2762
2763         rcu_read_unlock();
2764 }
2765
2766 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2767                                       int *quote)
2768 {
2769         struct hci_conn_hash *h = &hdev->conn_hash;
2770         struct hci_chan *chan = NULL;
2771         unsigned int num = 0, min = ~0, cur_prio = 0;
2772         struct hci_conn *conn;
2773         int cnt, q, conn_num = 0;
2774
2775         BT_DBG("%s", hdev->name);
2776
2777         rcu_read_lock();
2778
2779         list_for_each_entry_rcu(conn, &h->list, list) {
2780                 struct hci_chan *tmp;
2781
2782                 if (conn->type != type)
2783                         continue;
2784
2785                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2786                         continue;
2787
2788                 conn_num++;
2789
2790                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2791                         struct sk_buff *skb;
2792
2793                         if (skb_queue_empty(&tmp->data_q))
2794                                 continue;
2795
2796                         skb = skb_peek(&tmp->data_q);
2797                         if (skb->priority < cur_prio)
2798                                 continue;
2799
2800                         if (skb->priority > cur_prio) {
2801                                 num = 0;
2802                                 min = ~0;
2803                                 cur_prio = skb->priority;
2804                         }
2805
2806                         num++;
2807
2808                         if (conn->sent < min) {
2809                                 min  = conn->sent;
2810                                 chan = tmp;
2811                         }
2812                 }
2813
2814                 if (hci_conn_num(hdev, type) == conn_num)
2815                         break;
2816         }
2817
2818         rcu_read_unlock();
2819
2820         if (!chan)
2821                 return NULL;
2822
2823         switch (chan->conn->type) {
2824         case ACL_LINK:
2825                 cnt = hdev->acl_cnt;
2826                 break;
2827         case AMP_LINK:
2828                 cnt = hdev->block_cnt;
2829                 break;
2830         case SCO_LINK:
2831         case ESCO_LINK:
2832                 cnt = hdev->sco_cnt;
2833                 break;
2834         case LE_LINK:
2835                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2836                 break;
2837         default:
2838                 cnt = 0;
2839                 BT_ERR("Unknown link type");
2840         }
2841
2842         q = cnt / num;
2843         *quote = q ? q : 1;
2844         BT_DBG("chan %p quote %d", chan, *quote);
2845         return chan;
2846 }
2847
2848 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2849 {
2850         struct hci_conn_hash *h = &hdev->conn_hash;
2851         struct hci_conn *conn;
2852         int num = 0;
2853
2854         BT_DBG("%s", hdev->name);
2855
2856         rcu_read_lock();
2857
2858         list_for_each_entry_rcu(conn, &h->list, list) {
2859                 struct hci_chan *chan;
2860
2861                 if (conn->type != type)
2862                         continue;
2863
2864                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2865                         continue;
2866
2867                 num++;
2868
2869                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2870                         struct sk_buff *skb;
2871
2872                         if (chan->sent) {
2873                                 chan->sent = 0;
2874                                 continue;
2875                         }
2876
2877                         if (skb_queue_empty(&chan->data_q))
2878                                 continue;
2879
2880                         skb = skb_peek(&chan->data_q);
2881                         if (skb->priority >= HCI_PRIO_MAX - 1)
2882                                 continue;
2883
2884                         skb->priority = HCI_PRIO_MAX - 1;
2885
2886                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2887                                skb->priority);
2888                 }
2889
2890                 if (hci_conn_num(hdev, type) == num)
2891                         break;
2892         }
2893
2894         rcu_read_unlock();
2896 }
2897
2898 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2899 {
2900         /* Calculate count of blocks used by this packet */
2901         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2902 }
2903
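/* Worked example (hypothetical numbers): with hdev->block_len = 339, a
 * 1024-byte ACL packet has 1024 - 4 bytes of payload after the header, so
 * __get_blocks() returns DIV_ROUND_UP(1020, 339) = 4 data blocks.
 */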
2904 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2905 {
2906         if (!test_bit(HCI_RAW, &hdev->flags)) {
2907                 /* ACL tx timeout must be longer than maximum
2908                  * link supervision timeout (40.9 seconds) */
2909                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2910                                        HCI_ACL_TX_TIMEOUT))
2911                         hci_link_tx_to(hdev, ACL_LINK);
2912         }
2913 }
2914
2915 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2916 {
2917         unsigned int cnt = hdev->acl_cnt;
2918         struct hci_chan *chan;
2919         struct sk_buff *skb;
2920         int quote;
2921
2922         __check_timeout(hdev, cnt);
2923
2924         while (hdev->acl_cnt &&
2925                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2926                 u32 priority = (skb_peek(&chan->data_q))->priority;
2927                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2928                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2929                                skb->len, skb->priority);
2930
2931                         /* Stop if priority has changed */
2932                         if (skb->priority < priority)
2933                                 break;
2934
2935                         skb = skb_dequeue(&chan->data_q);
2936
2937                         hci_conn_enter_active_mode(chan->conn,
2938                                                    bt_cb(skb)->force_active);
2939
2940                         hci_send_frame(skb);
2941                         hdev->acl_last_tx = jiffies;
2942
2943                         hdev->acl_cnt--;
2944                         chan->sent++;
2945                         chan->conn->sent++;
2946                 }
2947         }
2948
2949         if (cnt != hdev->acl_cnt)
2950                 hci_prio_recalculate(hdev, ACL_LINK);
2951 }
2952
2953 static void hci_sched_acl_blk(struct hci_dev *hdev)
2954 {
2955         unsigned int cnt = hdev->block_cnt;
2956         struct hci_chan *chan;
2957         struct sk_buff *skb;
2958         int quote;
2959         u8 type;
2960
2961         __check_timeout(hdev, cnt);
2962
2963         BT_DBG("%s", hdev->name);
2964
2965         if (hdev->dev_type == HCI_AMP)
2966                 type = AMP_LINK;
2967         else
2968                 type = ACL_LINK;
2969
2970         while (hdev->block_cnt > 0 &&
2971                (chan = hci_chan_sent(hdev, type, &quote))) {
2972                 u32 priority = (skb_peek(&chan->data_q))->priority;
2973                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2974                         int blocks;
2975
2976                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2977                                skb->len, skb->priority);
2978
2979                         /* Stop if priority has changed */
2980                         if (skb->priority < priority)
2981                                 break;
2982
2983                         skb = skb_dequeue(&chan->data_q);
2984
2985                         blocks = __get_blocks(hdev, skb);
2986                         if (blocks > hdev->block_cnt)
2987                                 return;
2988
2989                         hci_conn_enter_active_mode(chan->conn,
2990                                                    bt_cb(skb)->force_active);
2991
2992                         hci_send_frame(skb);
2993                         hdev->acl_last_tx = jiffies;
2994
2995                         hdev->block_cnt -= blocks;
2996                         quote -= blocks;
2997
2998                         chan->sent += blocks;
2999                         chan->conn->sent += blocks;
3000                 }
3001         }
3002
3003         if (cnt != hdev->block_cnt)
3004                 hci_prio_recalculate(hdev, type);
3005 }
3006
3007 static void hci_sched_acl(struct hci_dev *hdev)
3008 {
3009         BT_DBG("%s", hdev->name);
3010
3011         /* Nothing to schedule if a BR/EDR controller has no ACL links */
3012         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3013                 return;
3014
3015         /* Nothing to schedule if an AMP controller has no AMP links */
3016         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3017                 return;
3018
3019         switch (hdev->flow_ctl_mode) {
3020         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3021                 hci_sched_acl_pkt(hdev);
3022                 break;
3023
3024         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3025                 hci_sched_acl_blk(hdev);
3026                 break;
3027         }
3028 }
3029
3030 /* Schedule SCO */
3031 static void hci_sched_sco(struct hci_dev *hdev)
3032 {
3033         struct hci_conn *conn;
3034         struct sk_buff *skb;
3035         int quote;
3036
3037         BT_DBG("%s", hdev->name);
3038
3039         if (!hci_conn_num(hdev, SCO_LINK))
3040                 return;
3041
3042         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3043                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3044                         BT_DBG("skb %p len %d", skb, skb->len);
3045                         hci_send_frame(skb);
3046
3047                         conn->sent++;
3048                         if (conn->sent == ~0)
3049                                 conn->sent = 0;
3050                 }
3051         }
3052 }
3053
3054 static void hci_sched_esco(struct hci_dev *hdev)
3055 {
3056         struct hci_conn *conn;
3057         struct sk_buff *skb;
3058         int quote;
3059
3060         BT_DBG("%s", hdev->name);
3061
3062         if (!hci_conn_num(hdev, ESCO_LINK))
3063                 return;
3064
3065         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3066                                                      &quote))) {
3067                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3068                         BT_DBG("skb %p len %d", skb, skb->len);
3069                         hci_send_frame(skb);
3070
3071                         conn->sent++;
3072                         if (conn->sent == ~0)
3073                                 conn->sent = 0;
3074                 }
3075         }
3076 }
3077
3078 static void hci_sched_le(struct hci_dev *hdev)
3079 {
3080         struct hci_chan *chan;
3081         struct sk_buff *skb;
3082         int quote, cnt, tmp;
3083
3084         BT_DBG("%s", hdev->name);
3085
3086         if (!hci_conn_num(hdev, LE_LINK))
3087                 return;
3088
3089         if (!test_bit(HCI_RAW, &hdev->flags)) {
3090                 /* LE tx timeout must be longer than maximum
3091                  * link supervision timeout (40.9 seconds) */
3092                 if (!hdev->le_cnt && hdev->le_pkts &&
3093                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3094                         hci_link_tx_to(hdev, LE_LINK);
3095         }
3096
3097         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3098         tmp = cnt;
3099         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3100                 u32 priority = (skb_peek(&chan->data_q))->priority;
3101                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3102                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3103                                skb->len, skb->priority);
3104
3105                         /* Stop if priority has changed */
3106                         if (skb->priority < priority)
3107                                 break;
3108
3109                         skb = skb_dequeue(&chan->data_q);
3110
3111                         hci_send_frame(skb);
3112                         hdev->le_last_tx = jiffies;
3113
3114                         cnt--;
3115                         chan->sent++;
3116                         chan->conn->sent++;
3117                 }
3118         }
3119
3120         if (hdev->le_pkts)
3121                 hdev->le_cnt = cnt;
3122         else
3123                 hdev->acl_cnt = cnt;
3124
3125         if (cnt != tmp)
3126                 hci_prio_recalculate(hdev, LE_LINK);
3127 }
3128
3129 static void hci_tx_work(struct work_struct *work)
3130 {
3131         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3132         struct sk_buff *skb;
3133
3134         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3135                hdev->sco_cnt, hdev->le_cnt);
3136
3137         /* Schedule queues and send stuff to HCI driver */
3138
3139         hci_sched_acl(hdev);
3140
3141         hci_sched_sco(hdev);
3142
3143         hci_sched_esco(hdev);
3144
3145         hci_sched_le(hdev);
3146
3147         /* Send next queued raw (unknown type) packet */
3148         while ((skb = skb_dequeue(&hdev->raw_q)))
3149                 hci_send_frame(skb);
3150 }
3151
3152 /* ----- HCI RX task (incoming data processing) ----- */
3153
3154 /* ACL data packet */
3155 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3156 {
3157         struct hci_acl_hdr *hdr = (void *) skb->data;
3158         struct hci_conn *conn;
3159         __u16 handle, flags;
3160
3161         skb_pull(skb, HCI_ACL_HDR_SIZE);
3162
3163         handle = __le16_to_cpu(hdr->handle);
3164         flags  = hci_flags(handle);
3165         handle = hci_handle(handle);
3166
3167         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3168                handle, flags);
3169
3170         hdev->stat.acl_rx++;
3171
3172         hci_dev_lock(hdev);
3173         conn = hci_conn_hash_lookup_handle(hdev, handle);
3174         hci_dev_unlock(hdev);
3175
3176         if (conn) {
3177                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3178
3179                 /* Send to upper protocol */
3180                 l2cap_recv_acldata(conn, skb, flags);
3181                 return;
3182         } else {
3183                 BT_ERR("%s ACL packet for unknown connection handle %d",
3184                        hdev->name, handle);
3185         }
3186
3187         kfree_skb(skb);
3188 }
3189
3190 /* SCO data packet */
3191 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3192 {
3193         struct hci_sco_hdr *hdr = (void *) skb->data;
3194         struct hci_conn *conn;
3195         __u16 handle;
3196
3197         skb_pull(skb, HCI_SCO_HDR_SIZE);
3198
3199         handle = __le16_to_cpu(hdr->handle);
3200
3201         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3202
3203         hdev->stat.sco_rx++;
3204
3205         hci_dev_lock(hdev);
3206         conn = hci_conn_hash_lookup_handle(hdev, handle);
3207         hci_dev_unlock(hdev);
3208
3209         if (conn) {
3210                 /* Send to upper protocol */
3211                 sco_recv_scodata(conn, skb);
3212                 return;
3213         } else {
3214                 BT_ERR("%s SCO packet for unknown connection handle %d",
3215                        hdev->name, handle);
3216         }
3217
3218         kfree_skb(skb);
3219 }
3220
3221 static bool hci_req_is_complete(struct hci_dev *hdev)
3222 {
3223         struct sk_buff *skb;
3224
3225         skb = skb_peek(&hdev->cmd_q);
3226         if (!skb)
3227                 return true;
3228
3229         return bt_cb(skb)->req.start;
3230 }
3231
3232 static void hci_resend_last(struct hci_dev *hdev)
3233 {
3234         struct hci_command_hdr *sent;
3235         struct sk_buff *skb;
3236         u16 opcode;
3237
3238         if (!hdev->sent_cmd)
3239                 return;
3240
3241         sent = (void *) hdev->sent_cmd->data;
3242         opcode = __le16_to_cpu(sent->opcode);
3243         if (opcode == HCI_OP_RESET)
3244                 return;
3245
3246         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3247         if (!skb)
3248                 return;
3249
3250         skb_queue_head(&hdev->cmd_q, skb);
3251         queue_work(hdev->workqueue, &hdev->cmd_work);
3252 }
3253
3254 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3255 {
3256         hci_req_complete_t req_complete = NULL;
3257         struct sk_buff *skb;
3258         unsigned long flags;
3259
3260         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3261
3262         /* If the completed command doesn't match the last one that was
3263          * sent we need to do special handling of it.
3264          */
3265         if (!hci_sent_cmd_data(hdev, opcode)) {
3266                 /* Some CSR based controllers generate a spontaneous
3267                  * reset complete event during init and any pending
3268                  * command will never be completed. In such a case we
3269                  * need to resend whatever was the last sent
3270                  * command.
3271                  */
3272                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3273                         hci_resend_last(hdev);
3274
3275                 return;
3276         }
3277
3278         /* If the command succeeded and there's still more commands in
3279          * this request the request is not yet complete.
3280          */
3281         if (!status && !hci_req_is_complete(hdev))
3282                 return;
3283
3284         /* If this was the last command in a request the complete
3285          * callback would be found in hdev->sent_cmd instead of the
3286          * command queue (hdev->cmd_q).
3287          */
3288         if (hdev->sent_cmd) {
3289                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3290                 if (req_complete)
3291                         goto call_complete;
3292         }
3293
3294         /* Remove all pending commands belonging to this request */
3295         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3296         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3297                 if (bt_cb(skb)->req.start) {
3298                         __skb_queue_head(&hdev->cmd_q, skb);
3299                         break;
3300                 }
3301
3302                 req_complete = bt_cb(skb)->req.complete;
3303                 kfree_skb(skb);
3304         }
3305         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3306
3307 call_complete:
3308         if (req_complete)
3309                 req_complete(hdev, status);
3310 }
3311
3312 void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
3313 {
3314         hci_req_complete_t req_complete = NULL;
3315
3316         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3317
3318         if (status) {
3319                 hci_req_cmd_complete(hdev, opcode, status);
3320                 return;
3321         }
3322
3323         /* No need to handle success status if there are more commands */
3324         if (!hci_req_is_complete(hdev))
3325                 return;
3326
3327         if (hdev->sent_cmd)
3328                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3329
3330         /* If the request doesn't have a complete callback or there
3331          * are other commands/requests in the hdev queue we consider
3332          * this request as completed.
3333          */
3334         if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
3335                 hci_req_cmd_complete(hdev, opcode, status);
3336 }
3337
3338 static void hci_rx_work(struct work_struct *work)
3339 {
3340         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3341         struct sk_buff *skb;
3342
3343         BT_DBG("%s", hdev->name);
3344
3345         while ((skb = skb_dequeue(&hdev->rx_q))) {
3346                 /* Send copy to monitor */
3347                 hci_send_to_monitor(hdev, skb);
3348
3349                 if (atomic_read(&hdev->promisc)) {
3350                         /* Send copy to the sockets */
3351                         hci_send_to_sock(hdev, skb);
3352                 }
3353
3354                 if (test_bit(HCI_RAW, &hdev->flags)) {
3355                         kfree_skb(skb);
3356                         continue;
3357                 }
3358
3359                 if (test_bit(HCI_INIT, &hdev->flags)) {
3360                         /* Don't process data packets in this state. */
3361                         switch (bt_cb(skb)->pkt_type) {
3362                         case HCI_ACLDATA_PKT:
3363                         case HCI_SCODATA_PKT:
3364                                 kfree_skb(skb);
3365                                 continue;
3366                         }
3367                 }
3368
3369                 /* Process frame */
3370                 switch (bt_cb(skb)->pkt_type) {
3371                 case HCI_EVENT_PKT:
3372                         BT_DBG("%s Event packet", hdev->name);
3373                         hci_event_packet(hdev, skb);
3374                         break;
3375
3376                 case HCI_ACLDATA_PKT:
3377                         BT_DBG("%s ACL data packet", hdev->name);
3378                         hci_acldata_packet(hdev, skb);
3379                         break;
3380
3381                 case HCI_SCODATA_PKT:
3382                         BT_DBG("%s SCO data packet", hdev->name);
3383                         hci_scodata_packet(hdev, skb);
3384                         break;
3385
3386                 default:
3387                         kfree_skb(skb);
3388                         break;
3389                 }
3390         }
3391 }
3392
3393 static void hci_cmd_work(struct work_struct *work)
3394 {
3395         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3396         struct sk_buff *skb;
3397
3398         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3399                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3400
3401         /* Send queued commands */
3402         if (atomic_read(&hdev->cmd_cnt)) {
3403                 skb = skb_dequeue(&hdev->cmd_q);
3404                 if (!skb)
3405                         return;
3406
3407                 kfree_skb(hdev->sent_cmd);
3408
3409                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3410                 if (hdev->sent_cmd) {
3411                         atomic_dec(&hdev->cmd_cnt);
3412                         hci_send_frame(skb);
3413                         if (test_bit(HCI_RESET, &hdev->flags))
3414                                 del_timer(&hdev->cmd_timer);
3415                         else
3416                                 mod_timer(&hdev->cmd_timer,
3417                                           jiffies + HCI_CMD_TIMEOUT);
3418                 } else {
3419                         skb_queue_head(&hdev->cmd_q, skb);
3420                         queue_work(hdev->workqueue, &hdev->cmd_work);
3421                 }
3422         }
3423 }
3424
3425 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3426 {
3427         /* General inquiry access code (GIAC) */
3428         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3429         struct hci_cp_inquiry cp;
3430
3431         BT_DBG("%s", hdev->name);
3432
3433         if (test_bit(HCI_INQUIRY, &hdev->flags))
3434                 return -EINPROGRESS;
3435
3436         inquiry_cache_flush(hdev);
3437
3438         memset(&cp, 0, sizeof(cp));
3439         memcpy(&cp.lap, lap, sizeof(cp.lap));
3440         cp.length  = length;
3441
3442         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3443 }
3444
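/* Example (an assumption about typical usage): length is in units of
 * 1.28 seconds, so hci_do_inquiry(hdev, 0x08) scans for roughly 10.24
 * seconds using the general inquiry access code set up above.
 */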
3445 int hci_cancel_inquiry(struct hci_dev *hdev)
3446 {
3447         BT_DBG("%s", hdev->name);
3448
3449         if (!test_bit(HCI_INQUIRY, &hdev->flags))
3450                 return -EALREADY;
3451
3452         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3453 }
3454
3455 u8 bdaddr_to_le(u8 bdaddr_type)
3456 {
3457         switch (bdaddr_type) {
3458         case BDADDR_LE_PUBLIC:
3459                 return ADDR_LE_DEV_PUBLIC;
3460
3461         default:
3462                 /* Fallback to LE Random address type */
3463                 return ADDR_LE_DEV_RANDOM;
3464         }
3465 }