/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
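
/* Illustrative usage sketch (not taken from this file): a driver's
 * setup callback, which runs with hci_req_lock() already held, could
 * issue a single command and wait for its Command Complete like this:
 *
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                           HCI_INIT_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      ... parse the Command Complete parameters in skb->data ...
 *      kfree_skb(skb);
 *
 * The returned skb has the event and cmd_complete headers already
 * pulled, so skb->data points at the command's return parameters.
 */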

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
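
/* A minimal usage sketch (assumed caller shape, not from this file):
 * pair a request-builder function with hci_req_sync() to queue one or
 * more commands and block until the controller answers. The HCISETSCAN
 * ioctl below does effectively:
 *
 *      err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *                         HCI_INIT_TIMEOUT);
 *
 * where hci_scan_req() simply queues a Write Scan Enable command with
 * the value passed in opt.
 */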

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

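/* Pick the Inquiry Mode to configure, following the HCI specification
 * values: 0x00 = standard results, 0x01 = results with RSSI, 0x02 =
 * extended inquiry results. The manufacturer/revision checks below
 * work around controllers that deliver RSSI results without
 * advertising the corresponding feature bits.
 */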
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

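/* In the 8-byte HCI event mask, bit i enables the event with code
 * i + 1, i.e. byte n, bit m maps to event code 8 * n + m + 1. For
 * example, events[0] |= 0x10 (byte 0, bit 4) enables Disconnection
 * Complete (0x05), matching the per-event comments below.
 */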
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to
         * set any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They clearly indicate its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

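/* Controller bring-up runs in three synchronous stages: init1 resets
 * the controller and reads basic information (features, version,
 * address), init2 performs setup that depends on those results (event
 * mask, SSP, EIR, inquiry mode), and init3 applies final configuration
 * such as link policy, LE support and extended feature pages.
 */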
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE type controllers. AMP controllers only
         * need the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
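
/* Sketch of typical use (assumed caller, not from this file): every
 * successful hci_dev_get() must be balanced with hci_dev_put():
 *
 *      hdev = hci_dev_get(index);
 *      if (!hdev)
 *              return -ENODEV;
 *      ... use hdev ...
 *      hci_dev_put(hdev);
 */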

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

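/* Re-insert the entry into the resolve list so the list stays ordered
 * by ascending |RSSI|: the strongest devices get their names resolved
 * first, while entries whose name resolution is already in progress
 * (NAME_PENDING) are never displaced.
 */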
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

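/* Add or refresh an inquiry cache entry. Returns true if the remote
 * name is already known, i.e. no name resolution is needed, and false
 * if the name is still unknown or the entry could not be stored.
 */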
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
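
/* Userspace reaches hci_inquiry() through the HCIINQUIRY ioctl on an
 * HCI socket. An illustrative sketch using the BlueZ userspace
 * conventions (assumed names, not part of this file):
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } buf = { .ir = { .dev_id  = 0,
 *                        .length  = 8,
 *                        .num_rsp = 255,
 *                        .lap     = { 0x33, 0x8b, 0x9e } } };
 *
 *      ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 *
 * On return, ir.num_rsp holds the number of inquiry_info entries
 * copied back directly after the request structure.
 */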

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}
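
/* The buffer is filled with standard length/type/value advertising
 * data structures. For example (a sketch, assuming LE_AD_GENERAL is
 * 0x02 and LE_AD_NO_BREDR is 0x04), an LE-only controller in general
 * discoverable mode would begin with the flags structure 02 01 06
 * (length 2, type EIR_FLAGS, value 0x06), followed by the TX power
 * structure and the device name.
 */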

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        /* Check for rfkill but allow the HCI setup stage to proceed
         * (which in itself doesn't cause any RF activity).
         */
        if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non-BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (blocked) {
                set_bit(HCI_RFKILLED, &hdev->dev_flags);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                        hci_dev_do_close(hdev);
        } else {
                clear_bit(HCI_RFKILLED, &hdev->dev_flags);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};
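
/* The rfkill switch itself is allocated and registered during
 * hci_register_dev() (not part of this excerpt), roughly along the
 * lines of:
 *
 *      hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *                                  RFKILL_TYPE_BLUETOOTH,
 *                                  &hci_rfkill_ops, hdev);
 *
 * so that block/unblock events from the rfkill subsystem end up in
 * hci_rfkill_set_block() above.
 */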

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_dev_open(hdev->id);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }

        if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
                hci_dev_do_close(hdev);
        } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

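/* conn->auth_type and conn->remote_auth carry the HCI authentication
 * requirements values: 0x00/0x01 no bonding (without/with MITM
 * protection), 0x02/0x03 dedicated bonding, 0x04/0x05 general bonding.
 * The checks below therefore treat anything above 0x01 as a request
 * for some form of bonding.
 */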
1692 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1693                                u8 key_type, u8 old_key_type)
1694 {
1695         /* Legacy key */
1696         if (key_type < 0x03)
1697                 return true;
1698
1699         /* Debug keys are insecure so don't store them persistently */
1700         if (key_type == HCI_LK_DEBUG_COMBINATION)
1701                 return false;
1702
1703         /* Changed combination key and there's no previous one */
1704         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1705                 return false;
1706
1707         /* Security mode 3 case */
1708         if (!conn)
1709                 return true;
1710
1711         /* Neither the local nor the remote side set no-bonding as a requirement */
1712         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1713                 return true;
1714
1715         /* Local side had dedicated bonding as requirement */
1716         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1717                 return true;
1718
1719         /* Remote side had dedicated bonding as requirement */
1720         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1721                 return true;
1722
1723         /* If none of the above criteria match, then don't store the key
1724          * persistently */
1725         return false;
1726 }
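/* Worked example (illustrative, values from the Bluetooth spec): an
 * unauthenticated combination key (type 0x04) negotiated on a connection
 * where both sides requested general bonding (auth_type 0x04) passes the
 * "auth_type > 0x01 && remote_auth > 0x01" check above and is stored
 * persistently; the same key type negotiated with no-bonding (0x00/0x01)
 * on both ends falls through every check and is discarded with the
 * connection.
 */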
1727
1728 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1729 {
1730         struct smp_ltk *k;
1731
1732         list_for_each_entry(k, &hdev->long_term_keys, list) {
1733                 if (k->ediv != ediv ||
1734                     memcmp(rand, k->rand, sizeof(k->rand)))
1735                         continue;
1736
1737                 return k;
1738         }
1739
1740         return NULL;
1741 }
1742
1743 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1744                                      u8 addr_type)
1745 {
1746         struct smp_ltk *k;
1747
1748         list_for_each_entry(k, &hdev->long_term_keys, list)
1749                 if (addr_type == k->bdaddr_type &&
1750                     bacmp(bdaddr, &k->bdaddr) == 0)
1751                         return k;
1752
1753         return NULL;
1754 }
1755
1756 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1757                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1758 {
1759         struct link_key *key, *old_key;
1760         u8 old_key_type;
1761         bool persistent;
1762
1763         old_key = hci_find_link_key(hdev, bdaddr);
1764         if (old_key) {
1765                 old_key_type = old_key->type;
1766                 key = old_key;
1767         } else {
1768                 old_key_type = conn ? conn->key_type : 0xff;
1769                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1770                 if (!key)
1771                         return -ENOMEM;
1772                 list_add(&key->list, &hdev->link_keys);
1773         }
1774
1775         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1776
1777         /* Some buggy controller combinations generate a changed
1778          * combination key for legacy pairing even when there's no
1779          * previous key */
1780         if (type == HCI_LK_CHANGED_COMBINATION &&
1781             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1782                 type = HCI_LK_COMBINATION;
1783                 if (conn)
1784                         conn->key_type = type;
1785         }
1786
1787         bacpy(&key->bdaddr, bdaddr);
1788         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1789         key->pin_len = pin_len;
1790
1791         if (type == HCI_LK_CHANGED_COMBINATION)
1792                 key->type = old_key_type;
1793         else
1794                 key->type = type;
1795
1796         if (!new_key)
1797                 return 0;
1798
1799         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1800
1801         mgmt_new_link_key(hdev, key, persistent);
1802
1803         if (conn)
1804                 conn->flush_key = !persistent;
1805
1806         return 0;
1807 }
1808
1809 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1810                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
1811                 __le16 ediv, u8 rand[8])
1812 {
1813         struct smp_ltk *key, *old_key;
1814
1815         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1816                 return 0;
1817
1818         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1819         if (old_key) {
1820                 key = old_key;
1821         } else {
1822                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1823                 if (!key)
1824                         return -ENOMEM;
1825                 list_add(&key->list, &hdev->long_term_keys);
1826         }
1827
1828         bacpy(&key->bdaddr, bdaddr);
1829         key->bdaddr_type = addr_type;
1830         memcpy(key->val, tk, sizeof(key->val));
1831         key->authenticated = authenticated;
1832         key->ediv = ediv;
1833         key->enc_size = enc_size;
1834         key->type = type;
1835         memcpy(key->rand, rand, sizeof(key->rand));
1836
1837         if (!new_key)
1838                 return 0;
1839
1840         if (type & HCI_SMP_LTK)
1841                 mgmt_new_ltk(hdev, key, 1);
1842
1843         return 0;
1844 }
1845
1846 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1847 {
1848         struct link_key *key;
1849
1850         key = hci_find_link_key(hdev, bdaddr);
1851         if (!key)
1852                 return -ENOENT;
1853
1854         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1855
1856         list_del(&key->list);
1857         kfree(key);
1858
1859         return 0;
1860 }
1861
1862 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1863 {
1864         struct smp_ltk *k, *tmp;
1865
1866         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1867                 if (bacmp(bdaddr, &k->bdaddr))
1868                         continue;
1869
1870                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1871
1872                 list_del(&k->list);
1873                 kfree(k);
1874         }
1875
1876         return 0;
1877 }
1878
1879 /* HCI command timer function */
1880 static void hci_cmd_timeout(unsigned long arg)
1881 {
1882         struct hci_dev *hdev = (void *) arg;
1883
1884         if (hdev->sent_cmd) {
1885                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1886                 u16 opcode = __le16_to_cpu(sent->opcode);
1887
1888                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1889         } else {
1890                 BT_ERR("%s command tx timeout", hdev->name);
1891         }
1892
1893         atomic_set(&hdev->cmd_cnt, 1);
1894         queue_work(hdev->workqueue, &hdev->cmd_work);
1895 }
1896
1897 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1898                                           bdaddr_t *bdaddr)
1899 {
1900         struct oob_data *data;
1901
1902         list_for_each_entry(data, &hdev->remote_oob_data, list)
1903                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1904                         return data;
1905
1906         return NULL;
1907 }
1908
1909 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1910 {
1911         struct oob_data *data;
1912
1913         data = hci_find_remote_oob_data(hdev, bdaddr);
1914         if (!data)
1915                 return -ENOENT;
1916
1917         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1918
1919         list_del(&data->list);
1920         kfree(data);
1921
1922         return 0;
1923 }
1924
1925 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1926 {
1927         struct oob_data *data, *n;
1928
1929         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1930                 list_del(&data->list);
1931                 kfree(data);
1932         }
1933
1934         return 0;
1935 }
1936
1937 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1938                             u8 *randomizer)
1939 {
1940         struct oob_data *data;
1941
1942         data = hci_find_remote_oob_data(hdev, bdaddr);
1943
1944         if (!data) {
1945                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1946                 if (!data)
1947                         return -ENOMEM;
1948
1949                 bacpy(&data->bdaddr, bdaddr);
1950                 list_add(&data->list, &hdev->remote_oob_data);
1951         }
1952
1953         memcpy(data->hash, hash, sizeof(data->hash));
1954         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1955
1956         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1957
1958         return 0;
1959 }
1960
1961 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1962 {
1963         struct bdaddr_list *b;
1964
1965         list_for_each_entry(b, &hdev->blacklist, list)
1966                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1967                         return b;
1968
1969         return NULL;
1970 }
1971
1972 int hci_blacklist_clear(struct hci_dev *hdev)
1973 {
1974         struct list_head *p, *n;
1975
1976         list_for_each_safe(p, n, &hdev->blacklist) {
1977                 struct bdaddr_list *b;
1978
1979                 b = list_entry(p, struct bdaddr_list, list);
1980
1981                 list_del(p);
1982                 kfree(b);
1983         }
1984
1985         return 0;
1986 }
1987
1988 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1989 {
1990         struct bdaddr_list *entry;
1991
1992         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1993                 return -EBADF;
1994
1995         if (hci_blacklist_lookup(hdev, bdaddr))
1996                 return -EEXIST;
1997
1998         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1999         if (!entry)
2000                 return -ENOMEM;
2001
2002         bacpy(&entry->bdaddr, bdaddr);
2003
2004         list_add(&entry->list, &hdev->blacklist);
2005
2006         return mgmt_device_blocked(hdev, bdaddr, type);
2007 }
2008
2009 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2010 {
2011         struct bdaddr_list *entry;
2012
2013         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2014                 return hci_blacklist_clear(hdev);
2015
2016         entry = hci_blacklist_lookup(hdev, bdaddr);
2017         if (!entry)
2018                 return -ENOENT;
2019
2020         list_del(&entry->list);
2021         kfree(entry);
2022
2023         return mgmt_device_unblocked(hdev, bdaddr, type);
2024 }
2025
2026 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2027 {
2028         if (status) {
2029                 BT_ERR("Failed to start inquiry: status %d", status);
2030
2031                 hci_dev_lock(hdev);
2032                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2033                 hci_dev_unlock(hdev);
2034                 return;
2035         }
2036 }
2037
2038 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2039 {
2040         /* General inquiry access code (GIAC) */
2041         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2042         struct hci_request req;
2043         struct hci_cp_inquiry cp;
2044         int err;
2045
2046         if (status) {
2047                 BT_ERR("Failed to disable LE scanning: status %d", status);
2048                 return;
2049         }
2050
2051         switch (hdev->discovery.type) {
2052         case DISCOV_TYPE_LE:
2053                 hci_dev_lock(hdev);
2054                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2055                 hci_dev_unlock(hdev);
2056                 break;
2057
2058         case DISCOV_TYPE_INTERLEAVED:
2059                 hci_req_init(&req, hdev);
2060
2061                 memset(&cp, 0, sizeof(cp));
2062                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2063                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2064                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2065
2066                 hci_dev_lock(hdev);
2067
2068                 hci_inquiry_cache_flush(hdev);
2069
2070                 err = hci_req_run(&req, inquiry_complete);
2071                 if (err) {
2072                         BT_ERR("Inquiry request failed: err %d", err);
2073                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2074                 }
2075
2076                 hci_dev_unlock(hdev);
2077                 break;
2078         }
2079 }
2080
2081 static void le_scan_disable_work(struct work_struct *work)
2082 {
2083         struct hci_dev *hdev = container_of(work, struct hci_dev,
2084                                             le_scan_disable.work);
2085         struct hci_cp_le_set_scan_enable cp;
2086         struct hci_request req;
2087         int err;
2088
2089         BT_DBG("%s", hdev->name);
2090
2091         hci_req_init(&req, hdev);
2092
2093         memset(&cp, 0, sizeof(cp));
2094         cp.enable = LE_SCAN_DISABLE;
2095         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2096
2097         err = hci_req_run(&req, le_scan_disable_work_complete);
2098         if (err)
2099                 BT_ERR("Disable LE scanning request failed: err %d", err);
2100 }
2101
2102 /* Alloc HCI device */
2103 struct hci_dev *hci_alloc_dev(void)
2104 {
2105         struct hci_dev *hdev;
2106
2107         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2108         if (!hdev)
2109                 return NULL;
2110
2111         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2112         hdev->esco_type = (ESCO_HV1);
2113         hdev->link_mode = (HCI_LM_ACCEPT);
2114         hdev->io_capability = 0x03; /* No Input No Output */
2115         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2116         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2117
2118         hdev->sniff_max_interval = 800;
2119         hdev->sniff_min_interval = 80;
2120
2121         mutex_init(&hdev->lock);
2122         mutex_init(&hdev->req_lock);
2123
2124         INIT_LIST_HEAD(&hdev->mgmt_pending);
2125         INIT_LIST_HEAD(&hdev->blacklist);
2126         INIT_LIST_HEAD(&hdev->uuids);
2127         INIT_LIST_HEAD(&hdev->link_keys);
2128         INIT_LIST_HEAD(&hdev->long_term_keys);
2129         INIT_LIST_HEAD(&hdev->remote_oob_data);
2130         INIT_LIST_HEAD(&hdev->conn_hash.list);
2131
2132         INIT_WORK(&hdev->rx_work, hci_rx_work);
2133         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2134         INIT_WORK(&hdev->tx_work, hci_tx_work);
2135         INIT_WORK(&hdev->power_on, hci_power_on);
2136
2137         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2138         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2139         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2140
2141         skb_queue_head_init(&hdev->rx_q);
2142         skb_queue_head_init(&hdev->cmd_q);
2143         skb_queue_head_init(&hdev->raw_q);
2144
2145         init_waitqueue_head(&hdev->req_wait_q);
2146
2147         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2148
2149         hci_init_sysfs(hdev);
2150         discovery_init(hdev);
2151
2152         return hdev;
2153 }
2154 EXPORT_SYMBOL(hci_alloc_dev);
2155
2156 /* Free HCI device */
2157 void hci_free_dev(struct hci_dev *hdev)
2158 {
2159         /* will free via device release */
2160         put_device(&hdev->dev);
2161 }
2162 EXPORT_SYMBOL(hci_free_dev);
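/* Usage sketch (illustrative only; the example_* hooks are hypothetical
 * driver callbacks, not defined in this file): a transport driver drives
 * the allocate/register/unregister/free lifecycle roughly like this:
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->bus   = HCI_USB;
 *      hdev->open  = example_open;
 *      hdev->close = example_close;
 *      hdev->send  = example_send;
 *
 *      err = hci_register_dev(hdev);
 *      if (err < 0) {
 *              hci_free_dev(hdev);
 *              return err;
 *      }
 *
 * and on removal:
 *
 *      hci_unregister_dev(hdev);
 *      hci_free_dev(hdev);
 */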
2163
2164 /* Register HCI device */
2165 int hci_register_dev(struct hci_dev *hdev)
2166 {
2167         int id, error;
2168
2169         if (!hdev->open || !hdev->close)
2170                 return -EINVAL;
2171
2172         /* Do not allow HCI_AMP devices to register at index 0,
2173          * so the index can be used as the AMP controller ID.
2174          */
2175         switch (hdev->dev_type) {
2176         case HCI_BREDR:
2177                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2178                 break;
2179         case HCI_AMP:
2180                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2181                 break;
2182         default:
2183                 return -EINVAL;
2184         }
2185
2186         if (id < 0)
2187                 return id;
2188
2189         sprintf(hdev->name, "hci%d", id);
2190         hdev->id = id;
2191
2192         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2193
2194         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2195                                           WQ_MEM_RECLAIM, 1, hdev->name);
2196         if (!hdev->workqueue) {
2197                 error = -ENOMEM;
2198                 goto err;
2199         }
2200
2201         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2202                                               WQ_MEM_RECLAIM, 1, hdev->name);
2203         if (!hdev->req_workqueue) {
2204                 destroy_workqueue(hdev->workqueue);
2205                 error = -ENOMEM;
2206                 goto err;
2207         }
2208
2209         error = hci_add_sysfs(hdev);
2210         if (error < 0)
2211                 goto err_wqueue;
2212
2213         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2214                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2215                                     hdev);
2216         if (hdev->rfkill) {
2217                 if (rfkill_register(hdev->rfkill) < 0) {
2218                         rfkill_destroy(hdev->rfkill);
2219                         hdev->rfkill = NULL;
2220                 }
2221         }
2222
2223         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2224                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2225
2226         set_bit(HCI_SETUP, &hdev->dev_flags);
2227
2228         if (hdev->dev_type != HCI_AMP)
2229                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2230
2231         write_lock(&hci_dev_list_lock);
2232         list_add(&hdev->list, &hci_dev_list);
2233         write_unlock(&hci_dev_list_lock);
2234
2235         hci_notify(hdev, HCI_DEV_REG);
2236         hci_dev_hold(hdev);
2237
2238         queue_work(hdev->req_workqueue, &hdev->power_on);
2239
2240         return id;
2241
2242 err_wqueue:
2243         destroy_workqueue(hdev->workqueue);
2244         destroy_workqueue(hdev->req_workqueue);
2245 err:
2246         ida_simple_remove(&hci_index_ida, hdev->id);
2247
2248         return error;
2249 }
2250 EXPORT_SYMBOL(hci_register_dev);
2251
2252 /* Unregister HCI device */
2253 void hci_unregister_dev(struct hci_dev *hdev)
2254 {
2255         int i, id;
2256
2257         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2258
2259         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2260
2261         id = hdev->id;
2262
2263         write_lock(&hci_dev_list_lock);
2264         list_del(&hdev->list);
2265         write_unlock(&hci_dev_list_lock);
2266
2267         hci_dev_do_close(hdev);
2268
2269         for (i = 0; i < NUM_REASSEMBLY; i++)
2270                 kfree_skb(hdev->reassembly[i]);
2271
2272         cancel_work_sync(&hdev->power_on);
2273
2274         if (!test_bit(HCI_INIT, &hdev->flags) &&
2275             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2276                 hci_dev_lock(hdev);
2277                 mgmt_index_removed(hdev);
2278                 hci_dev_unlock(hdev);
2279         }
2280
2281         /* mgmt_index_removed should take care of emptying the
2282          * pending list */
2283         BUG_ON(!list_empty(&hdev->mgmt_pending));
2284
2285         hci_notify(hdev, HCI_DEV_UNREG);
2286
2287         if (hdev->rfkill) {
2288                 rfkill_unregister(hdev->rfkill);
2289                 rfkill_destroy(hdev->rfkill);
2290         }
2291
2292         hci_del_sysfs(hdev);
2293
2294         destroy_workqueue(hdev->workqueue);
2295         destroy_workqueue(hdev->req_workqueue);
2296
2297         hci_dev_lock(hdev);
2298         hci_blacklist_clear(hdev);
2299         hci_uuids_clear(hdev);
2300         hci_link_keys_clear(hdev);
2301         hci_smp_ltks_clear(hdev);
2302         hci_remote_oob_data_clear(hdev);
2303         hci_dev_unlock(hdev);
2304
2305         hci_dev_put(hdev);
2306
2307         ida_simple_remove(&hci_index_ida, id);
2308 }
2309 EXPORT_SYMBOL(hci_unregister_dev);
2310
2311 /* Suspend HCI device */
2312 int hci_suspend_dev(struct hci_dev *hdev)
2313 {
2314         hci_notify(hdev, HCI_DEV_SUSPEND);
2315         return 0;
2316 }
2317 EXPORT_SYMBOL(hci_suspend_dev);
2318
2319 /* Resume HCI device */
2320 int hci_resume_dev(struct hci_dev *hdev)
2321 {
2322         hci_notify(hdev, HCI_DEV_RESUME);
2323         return 0;
2324 }
2325 EXPORT_SYMBOL(hci_resume_dev);
2326
2327 /* Receive frame from HCI drivers */
2328 int hci_recv_frame(struct sk_buff *skb)
2329 {
2330         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2331         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2332                       !test_bit(HCI_INIT, &hdev->flags))) {
2333                 kfree_skb(skb);
2334                 return -ENXIO;
2335         }
2336
2337         /* Incoming skb */
2338         bt_cb(skb)->incoming = 1;
2339
2340         /* Time stamp */
2341         __net_timestamp(skb);
2342
2343         skb_queue_tail(&hdev->rx_q, skb);
2344         queue_work(hdev->workqueue, &hdev->rx_work);
2345
2346         return 0;
2347 }
2348 EXPORT_SYMBOL(hci_recv_frame);
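/* Usage sketch (illustrative; "buf" and "count" are hypothetical
 * driver-side variables): a driver that already holds a complete frame
 * hands it to the core like this:
 *
 *      skb = bt_skb_alloc(count, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *
 *      memcpy(skb_put(skb, count), buf, count);
 *      bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *      skb->dev = (void *) hdev;
 *
 *      hci_recv_frame(skb);
 */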
2349
2350 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2351                           int count, __u8 index)
2352 {
2353         int len = 0;
2354         int hlen = 0;
2355         int remain = count;
2356         struct sk_buff *skb;
2357         struct bt_skb_cb *scb;
2358
2359         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2360             index >= NUM_REASSEMBLY)
2361                 return -EILSEQ;
2362
2363         skb = hdev->reassembly[index];
2364
2365         if (!skb) {
2366                 switch (type) {
2367                 case HCI_ACLDATA_PKT:
2368                         len = HCI_MAX_FRAME_SIZE;
2369                         hlen = HCI_ACL_HDR_SIZE;
2370                         break;
2371                 case HCI_EVENT_PKT:
2372                         len = HCI_MAX_EVENT_SIZE;
2373                         hlen = HCI_EVENT_HDR_SIZE;
2374                         break;
2375                 case HCI_SCODATA_PKT:
2376                         len = HCI_MAX_SCO_SIZE;
2377                         hlen = HCI_SCO_HDR_SIZE;
2378                         break;
2379                 }
2380
2381                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2382                 if (!skb)
2383                         return -ENOMEM;
2384
2385                 scb = (void *) skb->cb;
2386                 scb->expect = hlen;
2387                 scb->pkt_type = type;
2388
2389                 skb->dev = (void *) hdev;
2390                 hdev->reassembly[index] = skb;
2391         }
2392
2393         while (count) {
2394                 scb = (void *) skb->cb;
2395                 len = min_t(uint, scb->expect, count);
2396
2397                 memcpy(skb_put(skb, len), data, len);
2398
2399                 count -= len;
2400                 data += len;
2401                 scb->expect -= len;
2402                 remain = count;
2403
2404                 switch (type) {
2405                 case HCI_EVENT_PKT:
2406                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2407                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2408                                 scb->expect = h->plen;
2409
2410                                 if (skb_tailroom(skb) < scb->expect) {
2411                                         kfree_skb(skb);
2412                                         hdev->reassembly[index] = NULL;
2413                                         return -ENOMEM;
2414                                 }
2415                         }
2416                         break;
2417
2418                 case HCI_ACLDATA_PKT:
2419                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2420                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2421                                 scb->expect = __le16_to_cpu(h->dlen);
2422
2423                                 if (skb_tailroom(skb) < scb->expect) {
2424                                         kfree_skb(skb);
2425                                         hdev->reassembly[index] = NULL;
2426                                         return -ENOMEM;
2427                                 }
2428                         }
2429                         break;
2430
2431                 case HCI_SCODATA_PKT:
2432                         if (skb->len == HCI_SCO_HDR_SIZE) {
2433                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2434                                 scb->expect = h->dlen;
2435
2436                                 if (skb_tailroom(skb) < scb->expect) {
2437                                         kfree_skb(skb);
2438                                         hdev->reassembly[index] = NULL;
2439                                         return -ENOMEM;
2440                                 }
2441                         }
2442                         break;
2443                 }
2444
2445                 if (scb->expect == 0) {
2446                         /* Complete frame */
2447
2448                         bt_cb(skb)->pkt_type = type;
2449                         hci_recv_frame(skb);
2450
2451                         hdev->reassembly[index] = NULL;
2452                         return remain;
2453                 }
2454         }
2455
2456         return remain;
2457 }
2458
2459 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2460 {
2461         int rem = 0;
2462
2463         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2464                 return -EILSEQ;
2465
2466         while (count) {
2467                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2468                 if (rem < 0)
2469                         return rem;
2470
2471                 data += (count - rem);
2472                 count = rem;
2473         }
2474
2475         return rem;
2476 }
2477 EXPORT_SYMBOL(hci_recv_fragment);
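/* Usage sketch (illustrative): a USB transport receiving ACL data in
 * arbitrarily sized URB chunks can feed each chunk straight in and let
 * hci_reassembly() above rebuild complete frames:
 *
 *      err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT,
 *                              urb->transfer_buffer, urb->actual_length);
 *      if (err < 0)
 *              BT_ERR("Corrupted ACL stream");
 */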
2478
2479 #define STREAM_REASSEMBLY 0
2480
2481 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2482 {
2483         int type;
2484         int rem = 0;
2485
2486         while (count) {
2487                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2488
2489                 if (!skb) {
2490                         struct { char type; } *pkt;
2491
2492                         /* Start of the frame */
2493                         pkt = data;
2494                         type = pkt->type;
2495
2496                         data++;
2497                         count--;
2498                 } else
2499                         type = bt_cb(skb)->pkt_type;
2500
2501                 rem = hci_reassembly(hdev, type, data, count,
2502                                      STREAM_REASSEMBLY);
2503                 if (rem < 0)
2504                         return rem;
2505
2506                 data += (count - rem);
2507                 count = rem;
2508         }
2509
2510         return rem;
2511 }
2512 EXPORT_SYMBOL(hci_recv_stream_fragment);
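/* Usage sketch (illustrative): UART-style transports whose byte stream
 * carries its own packet-type octets (H:4 framing) do not need to split
 * packets themselves; the receive path simply passes the raw bytes:
 *
 *      hci_recv_stream_fragment(hdev, data, count);
 *
 * The leading type octet of each frame is consumed here and the payload
 * is then reassembled exactly as in hci_recv_fragment().
 */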
2513
2514 /* ---- Interface to upper protocols ---- */
2515
2516 int hci_register_cb(struct hci_cb *cb)
2517 {
2518         BT_DBG("%p name %s", cb, cb->name);
2519
2520         write_lock(&hci_cb_list_lock);
2521         list_add(&cb->list, &hci_cb_list);
2522         write_unlock(&hci_cb_list_lock);
2523
2524         return 0;
2525 }
2526 EXPORT_SYMBOL(hci_register_cb);
2527
2528 int hci_unregister_cb(struct hci_cb *cb)
2529 {
2530         BT_DBG("%p name %s", cb, cb->name);
2531
2532         write_lock(&hci_cb_list_lock);
2533         list_del(&cb->list);
2534         write_unlock(&hci_cb_list_lock);
2535
2536         return 0;
2537 }
2538 EXPORT_SYMBOL(hci_unregister_cb);
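/* Usage sketch (illustrative; "example_cb" and its callback are
 * hypothetical): an upper protocol hooks into connection events by
 * registering a callback structure for the lifetime of its module:
 *
 *      static struct hci_cb example_cb = {
 *              .name         = "example",
 *              .security_cfm = example_security_cfm,
 *      };
 *
 *      hci_register_cb(&example_cb);
 *      ...
 *      hci_unregister_cb(&example_cb);
 */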
2539
2540 static int hci_send_frame(struct sk_buff *skb)
2541 {
2542         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2543
2544         if (!hdev) {
2545                 kfree_skb(skb);
2546                 return -ENODEV;
2547         }
2548
2549         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2550
2551         /* Time stamp */
2552         __net_timestamp(skb);
2553
2554         /* Send copy to monitor */
2555         hci_send_to_monitor(hdev, skb);
2556
2557         if (atomic_read(&hdev->promisc)) {
2558                 /* Send copy to the sockets */
2559                 hci_send_to_sock(hdev, skb);
2560         }
2561
2562         /* Get rid of the skb owner prior to sending to the driver. */
2563         skb_orphan(skb);
2564
2565         return hdev->send(skb);
2566 }
2567
2568 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2569 {
2570         skb_queue_head_init(&req->cmd_q);
2571         req->hdev = hdev;
2572         req->err = 0;
2573 }
2574
2575 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2576 {
2577         struct hci_dev *hdev = req->hdev;
2578         struct sk_buff *skb;
2579         unsigned long flags;
2580
2581         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2582
2583         /* If an error occurred during request building, remove all HCI
2584          * commands queued on the HCI request queue.
2585          */
2586         if (req->err) {
2587                 skb_queue_purge(&req->cmd_q);
2588                 return req->err;
2589         }
2590
2591         /* Do not allow empty requests */
2592         if (skb_queue_empty(&req->cmd_q))
2593                 return -ENODATA;
2594
2595         skb = skb_peek_tail(&req->cmd_q);
2596         bt_cb(skb)->req.complete = complete;
2597
2598         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2599         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2600         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2601
2602         queue_work(hdev->workqueue, &hdev->cmd_work);
2603
2604         return 0;
2605 }
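/* Usage sketch (illustrative; "scan", "cod" and "example_complete" are
 * hypothetical): callers batch several commands into one request and
 * receive a single completion callback for the lot, exactly as
 * le_scan_disable_work() does above:
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *      hci_req_add(&req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
 *
 *      err = hci_req_run(&req, example_complete);
 */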
2606
2607 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2608                                        u32 plen, const void *param)
2609 {
2610         int len = HCI_COMMAND_HDR_SIZE + plen;
2611         struct hci_command_hdr *hdr;
2612         struct sk_buff *skb;
2613
2614         skb = bt_skb_alloc(len, GFP_ATOMIC);
2615         if (!skb)
2616                 return NULL;
2617
2618         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2619         hdr->opcode = cpu_to_le16(opcode);
2620         hdr->plen   = plen;
2621
2622         if (plen)
2623                 memcpy(skb_put(skb, plen), param, plen);
2624
2625         BT_DBG("skb len %d", skb->len);
2626
2627         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2628         skb->dev = (void *) hdev;
2629
2630         return skb;
2631 }
2632
2633 /* Send HCI command */
2634 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2635                  const void *param)
2636 {
2637         struct sk_buff *skb;
2638
2639         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2640
2641         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2642         if (!skb) {
2643                 BT_ERR("%s no memory for command", hdev->name);
2644                 return -ENOMEM;
2645         }
2646
2647         /* Stand-alone HCI commands must be flagged as
2648          * single-command requests.
2649          */
2650         bt_cb(skb)->req.start = true;
2651
2652         skb_queue_tail(&hdev->cmd_q, skb);
2653         queue_work(hdev->workqueue, &hdev->cmd_work);
2654
2655         return 0;
2656 }
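/* Usage sketch (illustrative): one-shot commands outside a request go
 * through hci_send_cmd() directly, as hci_discov_off() does above:
 *
 *      u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *      hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 */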
2657
2658 /* Queue a command to an asynchronous HCI request */
2659 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2660                     const void *param, u8 event)
2661 {
2662         struct hci_dev *hdev = req->hdev;
2663         struct sk_buff *skb;
2664
2665         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2666
2667         /* If an error occurred during request building, there is no point in
2668          * queueing the HCI command. We can simply return.
2669          */
2670         if (req->err)
2671                 return;
2672
2673         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2674         if (!skb) {
2675                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2676                        hdev->name, opcode);
2677                 req->err = -ENOMEM;
2678                 return;
2679         }
2680
2681         if (skb_queue_empty(&req->cmd_q))
2682                 bt_cb(skb)->req.start = true;
2683
2684         bt_cb(skb)->req.event = event;
2685
2686         skb_queue_tail(&req->cmd_q, skb);
2687 }
2688
2689 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2690                  const void *param)
2691 {
2692         hci_req_add_ev(req, opcode, plen, param, 0);
2693 }
2694
2695 /* Get data from the previously sent command */
2696 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2697 {
2698         struct hci_command_hdr *hdr;
2699
2700         if (!hdev->sent_cmd)
2701                 return NULL;
2702
2703         hdr = (void *) hdev->sent_cmd->data;
2704
2705         if (hdr->opcode != cpu_to_le16(opcode))
2706                 return NULL;
2707
2708         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2709
2710         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2711 }
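/* Usage sketch (illustrative): event handlers use this helper to recover
 * the parameters of the command that a Command Complete event refers to:
 *
 *      void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *      if (!sent)
 *              return;
 *
 *      param = *((__u8 *) sent);
 */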
2712
2713 /* Send ACL data */
2714 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2715 {
2716         struct hci_acl_hdr *hdr;
2717         int len = skb->len;
2718
2719         skb_push(skb, HCI_ACL_HDR_SIZE);
2720         skb_reset_transport_header(skb);
2721         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2722         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2723         hdr->dlen   = cpu_to_le16(len);
2724 }
2725
2726 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2727                           struct sk_buff *skb, __u16 flags)
2728 {
2729         struct hci_conn *conn = chan->conn;
2730         struct hci_dev *hdev = conn->hdev;
2731         struct sk_buff *list;
2732
2733         skb->len = skb_headlen(skb);
2734         skb->data_len = 0;
2735
2736         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2737
2738         switch (hdev->dev_type) {
2739         case HCI_BREDR:
2740                 hci_add_acl_hdr(skb, conn->handle, flags);
2741                 break;
2742         case HCI_AMP:
2743                 hci_add_acl_hdr(skb, chan->handle, flags);
2744                 break;
2745         default:
2746                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2747                 return;
2748         }
2749
2750         list = skb_shinfo(skb)->frag_list;
2751         if (!list) {
2752                 /* Non-fragmented */
2753                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2754
2755                 skb_queue_tail(queue, skb);
2756         } else {
2757                 /* Fragmented */
2758                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2759
2760                 skb_shinfo(skb)->frag_list = NULL;
2761
2762                 /* Queue all fragments atomically */
2763                 spin_lock(&queue->lock);
2764
2765                 __skb_queue_tail(queue, skb);
2766
2767                 flags &= ~ACL_START;
2768                 flags |= ACL_CONT;
2769                 do {
2770                         skb = list; list = list->next;
2771
2772                         skb->dev = (void *) hdev;
2773                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2774                         hci_add_acl_hdr(skb, conn->handle, flags);
2775
2776                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2777
2778                         __skb_queue_tail(queue, skb);
2779                 } while (list);
2780
2781                 spin_unlock(&queue->lock);
2782         }
2783 }
2784
2785 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2786 {
2787         struct hci_dev *hdev = chan->conn->hdev;
2788
2789         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2790
2791         skb->dev = (void *) hdev;
2792
2793         hci_queue_acl(chan, &chan->data_q, skb, flags);
2794
2795         queue_work(hdev->workqueue, &hdev->tx_work);
2796 }
2797
2798 /* Send SCO data */
2799 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2800 {
2801         struct hci_dev *hdev = conn->hdev;
2802         struct hci_sco_hdr hdr;
2803
2804         BT_DBG("%s len %d", hdev->name, skb->len);
2805
2806         hdr.handle = cpu_to_le16(conn->handle);
2807         hdr.dlen   = skb->len;
2808
2809         skb_push(skb, HCI_SCO_HDR_SIZE);
2810         skb_reset_transport_header(skb);
2811         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2812
2813         skb->dev = (void *) hdev;
2814         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2815
2816         skb_queue_tail(&conn->data_q, skb);
2817         queue_work(hdev->workqueue, &hdev->tx_work);
2818 }
2819
2820 /* ---- HCI TX task (outgoing data) ---- */
2821
2822 /* HCI Connection scheduler */
2823 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2824                                      int *quote)
2825 {
2826         struct hci_conn_hash *h = &hdev->conn_hash;
2827         struct hci_conn *conn = NULL, *c;
2828         unsigned int num = 0, min = ~0;
2829
2830         /* We don't have to lock the device here. Connections are always
2831          * added and removed with the TX task disabled. */
2832
2833         rcu_read_lock();
2834
2835         list_for_each_entry_rcu(c, &h->list, list) {
2836                 if (c->type != type || skb_queue_empty(&c->data_q))
2837                         continue;
2838
2839                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2840                         continue;
2841
2842                 num++;
2843
2844                 if (c->sent < min) {
2845                         min  = c->sent;
2846                         conn = c;
2847                 }
2848
2849                 if (hci_conn_num(hdev, type) == num)
2850                         break;
2851         }
2852
2853         rcu_read_unlock();
2854
2855         if (conn) {
2856                 int cnt, q;
2857
2858                 switch (conn->type) {
2859                 case ACL_LINK:
2860                         cnt = hdev->acl_cnt;
2861                         break;
2862                 case SCO_LINK:
2863                 case ESCO_LINK:
2864                         cnt = hdev->sco_cnt;
2865                         break;
2866                 case LE_LINK:
2867                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2868                         break;
2869                 default:
2870                         cnt = 0;
2871                         BT_ERR("Unknown link type");
2872                 }
2873
2874                 q = cnt / num;
2875                 *quote = q ? q : 1;
2876         } else
2877                 *quote = 0;
2878
2879         BT_DBG("conn %p quote %d", conn, *quote);
2880         return conn;
2881 }
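/* Worked example (illustrative): with sco_cnt == 9 and three SCO
 * connections holding queued data, the connection with the smallest
 * "sent" counter wins and is granted quote = 9 / 3 = 3 packets for this
 * round; the "q ? q : 1" fallback guarantees a quote of at least one, so
 * a congested controller can never starve a connection indefinitely.
 */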
2882
2883 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2884 {
2885         struct hci_conn_hash *h = &hdev->conn_hash;
2886         struct hci_conn *c;
2887
2888         BT_ERR("%s link tx timeout", hdev->name);
2889
2890         rcu_read_lock();
2891
2892         /* Kill stalled connections */
2893         list_for_each_entry_rcu(c, &h->list, list) {
2894                 if (c->type == type && c->sent) {
2895                         BT_ERR("%s killing stalled connection %pMR",
2896                                hdev->name, &c->dst);
2897                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2898                 }
2899         }
2900
2901         rcu_read_unlock();
2902 }
2903
2904 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2905                                       int *quote)
2906 {
2907         struct hci_conn_hash *h = &hdev->conn_hash;
2908         struct hci_chan *chan = NULL;
2909         unsigned int num = 0, min = ~0, cur_prio = 0;
2910         struct hci_conn *conn;
2911         int cnt, q, conn_num = 0;
2912
2913         BT_DBG("%s", hdev->name);
2914
2915         rcu_read_lock();
2916
2917         list_for_each_entry_rcu(conn, &h->list, list) {
2918                 struct hci_chan *tmp;
2919
2920                 if (conn->type != type)
2921                         continue;
2922
2923                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2924                         continue;
2925
2926                 conn_num++;
2927
2928                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2929                         struct sk_buff *skb;
2930
2931                         if (skb_queue_empty(&tmp->data_q))
2932                                 continue;
2933
2934                         skb = skb_peek(&tmp->data_q);
2935                         if (skb->priority < cur_prio)
2936                                 continue;
2937
2938                         if (skb->priority > cur_prio) {
2939                                 num = 0;
2940                                 min = ~0;
2941                                 cur_prio = skb->priority;
2942                         }
2943
2944                         num++;
2945
2946                         if (conn->sent < min) {
2947                                 min  = conn->sent;
2948                                 chan = tmp;
2949                         }
2950                 }
2951
2952                 if (hci_conn_num(hdev, type) == conn_num)
2953                         break;
2954         }
2955
2956         rcu_read_unlock();
2957
2958         if (!chan)
2959                 return NULL;
2960
2961         switch (chan->conn->type) {
2962         case ACL_LINK:
2963                 cnt = hdev->acl_cnt;
2964                 break;
2965         case AMP_LINK:
2966                 cnt = hdev->block_cnt;
2967                 break;
2968         case SCO_LINK:
2969         case ESCO_LINK:
2970                 cnt = hdev->sco_cnt;
2971                 break;
2972         case LE_LINK:
2973                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2974                 break;
2975         default:
2976                 cnt = 0;
2977                 BT_ERR("Unknown link type");
2978         }
2979
2980         q = cnt / num;
2981         *quote = q ? q : 1;
2982         BT_DBG("chan %p quote %d", chan, *quote);
2983         return chan;
2984 }
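/* Worked example (illustrative): if two channels on the same type of
 * link hold queued data with priorities 5 and 7, only the priority-7
 * traffic competes in this round; among equal-priority channels, the one
 * whose owning connection has the smallest "sent" counter is picked.
 */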
2985
2986 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2987 {
2988         struct hci_conn_hash *h = &hdev->conn_hash;
2989         struct hci_conn *conn;
2990         int num = 0;
2991
2992         BT_DBG("%s", hdev->name);
2993
2994         rcu_read_lock();
2995
2996         list_for_each_entry_rcu(conn, &h->list, list) {
2997                 struct hci_chan *chan;
2998
2999                 if (conn->type != type)
3000                         continue;
3001
3002                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3003                         continue;
3004
3005                 num++;
3006
3007                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3008                         struct sk_buff *skb;
3009
3010                         if (chan->sent) {
3011                                 chan->sent = 0;
3012                                 continue;
3013                         }
3014
3015                         if (skb_queue_empty(&chan->data_q))
3016                                 continue;
3017
3018                         skb = skb_peek(&chan->data_q);
3019                         if (skb->priority >= HCI_PRIO_MAX - 1)
3020                                 continue;
3021
3022                         skb->priority = HCI_PRIO_MAX - 1;
3023
3024                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3025                                skb->priority);
3026                 }
3027
3028                 if (hci_conn_num(hdev, type) == num)
3029                         break;
3030         }
3031
3032         rcu_read_unlock();
3033
3034 }
3035
3036 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3037 {
3038         /* Calculate count of blocks used by this packet */
3039         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3040 }
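/* Worked example (illustrative): with a controller block_len of 64
 * bytes, a 339-byte ACL packet (4-byte ACL header plus 335 bytes of
 * payload) occupies DIV_ROUND_UP(335, 64) = 6 data blocks.
 */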
3041
3042 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3043 {
3044         if (!test_bit(HCI_RAW, &hdev->flags)) {
3045                 /* ACL tx timeout must be longer than maximum
3046                  * link supervision timeout (40.9 seconds) */
3047                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3048                                        HCI_ACL_TX_TIMEOUT))
3049                         hci_link_tx_to(hdev, ACL_LINK);
3050         }
3051 }
3052
3053 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3054 {
3055         unsigned int cnt = hdev->acl_cnt;
3056         struct hci_chan *chan;
3057         struct sk_buff *skb;
3058         int quote;
3059
3060         __check_timeout(hdev, cnt);
3061
3062         while (hdev->acl_cnt &&
3063                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3064                 u32 priority = (skb_peek(&chan->data_q))->priority;
3065                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3066                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3067                                skb->len, skb->priority);
3068
3069                         /* Stop if priority has changed */
3070                         if (skb->priority < priority)
3071                                 break;
3072
3073                         skb = skb_dequeue(&chan->data_q);
3074
3075                         hci_conn_enter_active_mode(chan->conn,
3076                                                    bt_cb(skb)->force_active);
3077
3078                         hci_send_frame(skb);
3079                         hdev->acl_last_tx = jiffies;
3080
3081                         hdev->acl_cnt--;
3082                         chan->sent++;
3083                         chan->conn->sent++;
3084                 }
3085         }
3086
3087         if (cnt != hdev->acl_cnt)
3088                 hci_prio_recalculate(hdev, ACL_LINK);
3089 }
3090
3091 static void hci_sched_acl_blk(struct hci_dev *hdev)
3092 {
3093         unsigned int cnt = hdev->block_cnt;
3094         struct hci_chan *chan;
3095         struct sk_buff *skb;
3096         int quote;
3097         u8 type;
3098
3099         __check_timeout(hdev, cnt);
3100
3101         BT_DBG("%s", hdev->name);
3102
3103         if (hdev->dev_type == HCI_AMP)
3104                 type = AMP_LINK;
3105         else
3106                 type = ACL_LINK;
3107
3108         while (hdev->block_cnt > 0 &&
3109                (chan = hci_chan_sent(hdev, type, &quote))) {
3110                 u32 priority = (skb_peek(&chan->data_q))->priority;
3111                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3112                         int blocks;
3113
3114                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3115                                skb->len, skb->priority);
3116
3117                         /* Stop if priority has changed */
3118                         if (skb->priority < priority)
3119                                 break;
3120
3121                         skb = skb_dequeue(&chan->data_q);
3122
3123                         blocks = __get_blocks(hdev, skb);
3124                         if (blocks > hdev->block_cnt)
3125                                 return;
3126
3127                         hci_conn_enter_active_mode(chan->conn,
3128                                                    bt_cb(skb)->force_active);
3129
3130                         hci_send_frame(skb);
3131                         hdev->acl_last_tx = jiffies;
3132
3133                         hdev->block_cnt -= blocks;
3134                         quote -= blocks;
3135
3136                         chan->sent += blocks;
3137                         chan->conn->sent += blocks;
3138                 }
3139         }
3140
3141         if (cnt != hdev->block_cnt)
3142                 hci_prio_recalculate(hdev, type);
3143 }
3144
3145 static void hci_sched_acl(struct hci_dev *hdev)
3146 {
3147         BT_DBG("%s", hdev->name);
3148
3149         /* No ACL link over BR/EDR controller */
3150         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3151                 return;
3152
3153         /* No AMP link over AMP controller */
3154         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3155                 return;
3156
3157         switch (hdev->flow_ctl_mode) {
3158         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3159                 hci_sched_acl_pkt(hdev);
3160                 break;
3161
3162         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3163                 hci_sched_acl_blk(hdev);
3164                 break;
3165         }
3166 }
3167
3168 /* Schedule SCO */
3169 static void hci_sched_sco(struct hci_dev *hdev)
3170 {
3171         struct hci_conn *conn;
3172         struct sk_buff *skb;
3173         int quote;
3174
3175         BT_DBG("%s", hdev->name);
3176
3177         if (!hci_conn_num(hdev, SCO_LINK))
3178                 return;
3179
3180         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3181                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3182                         BT_DBG("skb %p len %d", skb, skb->len);
3183                         hci_send_frame(skb);
3184
3185                         conn->sent++;
3186                         if (conn->sent == ~0)
3187                                 conn->sent = 0;
3188                 }
3189         }
3190 }
3191
3192 static void hci_sched_esco(struct hci_dev *hdev)
3193 {
3194         struct hci_conn *conn;
3195         struct sk_buff *skb;
3196         int quote;
3197
3198         BT_DBG("%s", hdev->name);
3199
3200         if (!hci_conn_num(hdev, ESCO_LINK))
3201                 return;
3202
3203         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3204                                                      &quote))) {
3205                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3206                         BT_DBG("skb %p len %d", skb, skb->len);
3207                         hci_send_frame(skb);
3208
3209                         conn->sent++;
3210                         if (conn->sent == ~0)
3211                                 conn->sent = 0;
3212                 }
3213         }
3214 }
3215
3216 static void hci_sched_le(struct hci_dev *hdev)
3217 {
3218         struct hci_chan *chan;
3219         struct sk_buff *skb;
3220         int quote, cnt, tmp;
3221
3222         BT_DBG("%s", hdev->name);
3223
3224         if (!hci_conn_num(hdev, LE_LINK))
3225                 return;
3226
3227         if (!test_bit(HCI_RAW, &hdev->flags)) {
3228                 /* LE tx timeout must be longer than maximum
3229                  * link supervision timeout (40.9 seconds) */
3230                 if (!hdev->le_cnt && hdev->le_pkts &&
3231                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3232                         hci_link_tx_to(hdev, LE_LINK);
3233         }
3234
3235         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3236         tmp = cnt;
3237         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3238                 u32 priority = (skb_peek(&chan->data_q))->priority;
3239                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3240                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3241                                skb->len, skb->priority);
3242
3243                         /* Stop if priority has changed */
3244                         if (skb->priority < priority)
3245                                 break;
3246
3247                         skb = skb_dequeue(&chan->data_q);
3248
3249                         hci_send_frame(skb);
3250                         hdev->le_last_tx = jiffies;
3251
3252                         cnt--;
3253                         chan->sent++;
3254                         chan->conn->sent++;
3255                 }
3256         }
3257
3258         if (hdev->le_pkts)
3259                 hdev->le_cnt = cnt;
3260         else
3261                 hdev->acl_cnt = cnt;
3262
3263         if (cnt != tmp)
3264                 hci_prio_recalculate(hdev, LE_LINK);
3265 }
3266
3267 static void hci_tx_work(struct work_struct *work)
3268 {
3269         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3270         struct sk_buff *skb;
3271
3272         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3273                hdev->sco_cnt, hdev->le_cnt);
3274
3275         /* Schedule queues and send stuff to HCI driver */
3276
3277         hci_sched_acl(hdev);
3278
3279         hci_sched_sco(hdev);
3280
3281         hci_sched_esco(hdev);
3282
3283         hci_sched_le(hdev);
3284
3285         /* Send next queued raw (unknown type) packet */
3286         while ((skb = skb_dequeue(&hdev->raw_q)))
3287                 hci_send_frame(skb);
3288 }
3289
3290 /* ----- HCI RX task (incoming data processing) ----- */
3291
3292 /* ACL data packet */
3293 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3294 {
3295         struct hci_acl_hdr *hdr = (void *) skb->data;
3296         struct hci_conn *conn;
3297         __u16 handle, flags;
3298
3299         skb_pull(skb, HCI_ACL_HDR_SIZE);
3300
3301         handle = __le16_to_cpu(hdr->handle);
3302         flags  = hci_flags(handle);
3303         handle = hci_handle(handle);
3304
3305         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3306                handle, flags);
3307
3308         hdev->stat.acl_rx++;
3309
3310         hci_dev_lock(hdev);
3311         conn = hci_conn_hash_lookup_handle(hdev, handle);
3312         hci_dev_unlock(hdev);
3313
3314         if (conn) {
3315                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3316
3317                 /* Send to upper protocol */
3318                 l2cap_recv_acldata(conn, skb, flags);
3319                 return;
3320         } else {
3321                 BT_ERR("%s ACL packet for unknown connection handle %d",
3322                        hdev->name, handle);
3323         }
3324
3325         kfree_skb(skb);
3326 }
3327
3328 /* SCO data packet */
3329 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3330 {
3331         struct hci_sco_hdr *hdr = (void *) skb->data;
3332         struct hci_conn *conn;
3333         __u16 handle;
3334
3335         skb_pull(skb, HCI_SCO_HDR_SIZE);
3336
3337         handle = __le16_to_cpu(hdr->handle);
3338
3339         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3340
3341         hdev->stat.sco_rx++;
3342
3343         hci_dev_lock(hdev);
3344         conn = hci_conn_hash_lookup_handle(hdev, handle);
3345         hci_dev_unlock(hdev);
3346
3347         if (conn) {
3348                 /* Send to upper protocol */
3349                 sco_recv_scodata(conn, skb);
3350                 return;
3351         } else {
3352                 BT_ERR("%s SCO packet for unknown connection handle %d",
3353                        hdev->name, handle);
3354         }
3355
3356         kfree_skb(skb);
3357 }
3358
3359 static bool hci_req_is_complete(struct hci_dev *hdev)
3360 {
3361         struct sk_buff *skb;
3362
3363         skb = skb_peek(&hdev->cmd_q);
3364         if (!skb)
3365                 return true;
3366
3367         return bt_cb(skb)->req.start;
3368 }
3369
3370 static void hci_resend_last(struct hci_dev *hdev)
3371 {
3372         struct hci_command_hdr *sent;
3373         struct sk_buff *skb;
3374         u16 opcode;
3375
3376         if (!hdev->sent_cmd)
3377                 return;
3378
3379         sent = (void *) hdev->sent_cmd->data;
3380         opcode = __le16_to_cpu(sent->opcode);
3381         if (opcode == HCI_OP_RESET)
3382                 return;
3383
3384         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3385         if (!skb)
3386                 return;
3387
3388         skb_queue_head(&hdev->cmd_q, skb);
3389         queue_work(hdev->workqueue, &hdev->cmd_work);
3390 }
3391
3392 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3393 {
3394         hci_req_complete_t req_complete = NULL;
3395         struct sk_buff *skb;
3396         unsigned long flags;
3397
3398         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3399
3400         /* If the completed command doesn't match the last one that was
3401          * sent, we need to handle it specially.
3402          */
3403         if (!hci_sent_cmd_data(hdev, opcode)) {
3404                 /* Some CSR-based controllers generate a spontaneous
3405                  * reset complete event during init, and any pending
3406                  * command will then never be completed. In such a
3407                  * case we need to resend whatever command was sent
3408                  * last.
3409                  */
3410                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3411                         hci_resend_last(hdev);
3412
3413                 return;
3414         }
3415
3416         /* If the command succeeded and there are still more commands in
3417          * this request, the request is not yet complete.
3418          */
3419         if (!status && !hci_req_is_complete(hdev))
3420                 return;
3421
3422         /* If this was the last command in a request, the complete
3423          * callback will be found in hdev->sent_cmd instead of the
3424          * command queue (hdev->cmd_q).
3425          */
3426         if (hdev->sent_cmd) {
3427                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3428
3429                 if (req_complete) {
3430                         /* We must set the complete callback to NULL to
3431                          * avoid calling the callback more than once if
3432                          * this function gets called again.
3433                          */
3434                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
3435
3436                         goto call_complete;
3437                 }
3438         }
3439
3440         /* Remove all pending commands belonging to this request */
3441         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3442         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3443                 if (bt_cb(skb)->req.start) {
3444                         __skb_queue_head(&hdev->cmd_q, skb);
3445                         break;
3446                 }
3447
3448                 req_complete = bt_cb(skb)->req.complete;
3449                 kfree_skb(skb);
3450         }
3451         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3452
3453 call_complete:
3454         if (req_complete)
3455                 req_complete(hdev, status);
3456 }
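
The req metadata consulted throughout this function lives in the skb control block. For orientation, a sketch of the relevant fields as they look in this kernel generation; the authoritative definition is in include/net/bluetooth/bluetooth.h and may differ in detail:

        struct hci_req_ctrl_sketch {
                bool               start;    /* first command of a request  */
                u8                 event;    /* event expected to finish it */
                hci_req_complete_t complete; /* run once per request, hence */
                                             /* the NULL-ing above          */
        };
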
3457
3458 static void hci_rx_work(struct work_struct *work)
3459 {
3460         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3461         struct sk_buff *skb;
3462
3463         BT_DBG("%s", hdev->name);
3464
3465         while ((skb = skb_dequeue(&hdev->rx_q))) {
3466                 /* Send copy to monitor */
3467                 hci_send_to_monitor(hdev, skb);
3468
3469                 if (atomic_read(&hdev->promisc)) {
3470                         /* Send copy to the sockets */
3471                         hci_send_to_sock(hdev, skb);
3472                 }
3473
3474                 if (test_bit(HCI_RAW, &hdev->flags)) {
3475                         kfree_skb(skb);
3476                         continue;
3477                 }
3478
3479                 if (test_bit(HCI_INIT, &hdev->flags)) {
3480                         /* Don't process data packets in this state. */
3481                         switch (bt_cb(skb)->pkt_type) {
3482                         case HCI_ACLDATA_PKT:
3483                         case HCI_SCODATA_PKT:
3484                                 kfree_skb(skb);
3485                                 continue;
3486                         }
3487                 }
3488
3489                 /* Process frame */
3490                 switch (bt_cb(skb)->pkt_type) {
3491                 case HCI_EVENT_PKT:
3492                         BT_DBG("%s Event packet", hdev->name);
3493                         hci_event_packet(hdev, skb);
3494                         break;
3495
3496                 case HCI_ACLDATA_PKT:
3497                         BT_DBG("%s ACL data packet", hdev->name);
3498                         hci_acldata_packet(hdev, skb);
3499                         break;
3500
3501                 case HCI_SCODATA_PKT:
3502                         BT_DBG("%s SCO data packet", hdev->name);
3503                         hci_scodata_packet(hdev, skb);
3504                         break;
3505
3506                 default:
3507                         kfree_skb(skb);
3508                         break;
3509                 }
3510         }
3511 }
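
Frames land on hdev->rx_q from the transport drivers, which hand each received packet to the core. A hedged sketch of that driver side, assuming this kernel generation's single-argument hci_recv_frame() with hdev carried in skb->dev (the wrapper function itself is illustrative):

        static int example_driver_rx(struct hci_dev *hdev, const void *buf,
                                     size_t len, u8 pkt_type)
        {
                struct sk_buff *skb;

                skb = bt_skb_alloc(len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                bt_cb(skb)->pkt_type = pkt_type;    /* e.g. HCI_EVENT_PKT */
                memcpy(skb_put(skb, len), buf, len);
                skb->dev = (void *) hdev;

                /* Tails the skb on hdev->rx_q and schedules hci_rx_work() */
                return hci_recv_frame(skb);
        }
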
3512
3513 static void hci_cmd_work(struct work_struct *work)
3514 {
3515         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3516         struct sk_buff *skb;
3517
3518         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3519                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3520
3521         /* Send queued commands */
3522         if (atomic_read(&hdev->cmd_cnt)) {
3523                 skb = skb_dequeue(&hdev->cmd_q);
3524                 if (!skb)
3525                         return;
3526
3527                 kfree_skb(hdev->sent_cmd);
3528
3529                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3530                 if (hdev->sent_cmd) {
3531                         atomic_dec(&hdev->cmd_cnt);
3532                         hci_send_frame(skb);
3533                         if (test_bit(HCI_RESET, &hdev->flags))
3534                                 del_timer(&hdev->cmd_timer);
3535                         else
3536                                 mod_timer(&hdev->cmd_timer,
3537                                           jiffies + HCI_CMD_TIMEOUT);
3538                 } else {
3539                         skb_queue_head(&hdev->cmd_q, skb);
3540                         queue_work(hdev->workqueue, &hdev->cmd_work);
3541                 }
3542         }
3543 }
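
Commands reach hdev->cmd_q through hci_send_cmd() or the request framework sketched earlier; cmd_cnt is the controller's command-credit count, replenished from the ncmd field of Command Complete and Command Status events. A hedged usage sketch for a single standalone command (the wrapper function is illustrative):

        static int example_enable_scan(struct hci_dev *hdev)
        {
                __u8 scan = SCAN_INQUIRY | SCAN_PAGE;

                /* Builds the skb, queues it on hdev->cmd_q and kicks
                 * cmd_work above.
                 */
                return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        }
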
3544
3545 u8 bdaddr_to_le(u8 bdaddr_type)
3546 {
3547         switch (bdaddr_type) {
3548         case BDADDR_LE_PUBLIC:
3549                 return ADDR_LE_DEV_PUBLIC;
3550
3551         default:
3552                 /* Fall back to the LE Random address type */
3553                 return ADDR_LE_DEV_RANDOM;
3554         }
3555 }
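
BDADDR_LE_* are the address-type constants used on the management interface, while ADDR_LE_DEV_* are the ones the HCI connection code expects; this helper bridges the two. A hedged usage sketch (the wrapper is illustrative; struct mgmt_addr_info is from include/net/bluetooth/mgmt.h):

        static u8 example_le_dst_type(const struct mgmt_addr_info *addr)
        {
                /* BDADDR_LE_PUBLIC -> ADDR_LE_DEV_PUBLIC,
                 * everything else  -> ADDR_LE_DEV_RANDOM
                 */
                return bdaddr_to_le(addr->type);
        }
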