/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

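/* Usage sketch (illustrative, assuming debugfs is mounted at the usual
 * /sys/kernel/debug and the controller is hci0):
 *
 *      # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      Y
 *
 * Writing "Y" puts a running controller into Device Under Test mode via
 * HCI_OP_ENABLE_DUT_MODE; writing "N" leaves it again by resetting the
 * controller.
 */
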
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

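/* Caller sketch (hypothetical, not from this file): a driver that needs a
 * synchronous HCI Reset during setup might use the helper like this:
 *
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      kfree_skb(skb);
 *
 * On success the returned skb holds the Command Complete return parameters
 * and the caller is responsible for freeing it.
 */
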
/* Execute request and wait for completion. The caller is expected to hold
 * hdev->req_lock (see hci_req_sync() below).
 */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout: 0x7d00 = 32000 baseband slots,
         * i.e. 32000 * 0.625 ms = 20 seconds.
         */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

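/* Encoding note: in the Set Event Mask bitfield above, the event with
 * code E is enabled by setting bit (E - 1) of the 64-bit little-endian
 * mask, i.e. events[(E - 1) / 8] |= 1 << ((E - 1) % 8). For example,
 * Disconnection Complete (event code 0x05) maps to bit 4 of byte 0,
 * which is the events[0] |= 0x10 line above.
 */
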
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should be
                 * available as well. However some controllers list max_page
                 * as 0 as long as SSP has not been enabled. To achieve
                 * proper debugging output, force max_page to at least 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

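/* For example, on a controller whose LMP features advertise role switch,
 * hold mode, sniff mode and park state, the default link policy written
 * above ends up as 0x000f (HCI_LP_RSWITCH | HCI_LP_HOLD | HCI_LP_SNIFF |
 * HCI_LP_PARK).
 */
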
static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If the controller supports the Connection Parameters
                 * Request Link Layer Procedure, enable the corresponding
                 * event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

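/* The LE event mask built in hci_init3_req() uses the same bit encoding
 * as the page-1 event mask, but over LE meta subevent codes: the initial
 * events[0] = 0x0f enables subevents 0x01 through 0x04 (LE Connection
 * Complete, LE Advertising Report, LE Connection Update Complete and
 * LE Read Remote Used Features Complete).
 */
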
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (bredr_sc_enabled(hdev)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev)) {
                hci_debugfs_create_le(hdev);
                smp_register(hdev);
        }

        return 0;
}

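/* Bring-up summary: __hci_init() above runs the four init stages in
 * order (reset plus basic reads, capability-dependent setup, event masks
 * and LE configuration, optional feature setup), each synchronously via
 * __hci_req_sync() with HCI_INIT_TIMEOUT. A failure in any stage aborts
 * the open and is propagated back to hci_dev_do_open().
 */
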
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

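/* Callers own the reference taken by hci_dev_get() and must drop it with
 * hci_dev_put() once they are done with the device, e.g.:
 *
 *      hdev = hci_dev_get(0);
 *      if (!hdev)
 *              return -ENODEV;
 *      ...
 *      hci_dev_put(hdev);
 */
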
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

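/* The resolve list is thus kept ordered by signal strength so that names
 * get resolved for the strongest devices (smallest abs(rssi)) first;
 * entries whose name resolution is already in flight (NAME_PENDING) keep
 * their position.
 */
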
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

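/* The flags returned above end up in the mgmt Device Found event:
 * MGMT_DEV_FOUND_CONFIRM_NAME asks userspace to confirm whether the
 * remote name is needed, and MGMT_DEV_FOUND_LEGACY_PAIRING marks
 * devices without Secure Simple Pairing support.
 */
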
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until the Inquiry procedure finishes (HCI_INQUIRY
                 * flag is cleared). If it is interrupted by a signal,
                 * return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

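/* Userspace sketch (illustrative, not from this file): hci_inquiry() backs
 * the HCIINQUIRY ioctl on a raw HCI socket, with the request header
 * immediately followed by the response buffer, roughly:
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } buf = { .ir = { .dev_id = 0, .length = 8,
 *                        .lap = { 0x33, 0x8b, 0x9e } } };
 *      int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *      if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *              perror("HCIINQUIRY");
 */
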
1363 static int hci_dev_do_open(struct hci_dev *hdev)
1364 {
1365         int ret = 0;
1366
1367         BT_DBG("%s %p", hdev->name, hdev);
1368
1369         hci_req_lock(hdev);
1370
1371         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1372                 ret = -ENODEV;
1373                 goto done;
1374         }
1375
1376         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1377             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1378                 /* Check for rfkill but allow the HCI setup stage to
1379                  * proceed (which in itself doesn't cause any RF activity).
1380                  */
1381                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1382                         ret = -ERFKILL;
1383                         goto done;
1384                 }
1385
1386                 /* Check for valid public address or a configured static
1387                  * random adddress, but let the HCI setup proceed to
1388                  * be able to determine if there is a public address
1389                  * or not.
1390                  *
1391                  * In case of user channel usage, it is not important
1392                  * if a public address or static random address is
1393                  * available.
1394                  *
1395                  * This check is only valid for BR/EDR controllers
1396                  * since AMP controllers do not have an address.
1397                  */
1398                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1399                     hdev->dev_type == HCI_BREDR &&
1400                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1401                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1402                         ret = -EADDRNOTAVAIL;
1403                         goto done;
1404                 }
1405         }
1406
1407         if (test_bit(HCI_UP, &hdev->flags)) {
1408                 ret = -EALREADY;
1409                 goto done;
1410         }
1411
1412         if (hdev->open(hdev)) {
1413                 ret = -EIO;
1414                 goto done;
1415         }
1416
1417         atomic_set(&hdev->cmd_cnt, 1);
1418         set_bit(HCI_INIT, &hdev->flags);
1419
1420         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1421                 if (hdev->setup)
1422                         ret = hdev->setup(hdev);
1423
1424                 /* The transport driver can set these quirks before
1425                  * creating the HCI device or in its setup callback.
1426                  *
1427                  * In case any of them is set, the controller has to
1428                  * start up as unconfigured.
1429                  */
1430                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1431                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1432                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
1433
1434                 /* For an unconfigured controller it is required to
1435                  * read at least the version information provided by
1436                  * the Read Local Version Information command.
1437                  *
1438                  * If the set_bdaddr driver callback is provided, then
1439                  * also the original Bluetooth public device address
1440                  * will be read using the Read BD Address command.
1441                  */
1442                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
1443                         ret = __hci_unconf_init(hdev);
1444         }
1445
1446         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1447                 /* If public address change is configured, ensure that
1448                  * the address gets programmed. If the driver does not
1449                  * support changing the public address, fail the power
1450                  * on procedure.
1451                  */
1452                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1453                     hdev->set_bdaddr)
1454                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1455                 else
1456                         ret = -EADDRNOTAVAIL;
1457         }
1458
1459         if (!ret) {
1460                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1461                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1462                         ret = __hci_init(hdev);
1463         }
1464
1465         clear_bit(HCI_INIT, &hdev->flags);
1466
1467         if (!ret) {
1468                 hci_dev_hold(hdev);
1469                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1470                 set_bit(HCI_UP, &hdev->flags);
1471                 hci_notify(hdev, HCI_DEV_UP);
1472                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1473                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
1474                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1475                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1476                     hdev->dev_type == HCI_BREDR) {
1477                         hci_dev_lock(hdev);
1478                         mgmt_powered(hdev, 1);
1479                         hci_dev_unlock(hdev);
1480                 }
1481         } else {
1482                 /* Init failed, cleanup */
1483                 flush_work(&hdev->tx_work);
1484                 flush_work(&hdev->cmd_work);
1485                 flush_work(&hdev->rx_work);
1486
1487                 skb_queue_purge(&hdev->cmd_q);
1488                 skb_queue_purge(&hdev->rx_q);
1489
1490                 if (hdev->flush)
1491                         hdev->flush(hdev);
1492
1493                 if (hdev->sent_cmd) {
1494                         kfree_skb(hdev->sent_cmd);
1495                         hdev->sent_cmd = NULL;
1496                 }
1497
1498                 hdev->close(hdev);
1499                 hdev->flags &= BIT(HCI_RAW);
1500         }
1501
1502 done:
1503         hci_req_unlock(hdev);
1504         return ret;
1505 }
1506
1507 /* ---- HCI ioctl helpers ---- */
1508
1509 int hci_dev_open(__u16 dev)
1510 {
1511         struct hci_dev *hdev;
1512         int err;
1513
1514         hdev = hci_dev_get(dev);
1515         if (!hdev)
1516                 return -ENODEV;
1517
1518         /* Devices that are marked as unconfigured can only be powered
1519          * up as user channel. Trying to bring them up as normal devices
1520          * will result into a failure. Only user channel operation is
1521          * possible.
1522          *
1523          * When this function is called for a user channel, the flag
1524          * HCI_USER_CHANNEL will be set first before attempting to
1525          * open the device.
1526          */
1527         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1528             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1529                 err = -EOPNOTSUPP;
1530                 goto done;
1531         }
1532
1533         /* We need to ensure that no other power on/off work is pending
1534          * before proceeding to call hci_dev_do_open. This is
1535          * particularly important if the setup procedure has not yet
1536          * completed.
1537          */
1538         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1539                 cancel_delayed_work(&hdev->power_off);
1540
1541         /* After this call it is guaranteed that the setup procedure
1542          * has finished. This means that error conditions like RFKILL
1543          * or no valid public or static random address apply.
1544          */
1545         flush_workqueue(hdev->req_workqueue);
1546
1547         /* For controllers not using the management interface and that
1548          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1549          * so that pairing works for them. Once the management interface
1550          * is in use this bit will be cleared again and userspace has
1551          * to explicitly enable it.
1552          */
1553         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1554             !test_bit(HCI_MGMT, &hdev->dev_flags))
1555                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
1556
1557         err = hci_dev_do_open(hdev);
1558
1559 done:
1560         hci_dev_put(hdev);
1561         return err;
1562 }
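
/* Illustrative sketch of the userspace side: hci_dev_open() backs the
 * HCIDEVUP ioctl, so bringing up hci0 by hand looks roughly like this
 * (error handling omitted):
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(dd, HCIDEVUP, 0);
 */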
1563
1564 /* This function requires the caller holds hdev->lock */
1565 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1566 {
1567         struct hci_conn_params *p;
1568
1569         list_for_each_entry(p, &hdev->le_conn_params, list) {
1570                 if (p->conn) {
1571                         hci_conn_drop(p->conn);
1572                         hci_conn_put(p->conn);
1573                         p->conn = NULL;
1574                 }
1575                 list_del_init(&p->action);
1576         }
1577
1578         BT_DBG("All LE pending actions cleared");
1579 }
1580
1581 static int hci_dev_do_close(struct hci_dev *hdev)
1582 {
1583         BT_DBG("%s %p", hdev->name, hdev);
1584
1585         cancel_delayed_work(&hdev->power_off);
1586
1587         hci_req_cancel(hdev, ENODEV);
1588         hci_req_lock(hdev);
1589
1590         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1591                 cancel_delayed_work_sync(&hdev->cmd_timer);
1592                 hci_req_unlock(hdev);
1593                 return 0;
1594         }
1595
1596         /* Flush RX and TX works */
1597         flush_work(&hdev->tx_work);
1598         flush_work(&hdev->rx_work);
1599
1600         if (hdev->discov_timeout > 0) {
1601                 cancel_delayed_work(&hdev->discov_off);
1602                 hdev->discov_timeout = 0;
1603                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1604                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1605         }
1606
1607         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1608                 cancel_delayed_work(&hdev->service_cache);
1609
1610         cancel_delayed_work_sync(&hdev->le_scan_disable);
1611
1612         if (test_bit(HCI_MGMT, &hdev->dev_flags))
1613                 cancel_delayed_work_sync(&hdev->rpa_expired);
1614
1615         /* Avoid potential lockdep warnings from the *_flush() calls by
1616          * ensuring the workqueue is empty up front.
1617          */
1618         drain_workqueue(hdev->workqueue);
1619
1620         hci_dev_lock(hdev);
1621
1622         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1623                 if (hdev->dev_type == HCI_BREDR)
1624                         mgmt_powered(hdev, 0);
1625         }
1626
1627         hci_inquiry_cache_flush(hdev);
1628         hci_pend_le_actions_clear(hdev);
1629         hci_conn_hash_flush(hdev);
1630         hci_dev_unlock(hdev);
1631
1632         hci_notify(hdev, HCI_DEV_DOWN);
1633
1634         if (hdev->flush)
1635                 hdev->flush(hdev);
1636
1637         /* Reset device */
1638         skb_queue_purge(&hdev->cmd_q);
1639         atomic_set(&hdev->cmd_cnt, 1);
1640         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1641             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1642             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1643                 set_bit(HCI_INIT, &hdev->flags);
1644                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1645                 clear_bit(HCI_INIT, &hdev->flags);
1646         }
1647
1648         /* Flush cmd work */
1649         flush_work(&hdev->cmd_work);
1650
1651         /* Drop queues */
1652         skb_queue_purge(&hdev->rx_q);
1653         skb_queue_purge(&hdev->cmd_q);
1654         skb_queue_purge(&hdev->raw_q);
1655
1656         /* Drop last sent command */
1657         if (hdev->sent_cmd) {
1658                 cancel_delayed_work_sync(&hdev->cmd_timer);
1659                 kfree_skb(hdev->sent_cmd);
1660                 hdev->sent_cmd = NULL;
1661         }
1662
1663         kfree_skb(hdev->recv_evt);
1664         hdev->recv_evt = NULL;
1665
1666         /* After this point our queues are empty
1667          * and no tasks are scheduled. */
1668         hdev->close(hdev);
1669
1670         /* Clear flags */
1671         hdev->flags &= BIT(HCI_RAW);
1672         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1673
1674         /* Controller radio is available but is currently powered down */
1675         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1676
1677         memset(hdev->eir, 0, sizeof(hdev->eir));
1678         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1679         bacpy(&hdev->random_addr, BDADDR_ANY);
1680
1681         hci_req_unlock(hdev);
1682
1683         hci_dev_put(hdev);
1684         return 0;
1685 }
1686
1687 int hci_dev_close(__u16 dev)
1688 {
1689         struct hci_dev *hdev;
1690         int err;
1691
1692         hdev = hci_dev_get(dev);
1693         if (!hdev)
1694                 return -ENODEV;
1695
1696         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1697                 err = -EBUSY;
1698                 goto done;
1699         }
1700
1701         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1702                 cancel_delayed_work(&hdev->power_off);
1703
1704         err = hci_dev_do_close(hdev);
1705
1706 done:
1707         hci_dev_put(hdev);
1708         return err;
1709 }
1710
1711 int hci_dev_reset(__u16 dev)
1712 {
1713         struct hci_dev *hdev;
1714         int ret = 0;
1715
1716         hdev = hci_dev_get(dev);
1717         if (!hdev)
1718                 return -ENODEV;
1719
1720         hci_req_lock(hdev);
1721
1722         if (!test_bit(HCI_UP, &hdev->flags)) {
1723                 ret = -ENETDOWN;
1724                 goto done;
1725         }
1726
1727         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1728                 ret = -EBUSY;
1729                 goto done;
1730         }
1731
1732         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1733                 ret = -EOPNOTSUPP;
1734                 goto done;
1735         }
1736
1737         /* Drop queues */
1738         skb_queue_purge(&hdev->rx_q);
1739         skb_queue_purge(&hdev->cmd_q);
1740
1741         /* Avoid potential lockdep warnings from the *_flush() calls by
1742          * ensuring the workqueue is empty up front.
1743          */
1744         drain_workqueue(hdev->workqueue);
1745
1746         hci_dev_lock(hdev);
1747         hci_inquiry_cache_flush(hdev);
1748         hci_conn_hash_flush(hdev);
1749         hci_dev_unlock(hdev);
1750
1751         if (hdev->flush)
1752                 hdev->flush(hdev);
1753
1754         atomic_set(&hdev->cmd_cnt, 1);
1755         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1756
1757         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1758
1759 done:
1760         hci_req_unlock(hdev);
1761         hci_dev_put(hdev);
1762         return ret;
1763 }
1764
1765 int hci_dev_reset_stat(__u16 dev)
1766 {
1767         struct hci_dev *hdev;
1768         int ret = 0;
1769
1770         hdev = hci_dev_get(dev);
1771         if (!hdev)
1772                 return -ENODEV;
1773
1774         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1775                 ret = -EBUSY;
1776                 goto done;
1777         }
1778
1779         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1780                 ret = -EOPNOTSUPP;
1781                 goto done;
1782         }
1783
1784         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1785
1786 done:
1787         hci_dev_put(hdev);
1788         return ret;
1789 }
1790
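/* Keep HCI_CONNECTABLE and HCI_DISCOVERABLE in sync with a Write Scan
 * Enable value set outside of mgmt: SCAN_PAGE (0x02) corresponds to
 * connectable and SCAN_INQUIRY (0x01) to discoverable.
 */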
1791 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1792 {
1793         bool conn_changed, discov_changed;
1794
1795         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1796
1797         if ((scan & SCAN_PAGE))
1798                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1799                                                  &hdev->dev_flags);
1800         else
1801                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1802                                                   &hdev->dev_flags);
1803
1804         if ((scan & SCAN_INQUIRY)) {
1805                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1806                                                    &hdev->dev_flags);
1807         } else {
1808                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1809                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1810                                                     &hdev->dev_flags);
1811         }
1812
1813         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1814                 return;
1815
1816         if (conn_changed || discov_changed) {
1817                 /* In case this was disabled through mgmt */
1818                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1819
1820                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1821                         mgmt_update_adv_data(hdev);
1822
1823                 mgmt_new_settings(hdev);
1824         }
1825 }
1826
1827 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1828 {
1829         struct hci_dev *hdev;
1830         struct hci_dev_req dr;
1831         int err = 0;
1832
1833         if (copy_from_user(&dr, arg, sizeof(dr)))
1834                 return -EFAULT;
1835
1836         hdev = hci_dev_get(dr.dev_id);
1837         if (!hdev)
1838                 return -ENODEV;
1839
1840         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1841                 err = -EBUSY;
1842                 goto done;
1843         }
1844
1845         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1846                 err = -EOPNOTSUPP;
1847                 goto done;
1848         }
1849
1850         if (hdev->dev_type != HCI_BREDR) {
1851                 err = -EOPNOTSUPP;
1852                 goto done;
1853         }
1854
1855         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1856                 err = -EOPNOTSUPP;
1857                 goto done;
1858         }
1859
1860         switch (cmd) {
1861         case HCISETAUTH:
1862                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1863                                    HCI_INIT_TIMEOUT);
1864                 break;
1865
1866         case HCISETENCRYPT:
1867                 if (!lmp_encrypt_capable(hdev)) {
1868                         err = -EOPNOTSUPP;
1869                         break;
1870                 }
1871
1872                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1873                         /* Auth must be enabled first */
1874                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1875                                            HCI_INIT_TIMEOUT);
1876                         if (err)
1877                                 break;
1878                 }
1879
1880                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1881                                    HCI_INIT_TIMEOUT);
1882                 break;
1883
1884         case HCISETSCAN:
1885                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1886                                    HCI_INIT_TIMEOUT);
1887
1888                 /* Ensure that the connectable and discoverable states
1889                  * get correctly modified as this was a non-mgmt change.
1890                  */
1891                 if (!err)
1892                         hci_update_scan_state(hdev, dr.dev_opt);
1893                 break;
1894
1895         case HCISETLINKPOL:
1896                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1897                                    HCI_INIT_TIMEOUT);
1898                 break;
1899
1900         case HCISETLINKMODE:
1901                 hdev->link_mode = ((__u16) dr.dev_opt) &
1902                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1903                 break;
1904
1905         case HCISETPTYPE:
1906                 hdev->pkt_type = (__u16) dr.dev_opt;
1907                 break;
1908
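                /* For the MTU ioctls, dev_opt packs two 16-bit values
                 * back to back: the packet count in the first half and
                 * the MTU in the second (host memory order).
                 */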
1909         case HCISETACLMTU:
1910                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1911                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1912                 break;
1913
1914         case HCISETSCOMTU:
1915                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1916                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1917                 break;
1918
1919         default:
1920                 err = -EINVAL;
1921                 break;
1922         }
1923
1924 done:
1925         hci_dev_put(hdev);
1926         return err;
1927 }
1928
1929 int hci_get_dev_list(void __user *arg)
1930 {
1931         struct hci_dev *hdev;
1932         struct hci_dev_list_req *dl;
1933         struct hci_dev_req *dr;
1934         int n = 0, size, err;
1935         __u16 dev_num;
1936
1937         if (get_user(dev_num, (__u16 __user *) arg))
1938                 return -EFAULT;
1939
1940         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1941                 return -EINVAL;
1942
1943         size = sizeof(*dl) + dev_num * sizeof(*dr);
1944
1945         dl = kzalloc(size, GFP_KERNEL);
1946         if (!dl)
1947                 return -ENOMEM;
1948
1949         dr = dl->dev_req;
1950
1951         read_lock(&hci_dev_list_lock);
1952         list_for_each_entry(hdev, &hci_dev_list, list) {
1953                 unsigned long flags = hdev->flags;
1954
1955                 /* When the auto-off is configured it means the transport
1956                  * is running, but in that case still indicate that the
1957                  * device is actually down.
1958                  */
1959                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1960                         flags &= ~BIT(HCI_UP);
1961
1962                 (dr + n)->dev_id  = hdev->id;
1963                 (dr + n)->dev_opt = flags;
1964
1965                 if (++n >= dev_num)
1966                         break;
1967         }
1968         read_unlock(&hci_dev_list_lock);
1969
1970         dl->dev_num = n;
1971         size = sizeof(*dl) + n * sizeof(*dr);
1972
1973         err = copy_to_user(arg, dl, size);
1974         kfree(dl);
1975
1976         return err ? -EFAULT : 0;
1977 }
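
/* A minimal userspace sketch of the matching HCIGETDEVLIST call
 * (error handling omitted):
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	ioctl(dd, HCIGETDEVLIST, dl);
 */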
1978
1979 int hci_get_dev_info(void __user *arg)
1980 {
1981         struct hci_dev *hdev;
1982         struct hci_dev_info di;
1983         unsigned long flags;
1984         int err = 0;
1985
1986         if (copy_from_user(&di, arg, sizeof(di)))
1987                 return -EFAULT;
1988
1989         hdev = hci_dev_get(di.dev_id);
1990         if (!hdev)
1991                 return -ENODEV;
1992
1993         /* When the auto-off is configured it means the transport
1994          * is running, but in that case still indicate that the
1995          * device is actually down.
1996          */
1997         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1998                 flags = hdev->flags & ~BIT(HCI_UP);
1999         else
2000                 flags = hdev->flags;
2001
2002         strcpy(di.name, hdev->name);
2003         di.bdaddr   = hdev->bdaddr;
2004         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2005         di.flags    = flags;
2006         di.pkt_type = hdev->pkt_type;
2007         if (lmp_bredr_capable(hdev)) {
2008                 di.acl_mtu  = hdev->acl_mtu;
2009                 di.acl_pkts = hdev->acl_pkts;
2010                 di.sco_mtu  = hdev->sco_mtu;
2011                 di.sco_pkts = hdev->sco_pkts;
2012         } else {
2013                 di.acl_mtu  = hdev->le_mtu;
2014                 di.acl_pkts = hdev->le_pkts;
2015                 di.sco_mtu  = 0;
2016                 di.sco_pkts = 0;
2017         }
2018         di.link_policy = hdev->link_policy;
2019         di.link_mode   = hdev->link_mode;
2020
2021         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2022         memcpy(&di.features, &hdev->features, sizeof(di.features));
2023
2024         if (copy_to_user(arg, &di, sizeof(di)))
2025                 err = -EFAULT;
2026
2027         hci_dev_put(hdev);
2028
2029         return err;
2030 }
2031
2032 /* ---- Interface to HCI drivers ---- */
2033
2034 static int hci_rfkill_set_block(void *data, bool blocked)
2035 {
2036         struct hci_dev *hdev = data;
2037
2038         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2039
2040         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2041                 return -EBUSY;
2042
2043         if (blocked) {
2044                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2045                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2046                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2047                         hci_dev_do_close(hdev);
2048         } else {
2049                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2050         }
2051
2052         return 0;
2053 }
2054
2055 static const struct rfkill_ops hci_rfkill_ops = {
2056         .set_block = hci_rfkill_set_block,
2057 };
2058
2059 static void hci_power_on(struct work_struct *work)
2060 {
2061         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2062         int err;
2063
2064         BT_DBG("%s", hdev->name);
2065
2066         err = hci_dev_do_open(hdev);
2067         if (err < 0) {
2068                 hci_dev_lock(hdev);
2069                 mgmt_set_powered_failed(hdev, err);
2070                 hci_dev_unlock(hdev);
2071                 return;
2072         }
2073
2074         /* During the HCI setup phase, a few error conditions are
2075          * ignored and they need to be checked now. If they are still
2076          * valid, it is important to turn the device back off.
2077          */
2078         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2079             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2080             (hdev->dev_type == HCI_BREDR &&
2081              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2082              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2083                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2084                 hci_dev_do_close(hdev);
2085         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2086                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2087                                    HCI_AUTO_OFF_TIMEOUT);
2088         }
2089
2090         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2091                 /* For unconfigured devices, set the HCI_RAW flag
2092                  * so that userspace can easily identify them.
2093                  */
2094                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2095                         set_bit(HCI_RAW, &hdev->flags);
2096
2097                 /* For fully configured devices, this will send
2098                  * the Index Added event. For unconfigured devices,
2099                  * it will send an Unconfigured Index Added event.
2100                  *
2101                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2102                  * and no event will be sent.
2103                  */
2104                 mgmt_index_added(hdev);
2105         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2106                 /* Now that the controller is configured, it is
2107                  * important to clear the HCI_RAW flag.
2108                  */
2109                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2110                         clear_bit(HCI_RAW, &hdev->flags);
2111
2112                 /* Powering on the controller with HCI_CONFIG set only
2113                  * happens with the transition from unconfigured to
2114                  * configured. This will send the Index Added event.
2115                  */
2116                 mgmt_index_added(hdev);
2117         }
2118 }
2119
2120 static void hci_power_off(struct work_struct *work)
2121 {
2122         struct hci_dev *hdev = container_of(work, struct hci_dev,
2123                                             power_off.work);
2124
2125         BT_DBG("%s", hdev->name);
2126
2127         hci_dev_do_close(hdev);
2128 }
2129
2130 static void hci_discov_off(struct work_struct *work)
2131 {
2132         struct hci_dev *hdev;
2133
2134         hdev = container_of(work, struct hci_dev, discov_off.work);
2135
2136         BT_DBG("%s", hdev->name);
2137
2138         mgmt_discoverable_timeout(hdev);
2139 }
2140
2141 void hci_uuids_clear(struct hci_dev *hdev)
2142 {
2143         struct bt_uuid *uuid, *tmp;
2144
2145         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2146                 list_del(&uuid->list);
2147                 kfree(uuid);
2148         }
2149 }
2150
2151 void hci_link_keys_clear(struct hci_dev *hdev)
2152 {
2153         struct link_key *key;
2154
2155         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2156                 list_del_rcu(&key->list);
2157                 kfree_rcu(key, rcu);
2158         }
2159 }
2160
2161 void hci_smp_ltks_clear(struct hci_dev *hdev)
2162 {
2163         struct smp_ltk *k;
2164
2165         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2166                 list_del_rcu(&k->list);
2167                 kfree_rcu(k, rcu);
2168         }
2169 }
2170
2171 void hci_smp_irks_clear(struct hci_dev *hdev)
2172 {
2173         struct smp_irk *k;
2174
2175         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2176                 list_del_rcu(&k->list);
2177                 kfree_rcu(k, rcu);
2178         }
2179 }
2180
2181 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2182 {
2183         struct link_key *k;
2184
2185         rcu_read_lock();
2186         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2187                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2188                         rcu_read_unlock();
2189                         return k;
2190                 }
2191         }
2192         rcu_read_unlock();
2193
2194         return NULL;
2195 }
2196
2197 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2198                                u8 key_type, u8 old_key_type)
2199 {
2200         /* Legacy key */
2201         if (key_type < 0x03)
2202                 return true;
2203
2204         /* Debug keys are insecure so don't store them persistently */
2205         if (key_type == HCI_LK_DEBUG_COMBINATION)
2206                 return false;
2207
2208         /* Changed combination key and there's no previous one */
2209         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2210                 return false;
2211
2212         /* Security mode 3 case */
2213         if (!conn)
2214                 return true;
2215
2216         /* BR/EDR key derived using SC from an LE link */
2217         if (conn->type == LE_LINK)
2218                 return true;
2219
2220         /* Neither local nor remote side had no-bonding as a requirement */
2221         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2222                 return true;
2223
2224         /* Local side had dedicated bonding as requirement */
2225         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2226                 return true;
2227
2228         /* Remote side had dedicated bonding as requirement */
2229         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2230                 return true;
2231
2232         /* If none of the above criteria match, then don't store the key
2233          * persistently */
2234         return false;
2235 }
2236
2237 static u8 ltk_role(u8 type)
2238 {
2239         if (type == SMP_LTK)
2240                 return HCI_ROLE_MASTER;
2241
2242         return HCI_ROLE_SLAVE;
2243 }
2244
2245 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2246                              u8 addr_type, u8 role)
2247 {
2248         struct smp_ltk *k;
2249
2250         rcu_read_lock();
2251         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2252                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2253                         continue;
2254
2255                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2256                         rcu_read_unlock();
2257                         return k;
2258                 }
2259         }
2260         rcu_read_unlock();
2261
2262         return NULL;
2263 }
2264
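/* Resolve a resolvable private address to its IRK: first check the RPA
 * cached with each IRK, then fall back to cryptographically matching
 * the RPA against every stored IRK value, caching it on a match.
 */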
2265 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2266 {
2267         struct smp_irk *irk;
2268
2269         rcu_read_lock();
2270         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2271                 if (!bacmp(&irk->rpa, rpa)) {
2272                         rcu_read_unlock();
2273                         return irk;
2274                 }
2275         }
2276
2277         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2278                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2279                         bacpy(&irk->rpa, rpa);
2280                         rcu_read_unlock();
2281                         return irk;
2282                 }
2283         }
2284         rcu_read_unlock();
2285
2286         return NULL;
2287 }
2288
2289 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2290                                      u8 addr_type)
2291 {
2292         struct smp_irk *irk;
2293
2294         /* Identity Address must be public or static random (top two bits set) */
2295         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2296                 return NULL;
2297
2298         rcu_read_lock();
2299         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2300                 if (addr_type == irk->addr_type &&
2301                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2302                         rcu_read_unlock();
2303                         return irk;
2304                 }
2305         }
2306         rcu_read_unlock();
2307
2308         return NULL;
2309 }
2310
2311 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2312                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2313                                   u8 pin_len, bool *persistent)
2314 {
2315         struct link_key *key, *old_key;
2316         u8 old_key_type;
2317
2318         old_key = hci_find_link_key(hdev, bdaddr);
2319         if (old_key) {
2320                 old_key_type = old_key->type;
2321                 key = old_key;
2322         } else {
2323                 old_key_type = conn ? conn->key_type : 0xff;
2324                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2325                 if (!key)
2326                         return NULL;
2327                 list_add_rcu(&key->list, &hdev->link_keys);
2328         }
2329
2330         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2331
2332         /* Some buggy controller combinations generate a changed
2333          * combination key for legacy pairing even when there's no
2334          * previous key */
2335         if (type == HCI_LK_CHANGED_COMBINATION &&
2336             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2337                 type = HCI_LK_COMBINATION;
2338                 if (conn)
2339                         conn->key_type = type;
2340         }
2341
2342         bacpy(&key->bdaddr, bdaddr);
2343         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2344         key->pin_len = pin_len;
2345
2346         if (type == HCI_LK_CHANGED_COMBINATION)
2347                 key->type = old_key_type;
2348         else
2349                 key->type = type;
2350
2351         if (persistent)
2352                 *persistent = hci_persistent_key(hdev, conn, type,
2353                                                  old_key_type);
2354
2355         return key;
2356 }
2357
2358 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2359                             u8 addr_type, u8 type, u8 authenticated,
2360                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2361 {
2362         struct smp_ltk *key, *old_key;
2363         u8 role = ltk_role(type);
2364
2365         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2366         if (old_key)
2367                 key = old_key;
2368         else {
2369                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2370                 if (!key)
2371                         return NULL;
2372                 list_add_rcu(&key->list, &hdev->long_term_keys);
2373         }
2374
2375         bacpy(&key->bdaddr, bdaddr);
2376         key->bdaddr_type = addr_type;
2377         memcpy(key->val, tk, sizeof(key->val));
2378         key->authenticated = authenticated;
2379         key->ediv = ediv;
2380         key->rand = rand;
2381         key->enc_size = enc_size;
2382         key->type = type;
2383
2384         return key;
2385 }
2386
2387 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2388                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2389 {
2390         struct smp_irk *irk;
2391
2392         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2393         if (!irk) {
2394                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2395                 if (!irk)
2396                         return NULL;
2397
2398                 bacpy(&irk->bdaddr, bdaddr);
2399                 irk->addr_type = addr_type;
2400
2401                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2402         }
2403
2404         memcpy(irk->val, val, 16);
2405         bacpy(&irk->rpa, rpa);
2406
2407         return irk;
2408 }
2409
2410 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2411 {
2412         struct link_key *key;
2413
2414         key = hci_find_link_key(hdev, bdaddr);
2415         if (!key)
2416                 return -ENOENT;
2417
2418         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2419
2420         list_del_rcu(&key->list);
2421         kfree_rcu(key, rcu);
2422
2423         return 0;
2424 }
2425
2426 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2427 {
2428         struct smp_ltk *k;
2429         int removed = 0;
2430
2431         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2432                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2433                         continue;
2434
2435                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2436
2437                 list_del_rcu(&k->list);
2438                 kfree_rcu(k, rcu);
2439                 removed++;
2440         }
2441
2442         return removed ? 0 : -ENOENT;
2443 }
2444
2445 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2446 {
2447         struct smp_irk *k;
2448
2449         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2450                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2451                         continue;
2452
2453                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2454
2455                 list_del_rcu(&k->list);
2456                 kfree_rcu(k, rcu);
2457         }
2458 }
2459
2460 /* HCI command timeout handler: the controller did not respond in time */
2461 static void hci_cmd_timeout(struct work_struct *work)
2462 {
2463         struct hci_dev *hdev = container_of(work, struct hci_dev,
2464                                             cmd_timer.work);
2465
2466         if (hdev->sent_cmd) {
2467                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2468                 u16 opcode = __le16_to_cpu(sent->opcode);
2469
2470                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2471         } else {
2472                 BT_ERR("%s command tx timeout", hdev->name);
2473         }
2474
2475         atomic_set(&hdev->cmd_cnt, 1);
2476         queue_work(hdev->workqueue, &hdev->cmd_work);
2477 }
2478
2479 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2480                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2481 {
2482         struct oob_data *data;
2483
2484         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2485                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2486                         continue;
2487                 if (data->bdaddr_type != bdaddr_type)
2488                         continue;
2489                 return data;
2490         }
2491
2492         return NULL;
2493 }
2494
2495 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2496                                u8 bdaddr_type)
2497 {
2498         struct oob_data *data;
2499
2500         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2501         if (!data)
2502                 return -ENOENT;
2503
2504         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2505
2506         list_del(&data->list);
2507         kfree(data);
2508
2509         return 0;
2510 }
2511
2512 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2513 {
2514         struct oob_data *data, *n;
2515
2516         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2517                 list_del(&data->list);
2518                 kfree(data);
2519         }
2520 }
2521
2522 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2523                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2524                             u8 *hash256, u8 *rand256)
2525 {
2526         struct oob_data *data;
2527
2528         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2529         if (!data) {
2530                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2531                 if (!data)
2532                         return -ENOMEM;
2533
2534                 bacpy(&data->bdaddr, bdaddr);
2535                 data->bdaddr_type = bdaddr_type;
2536                 list_add(&data->list, &hdev->remote_oob_data);
2537         }
2538
2539         if (hash192 && rand192) {
2540                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2541                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2542         } else {
2543                 memset(data->hash192, 0, sizeof(data->hash192));
2544                 memset(data->rand192, 0, sizeof(data->rand192));
2545         }
2546
2547         if (hash256 && rand256) {
2548                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2549                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2550         } else {
2551                 memset(data->hash256, 0, sizeof(data->hash256));
2552                 memset(data->rand256, 0, sizeof(data->rand256));
2553         }
2554
2555         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2556
2557         return 0;
2558 }
2559
2560 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2561                                          bdaddr_t *bdaddr, u8 type)
2562 {
2563         struct bdaddr_list *b;
2564
2565         list_for_each_entry(b, bdaddr_list, list) {
2566                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2567                         return b;
2568         }
2569
2570         return NULL;
2571 }
2572
2573 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2574 {
2575         struct list_head *p, *n;
2576
2577         list_for_each_safe(p, n, bdaddr_list) {
2578                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2579
2580                 list_del(p);
2581                 kfree(b);
2582         }
2583 }
2584
2585 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2586 {
2587         struct bdaddr_list *entry;
2588
2589         if (!bacmp(bdaddr, BDADDR_ANY))
2590                 return -EBADF;
2591
2592         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2593                 return -EEXIST;
2594
2595         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2596         if (!entry)
2597                 return -ENOMEM;
2598
2599         bacpy(&entry->bdaddr, bdaddr);
2600         entry->bdaddr_type = type;
2601
2602         list_add(&entry->list, list);
2603
2604         return 0;
2605 }
2606
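/* Passing BDADDR_ANY clears the whole list */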
2607 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2608 {
2609         struct bdaddr_list *entry;
2610
2611         if (!bacmp(bdaddr, BDADDR_ANY)) {
2612                 hci_bdaddr_list_clear(list);
2613                 return 0;
2614         }
2615
2616         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2617         if (!entry)
2618                 return -ENOENT;
2619
2620         list_del(&entry->list);
2621         kfree(entry);
2622
2623         return 0;
2624 }
2625
2626 /* This function requires the caller holds hdev->lock */
2627 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2628                                                bdaddr_t *addr, u8 addr_type)
2629 {
2630         struct hci_conn_params *params;
2631
2632         /* The conn params list only contains identity addresses */
2633         if (!hci_is_identity_address(addr, addr_type))
2634                 return NULL;
2635
2636         list_for_each_entry(params, &hdev->le_conn_params, list) {
2637                 if (bacmp(&params->addr, addr) == 0 &&
2638                     params->addr_type == addr_type) {
2639                         return params;
2640                 }
2641         }
2642
2643         return NULL;
2644 }
2645
2646 /* This function requires the caller holds hdev->lock */
2647 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2648                                                   bdaddr_t *addr, u8 addr_type)
2649 {
2650         struct hci_conn_params *param;
2651
2652         /* The list only contains identity addresses */
2653         if (!hci_is_identity_address(addr, addr_type))
2654                 return NULL;
2655
2656         list_for_each_entry(param, list, action) {
2657                 if (bacmp(&param->addr, addr) == 0 &&
2658                     param->addr_type == addr_type)
2659                         return param;
2660         }
2661
2662         return NULL;
2663 }
2664
2665 /* This function requires the caller holds hdev->lock */
2666 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2667                                             bdaddr_t *addr, u8 addr_type)
2668 {
2669         struct hci_conn_params *params;
2670
2671         if (!hci_is_identity_address(addr, addr_type))
2672                 return NULL;
2673
2674         params = hci_conn_params_lookup(hdev, addr, addr_type);
2675         if (params)
2676                 return params;
2677
2678         params = kzalloc(sizeof(*params), GFP_KERNEL);
2679         if (!params) {
2680                 BT_ERR("Out of memory");
2681                 return NULL;
2682         }
2683
2684         bacpy(&params->addr, addr);
2685         params->addr_type = addr_type;
2686
2687         list_add(&params->list, &hdev->le_conn_params);
2688         INIT_LIST_HEAD(&params->action);
2689
2690         params->conn_min_interval = hdev->le_conn_min_interval;
2691         params->conn_max_interval = hdev->le_conn_max_interval;
2692         params->conn_latency = hdev->le_conn_latency;
2693         params->supervision_timeout = hdev->le_supv_timeout;
2694         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2695
2696         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2697
2698         return params;
2699 }
2700
2701 static void hci_conn_params_free(struct hci_conn_params *params)
2702 {
2703         if (params->conn) {
2704                 hci_conn_drop(params->conn);
2705                 hci_conn_put(params->conn);
2706         }
2707
2708         list_del(&params->action);
2709         list_del(&params->list);
2710         kfree(params);
2711 }
2712
2713 /* This function requires the caller holds hdev->lock */
2714 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2715 {
2716         struct hci_conn_params *params;
2717
2718         params = hci_conn_params_lookup(hdev, addr, addr_type);
2719         if (!params)
2720                 return;
2721
2722         hci_conn_params_free(params);
2723
2724         hci_update_background_scan(hdev);
2725
2726         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2727 }
2728
2729 /* This function requires the caller holds hdev->lock */
2730 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2731 {
2732         struct hci_conn_params *params, *tmp;
2733
2734         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2735                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2736                         continue;
2737                 list_del(&params->list);
2738                 kfree(params);
2739         }
2740
2741         BT_DBG("All disabled LE connection parameters were removed");
2742 }
2743
2744 /* This function requires the caller holds hdev->lock */
2745 void hci_conn_params_clear_all(struct hci_dev *hdev)
2746 {
2747         struct hci_conn_params *params, *tmp;
2748
2749         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2750                 hci_conn_params_free(params);
2751
2752         hci_update_background_scan(hdev);
2753
2754         BT_DBG("All LE connection parameters were removed");
2755 }
2756
2757 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2758 {
2759         if (status) {
2760                 BT_ERR("Failed to start inquiry: status %d", status);
2761
2762                 hci_dev_lock(hdev);
2763                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2764                 hci_dev_unlock(hdev);
2765                 return;
2766         }
2767 }
2768
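/* Once LE scanning has been disabled, either finish discovery (LE-only
 * case) or continue an interleaved discovery with a classic inquiry
 * using the general inquiry access code.
 */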
2769 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2770 {
2771         /* General inquiry access code (GIAC) */
2772         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2773         struct hci_request req;
2774         struct hci_cp_inquiry cp;
2775         int err;
2776
2777         if (status) {
2778                 BT_ERR("Failed to disable LE scanning: status %d", status);
2779                 return;
2780         }
2781
2782         switch (hdev->discovery.type) {
2783         case DISCOV_TYPE_LE:
2784                 hci_dev_lock(hdev);
2785                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2786                 hci_dev_unlock(hdev);
2787                 break;
2788
2789         case DISCOV_TYPE_INTERLEAVED:
2790                 hci_req_init(&req, hdev);
2791
2792                 memset(&cp, 0, sizeof(cp));
2793                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2794                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2795                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2796
2797                 hci_dev_lock(hdev);
2798
2799                 hci_inquiry_cache_flush(hdev);
2800
2801                 err = hci_req_run(&req, inquiry_complete);
2802                 if (err) {
2803                         BT_ERR("Inquiry request failed: err %d", err);
2804                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2805                 }
2806
2807                 hci_dev_unlock(hdev);
2808                 break;
2809         }
2810 }
2811
2812 static void le_scan_disable_work(struct work_struct *work)
2813 {
2814         struct hci_dev *hdev = container_of(work, struct hci_dev,
2815                                             le_scan_disable.work);
2816         struct hci_request req;
2817         int err;
2818
2819         BT_DBG("%s", hdev->name);
2820
2821         hci_req_init(&req, hdev);
2822
2823         hci_req_add_le_scan_disable(&req);
2824
2825         err = hci_req_run(&req, le_scan_disable_work_complete);
2826         if (err)
2827                 BT_ERR("Disable LE scanning request failed: err %d", err);
2828 }
2829
2830 /* Copy the Identity Address of the controller.
2831  *
2832  * If the controller has a public BD_ADDR, then by default use that one.
2833  * If this is an LE-only controller without a public address, default to
2834  * the static random address.
2835  *
2836  * For debugging purposes it is possible to force controllers with a
2837  * public address to use the static random address instead.
2838  *
2839  * In case BR/EDR has been disabled on a dual-mode controller and
2840  * userspace has configured a static address, then that address
2841  * becomes the identity address instead of the public BR/EDR address.
2842  */
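/* This is used, for example, by SMP when distributing the Identity
 * Address Information during key distribution.
 */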
2843 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2844                                u8 *bdaddr_type)
2845 {
2846         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2847             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2848             (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2849              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2850                 bacpy(bdaddr, &hdev->static_addr);
2851                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2852         } else {
2853                 bacpy(bdaddr, &hdev->bdaddr);
2854                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2855         }
2856 }
2857
2858 /* Alloc HCI device */
2859 struct hci_dev *hci_alloc_dev(void)
2860 {
2861         struct hci_dev *hdev;
2862
2863         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2864         if (!hdev)
2865                 return NULL;
2866
2867         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2868         hdev->esco_type = (ESCO_HV1);
2869         hdev->link_mode = (HCI_LM_ACCEPT);
2870         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2871         hdev->io_capability = 0x03;     /* No Input No Output */
2872         hdev->manufacturer = 0xffff;    /* Default to internal use */
2873         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2874         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2875
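        /* The timing defaults below are in Bluetooth baseband units:
         * sniff, advertising and scan intervals/windows in 0.625 ms
         * slots, connection intervals in 1.25 ms units and the
         * supervision timeout in 10 ms units (e.g. 0x0060 is a 60 ms
         * scan interval and 0x002a a 420 ms supervision timeout).
         */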
2876         hdev->sniff_max_interval = 800;
2877         hdev->sniff_min_interval = 80;
2878
2879         hdev->le_adv_channel_map = 0x07;
2880         hdev->le_adv_min_interval = 0x0800;
2881         hdev->le_adv_max_interval = 0x0800;
2882         hdev->le_scan_interval = 0x0060;
2883         hdev->le_scan_window = 0x0030;
2884         hdev->le_conn_min_interval = 0x0028;
2885         hdev->le_conn_max_interval = 0x0038;
2886         hdev->le_conn_latency = 0x0000;
2887         hdev->le_supv_timeout = 0x002a;
2888         hdev->le_def_tx_len = 0x001b;
2889         hdev->le_def_tx_time = 0x0148;
2890         hdev->le_max_tx_len = 0x001b;
2891         hdev->le_max_tx_time = 0x0148;
2892         hdev->le_max_rx_len = 0x001b;
2893         hdev->le_max_rx_time = 0x0148;
2894
2895         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2896         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2897         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2898         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2899
2900         mutex_init(&hdev->lock);
2901         mutex_init(&hdev->req_lock);
2902
2903         INIT_LIST_HEAD(&hdev->mgmt_pending);
2904         INIT_LIST_HEAD(&hdev->blacklist);
2905         INIT_LIST_HEAD(&hdev->whitelist);
2906         INIT_LIST_HEAD(&hdev->uuids);
2907         INIT_LIST_HEAD(&hdev->link_keys);
2908         INIT_LIST_HEAD(&hdev->long_term_keys);
2909         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2910         INIT_LIST_HEAD(&hdev->remote_oob_data);
2911         INIT_LIST_HEAD(&hdev->le_white_list);
2912         INIT_LIST_HEAD(&hdev->le_conn_params);
2913         INIT_LIST_HEAD(&hdev->pend_le_conns);
2914         INIT_LIST_HEAD(&hdev->pend_le_reports);
2915         INIT_LIST_HEAD(&hdev->conn_hash.list);
2916
2917         INIT_WORK(&hdev->rx_work, hci_rx_work);
2918         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2919         INIT_WORK(&hdev->tx_work, hci_tx_work);
2920         INIT_WORK(&hdev->power_on, hci_power_on);
2921
2922         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2923         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2924         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2925
2926         skb_queue_head_init(&hdev->rx_q);
2927         skb_queue_head_init(&hdev->cmd_q);
2928         skb_queue_head_init(&hdev->raw_q);
2929
2930         init_waitqueue_head(&hdev->req_wait_q);
2931
2932         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2933
2934         hci_init_sysfs(hdev);
2935         discovery_init(hdev);
2936
2937         return hdev;
2938 }
2939 EXPORT_SYMBOL(hci_alloc_dev);
2940
2941 /* Free HCI device */
2942 void hci_free_dev(struct hci_dev *hdev)
2943 {
2944         /* will be freed via the device release callback */
2945         put_device(&hdev->dev);
2946 }
2947 EXPORT_SYMBOL(hci_free_dev);
2948
2949 /* Register HCI device */
2950 int hci_register_dev(struct hci_dev *hdev)
2951 {
2952         int id, error;
2953
2954         if (!hdev->open || !hdev->close || !hdev->send)
2955                 return -EINVAL;
2956
2957         /* Do not allow HCI_AMP devices to register at index 0,
2958          * so the index can be used as the AMP controller ID.
2959          */
2960         switch (hdev->dev_type) {
2961         case HCI_BREDR:
2962                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2963                 break;
2964         case HCI_AMP:
2965                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2966                 break;
2967         default:
2968                 return -EINVAL;
2969         }
2970
2971         if (id < 0)
2972                 return id;
2973
2974         sprintf(hdev->name, "hci%d", id);
2975         hdev->id = id;
2976
2977         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2978
2979         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2980                                           WQ_MEM_RECLAIM, 1, hdev->name);
2981         if (!hdev->workqueue) {
2982                 error = -ENOMEM;
2983                 goto err;
2984         }
2985
2986         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2987                                               WQ_MEM_RECLAIM, 1, hdev->name);
2988         if (!hdev->req_workqueue) {
2989                 destroy_workqueue(hdev->workqueue);
2990                 error = -ENOMEM;
2991                 goto err;
2992         }
2993
2994         if (!IS_ERR_OR_NULL(bt_debugfs))
2995                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2996
2997         dev_set_name(&hdev->dev, "%s", hdev->name);
2998
2999         error = device_add(&hdev->dev);
3000         if (error < 0)
3001                 goto err_wqueue;
3002
3003         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3004                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3005                                     hdev);
3006         if (hdev->rfkill) {
3007                 if (rfkill_register(hdev->rfkill) < 0) {
3008                         rfkill_destroy(hdev->rfkill);
3009                         hdev->rfkill = NULL;
3010                 }
3011         }
3012
3013         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3014                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3015
3016         set_bit(HCI_SETUP, &hdev->dev_flags);
3017         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3018
3019         if (hdev->dev_type == HCI_BREDR) {
3020                 /* Assume BR/EDR support until proven otherwise (such as
3021                  * through reading supported features during init).
3022                  */
3023                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3024         }
3025
3026         write_lock(&hci_dev_list_lock);
3027         list_add(&hdev->list, &hci_dev_list);
3028         write_unlock(&hci_dev_list_lock);
3029
3030         /* Devices that are marked for raw-only usage are unconfigured
3031          * and should not be included in normal operation.
3032          */
3033         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3034                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3035
3036         hci_notify(hdev, HCI_DEV_REG);
3037         hci_dev_hold(hdev);
3038
3039         queue_work(hdev->req_workqueue, &hdev->power_on);
3040
3041         return id;
3042
3043 err_wqueue:
3044         destroy_workqueue(hdev->workqueue);
3045         destroy_workqueue(hdev->req_workqueue);
3046 err:
3047         ida_simple_remove(&hci_index_ida, hdev->id);
3048
3049         return error;
3050 }
3051 EXPORT_SYMBOL(hci_register_dev);
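
/* A minimal sketch of the driver side (callback names hypothetical):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *	hci_set_drvdata(hdev, data);
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */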
3052
3053 /* Unregister HCI device */
3054 void hci_unregister_dev(struct hci_dev *hdev)
3055 {
3056         int i, id;
3057
3058         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3059
3060         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3061
3062         id = hdev->id;
3063
3064         write_lock(&hci_dev_list_lock);
3065         list_del(&hdev->list);
3066         write_unlock(&hci_dev_list_lock);
3067
3068         hci_dev_do_close(hdev);
3069
3070         for (i = 0; i < NUM_REASSEMBLY; i++)
3071                 kfree_skb(hdev->reassembly[i]);
3072
3073         cancel_work_sync(&hdev->power_on);
3074
3075         if (!test_bit(HCI_INIT, &hdev->flags) &&
3076             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3077             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
3078                 hci_dev_lock(hdev);
3079                 mgmt_index_removed(hdev);
3080                 hci_dev_unlock(hdev);
3081         }
3082
3083         /* mgmt_index_removed should take care of emptying the
3084          * pending list */
3085         BUG_ON(!list_empty(&hdev->mgmt_pending));
3086
3087         hci_notify(hdev, HCI_DEV_UNREG);
3088
3089         if (hdev->rfkill) {
3090                 rfkill_unregister(hdev->rfkill);
3091                 rfkill_destroy(hdev->rfkill);
3092         }
3093
3094         smp_unregister(hdev);
3095
3096         device_del(&hdev->dev);
3097
3098         debugfs_remove_recursive(hdev->debugfs);
3099
3100         destroy_workqueue(hdev->workqueue);
3101         destroy_workqueue(hdev->req_workqueue);
3102
3103         hci_dev_lock(hdev);
3104         hci_bdaddr_list_clear(&hdev->blacklist);
3105         hci_bdaddr_list_clear(&hdev->whitelist);
3106         hci_uuids_clear(hdev);
3107         hci_link_keys_clear(hdev);
3108         hci_smp_ltks_clear(hdev);
3109         hci_smp_irks_clear(hdev);
3110         hci_remote_oob_data_clear(hdev);
3111         hci_bdaddr_list_clear(&hdev->le_white_list);
3112         hci_conn_params_clear_all(hdev);
3113         hci_discovery_filter_clear(hdev);
3114         hci_dev_unlock(hdev);
3115
3116         hci_dev_put(hdev);
3117
3118         ida_simple_remove(&hci_index_ida, id);
3119 }
3120 EXPORT_SYMBOL(hci_unregister_dev);
3121
3122 /* Suspend HCI device */
3123 int hci_suspend_dev(struct hci_dev *hdev)
3124 {
3125         hci_notify(hdev, HCI_DEV_SUSPEND);
3126         return 0;
3127 }
3128 EXPORT_SYMBOL(hci_suspend_dev);
3129
3130 /* Resume HCI device */
3131 int hci_resume_dev(struct hci_dev *hdev)
3132 {
3133         hci_notify(hdev, HCI_DEV_RESUME);
3134         return 0;
3135 }
3136 EXPORT_SYMBOL(hci_resume_dev);
3137
3138 /* Reset HCI device */
3139 int hci_reset_dev(struct hci_dev *hdev)
3140 {
3141         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3142         struct sk_buff *skb;
3143
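        /* The three bytes form a complete HCI event packet: the
         * Hardware Error event code, a parameter length of one and a
         * hardware code of 0x00.
         */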
3144         skb = bt_skb_alloc(3, GFP_ATOMIC);
3145         if (!skb)
3146                 return -ENOMEM;
3147
3148         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3149         memcpy(skb_put(skb, 3), hw_err, 3);
3150
3151         /* Send Hardware Error to upper stack */
3152         return hci_recv_frame(hdev, skb);
3153 }
3154 EXPORT_SYMBOL(hci_reset_dev);
3155
3156 /* Receive frame from HCI drivers */
3157 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3158 {
3159         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3160                       !test_bit(HCI_INIT, &hdev->flags))) {
3161                 kfree_skb(skb);
3162                 return -ENXIO;
3163         }
3164
3165         /* Incoming skb */
3166         bt_cb(skb)->incoming = 1;
3167
3168         /* Time stamp */
3169         __net_timestamp(skb);
3170
3171         skb_queue_tail(&hdev->rx_q, skb);
3172         queue_work(hdev->workqueue, &hdev->rx_work);
3173
3174         return 0;
3175 }
3176 EXPORT_SYMBOL(hci_recv_frame);
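
/* A minimal sketch of a driver handing a received event packet to the
 * core (names hypothetical, typically from interrupt context):
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, count), buf, count);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */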
3177
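/* Accumulate a partial HCI packet of the given type in
 * hdev->reassembly[index]: a buffer sized for the packet type is
 * allocated on the first fragment and the expected length is adjusted
 * once the packet header has been received.
 */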
3178 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3179                           int count, __u8 index)
3180 {
3181         int len = 0;
3182         int hlen = 0;
3183         int remain = count;
3184         struct sk_buff *skb;
3185         struct bt_skb_cb *scb;
3186
3187         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3188             index >= NUM_REASSEMBLY)
3189                 return -EILSEQ;
3190
3191         skb = hdev->reassembly[index];
3192
3193         if (!skb) {
3194                 switch (type) {
3195                 case HCI_ACLDATA_PKT:
3196                         len = HCI_MAX_FRAME_SIZE;
3197                         hlen = HCI_ACL_HDR_SIZE;
3198                         break;
3199                 case HCI_EVENT_PKT:
3200                         len = HCI_MAX_EVENT_SIZE;
3201                         hlen = HCI_EVENT_HDR_SIZE;
3202                         break;
3203                 case HCI_SCODATA_PKT:
3204                         len = HCI_MAX_SCO_SIZE;
3205                         hlen = HCI_SCO_HDR_SIZE;
3206                         break;
3207                 }
3208
3209                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3210                 if (!skb)
3211                         return -ENOMEM;
3212
3213                 scb = (void *) skb->cb;
3214                 scb->expect = hlen;
3215                 scb->pkt_type = type;
3216
3217                 hdev->reassembly[index] = skb;
3218         }
3219
3220         while (count) {
3221                 scb = (void *) skb->cb;
3222                 len = min_t(uint, scb->expect, count);
3223
3224                 memcpy(skb_put(skb, len), data, len);
3225
3226                 count -= len;
3227                 data += len;
3228                 scb->expect -= len;
3229                 remain = count;
3230
3231                 switch (type) {
3232                 case HCI_EVENT_PKT:
3233                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3234                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3235                                 scb->expect = h->plen;
3236
3237                                 if (skb_tailroom(skb) < scb->expect) {
3238                                         kfree_skb(skb);
3239                                         hdev->reassembly[index] = NULL;
3240                                         return -ENOMEM;
3241                                 }
3242                         }
3243                         break;
3244
3245                 case HCI_ACLDATA_PKT:
3246                         if (skb->len == HCI_ACL_HDR_SIZE) {
3247                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3248                                 scb->expect = __le16_to_cpu(h->dlen);
3249
3250                                 if (skb_tailroom(skb) < scb->expect) {
3251                                         kfree_skb(skb);
3252                                         hdev->reassembly[index] = NULL;
3253                                         return -ENOMEM;
3254                                 }
3255                         }
3256                         break;
3257
3258                 case HCI_SCODATA_PKT:
3259                         if (skb->len == HCI_SCO_HDR_SIZE) {
3260                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3261                                 scb->expect = h->dlen;
3262
3263                                 if (skb_tailroom(skb) < scb->expect) {
3264                                         kfree_skb(skb);
3265                                         hdev->reassembly[index] = NULL;
3266                                         return -ENOMEM;
3267                                 }
3268                         }
3269                         break;
3270                 }
3271
3272                 if (scb->expect == 0) {
3273                         /* Complete frame */
3274
3275                         bt_cb(skb)->pkt_type = type;
3276                         hci_recv_frame(hdev, skb);
3277
3278                         hdev->reassembly[index] = NULL;
3279                         return remain;
3280                 }
3281         }
3282
3283         return remain;
3284 }
3285
3286 #define STREAM_REASSEMBLY 0
3287
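/* For drivers whose transport delivers a raw byte stream (UART-style H:4
 * framing): the first byte of every frame is the packet type indicator,
 * which is peeled off here before the remainder is fed through
 * hci_reassembly() using the single STREAM_REASSEMBLY slot.
 */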
3288 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3289 {
3290         int type;
3291         int rem = 0;
3292
3293         while (count) {
3294                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3295
3296                 if (!skb) {
3297                         struct { char type; } *pkt;
3298
3299                         /* Start of the frame */
3300                         pkt = data;
3301                         type = pkt->type;
3302
3303                         data++;
3304                         count--;
3305                 } else
3306                         type = bt_cb(skb)->pkt_type;
3307
3308                 rem = hci_reassembly(hdev, type, data, count,
3309                                      STREAM_REASSEMBLY);
3310                 if (rem < 0)
3311                         return rem;
3312
3313                 data += (count - rem);
3314                 count = rem;
3315         }
3316
3317         return rem;
3318 }
3319 EXPORT_SYMBOL(hci_recv_stream_fragment);
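/* A minimal sketch of how a line discipline or UART driver might feed its
 * receive buffer into the stream reassembler ("my_uart_rx" and its
 * arguments are hypothetical, not part of this file):
 *
 *	static void my_uart_rx(struct hci_dev *hdev, const u8 *buf, int len)
 *	{
 *		if (hci_recv_stream_fragment(hdev, (void *)buf, len) < 0)
 *			BT_ERR("%s frame reassembly failed", hdev->name);
 *	}
 */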
3320
3321 /* ---- Interface to upper protocols ---- */
3322
3323 int hci_register_cb(struct hci_cb *cb)
3324 {
3325         BT_DBG("%p name %s", cb, cb->name);
3326
3327         write_lock(&hci_cb_list_lock);
3328         list_add(&cb->list, &hci_cb_list);
3329         write_unlock(&hci_cb_list_lock);
3330
3331         return 0;
3332 }
3333 EXPORT_SYMBOL(hci_register_cb);
3334
3335 int hci_unregister_cb(struct hci_cb *cb)
3336 {
3337         BT_DBG("%p name %s", cb, cb->name);
3338
3339         write_lock(&hci_cb_list_lock);
3340         list_del(&cb->list);
3341         write_unlock(&hci_cb_list_lock);
3342
3343         return 0;
3344 }
3345 EXPORT_SYMBOL(hci_unregister_cb);
3346
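/* Single TX funnel towards the driver: every outgoing frame is
 * timestamped, copied to the monitor socket (and to raw sockets when in
 * promiscuous mode), orphaned, and then handed to the driver's send
 * callback. On a driver error the skb is freed here.
 */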
3347 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3348 {
3349         int err;
3350
3351         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3352
3353         /* Time stamp */
3354         __net_timestamp(skb);
3355
3356         /* Send copy to monitor */
3357         hci_send_to_monitor(hdev, skb);
3358
3359         if (atomic_read(&hdev->promisc)) {
3360                 /* Send copy to the sockets */
3361                 hci_send_to_sock(hdev, skb);
3362         }
3363
3364         /* Get rid of skb owner prior to sending to the driver. */
3365         skb_orphan(skb);
3366
3367         err = hdev->send(hdev, skb);
3368         if (err < 0) {
3369                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3370                 kfree_skb(skb);
3371         }
3372 }
3373
3374 bool hci_req_pending(struct hci_dev *hdev)
3375 {
3376         return hdev->req_status == HCI_REQ_PEND;
3377 }
3378
3379 /* Send HCI command */
3380 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3381                  const void *param)
3382 {
3383         struct sk_buff *skb;
3384
3385         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3386
3387         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3388         if (!skb) {
3389                 BT_ERR("%s no memory for command", hdev->name);
3390                 return -ENOMEM;
3391         }
3392
3393         /* Stand-alone HCI commands must be flagged as
3394          * single-command requests.
3395          */
3396         bt_cb(skb)->req.start = true;
3397
3398         skb_queue_tail(&hdev->cmd_q, skb);
3399         queue_work(hdev->workqueue, &hdev->cmd_work);
3400
3401         return 0;
3402 }
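/* A minimal sketch of issuing a stand-alone command through this path
 * (illustrative only; mgmt and the request framework are the usual
 * callers):
 *
 *	struct hci_cp_write_local_name cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	memcpy(cp.name, "example", 7);
 *	err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 */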
3403
3404 /* Get data from the previously sent command */
3405 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3406 {
3407         struct hci_command_hdr *hdr;
3408
3409         if (!hdev->sent_cmd)
3410                 return NULL;
3411
3412         hdr = (void *) hdev->sent_cmd->data;
3413
3414         if (hdr->opcode != cpu_to_le16(opcode))
3415                 return NULL;
3416
3417         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3418
3419         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3420 }
3421
3422 /* Send ACL data */
3423 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3424 {
3425         struct hci_acl_hdr *hdr;
3426         int len = skb->len;
3427
3428         skb_push(skb, HCI_ACL_HDR_SIZE);
3429         skb_reset_transport_header(skb);
3430         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3431         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3432         hdr->dlen   = cpu_to_le16(len);
3433 }
3434
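/* Queue an ACL frame, fragmenting along the skb's frag_list if needed:
 * the head fragment keeps the caller's ACL_START flags, every following
 * fragment gets its own ACL header with ACL_CONT set, and all fragments
 * are queued atomically so the scheduler never sees a partial frame.
 */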
3435 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3436                           struct sk_buff *skb, __u16 flags)
3437 {
3438         struct hci_conn *conn = chan->conn;
3439         struct hci_dev *hdev = conn->hdev;
3440         struct sk_buff *list;
3441
3442         skb->len = skb_headlen(skb);
3443         skb->data_len = 0;
3444
3445         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3446
3447         switch (hdev->dev_type) {
3448         case HCI_BREDR:
3449                 hci_add_acl_hdr(skb, conn->handle, flags);
3450                 break;
3451         case HCI_AMP:
3452                 hci_add_acl_hdr(skb, chan->handle, flags);
3453                 break;
3454         default:
3455                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3456                 return;
3457         }
3458
3459         list = skb_shinfo(skb)->frag_list;
3460         if (!list) {
3461                 /* Non-fragmented */
3462                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3463
3464                 skb_queue_tail(queue, skb);
3465         } else {
3466                 /* Fragmented */
3467                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3468
3469                 skb_shinfo(skb)->frag_list = NULL;
3470
3471                 /* Queue all fragments atomically. We need to use spin_lock_bh
3472                  * here because with 6LoWPAN links this function may be called
3473                  * from softirq context, and taking a plain spin lock there
3474                  * could deadlock.
3475                  */
3476                 spin_lock_bh(&queue->lock);
3477
3478                 __skb_queue_tail(queue, skb);
3479
3480                 flags &= ~ACL_START;
3481                 flags |= ACL_CONT;
3482                 do {
3483                         skb = list; list = list->next;
3484
3485                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3486                         hci_add_acl_hdr(skb, conn->handle, flags);
3487
3488                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3489
3490                         __skb_queue_tail(queue, skb);
3491                 } while (list);
3492
3493                 spin_unlock_bh(&queue->lock);
3494         }
3495 }
3496
3497 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3498 {
3499         struct hci_dev *hdev = chan->conn->hdev;
3500
3501         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3502
3503         hci_queue_acl(chan, &chan->data_q, skb, flags);
3504
3505         queue_work(hdev->workqueue, &hdev->tx_work);
3506 }
3507
3508 /* Send SCO data */
3509 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3510 {
3511         struct hci_dev *hdev = conn->hdev;
3512         struct hci_sco_hdr hdr;
3513
3514         BT_DBG("%s len %d", hdev->name, skb->len);
3515
3516         hdr.handle = cpu_to_le16(conn->handle);
3517         hdr.dlen   = skb->len;
3518
3519         skb_push(skb, HCI_SCO_HDR_SIZE);
3520         skb_reset_transport_header(skb);
3521         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3522
3523         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3524
3525         skb_queue_tail(&conn->data_q, skb);
3526         queue_work(hdev->workqueue, &hdev->tx_work);
3527 }
3528
3529 /* ---- HCI TX task (outgoing data) ---- */
3530
3531 /* HCI Connection scheduler */
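/* Pick the connection of the given link type that has the fewest packets
 * in flight, and compute its send quota as the available controller
 * credits divided by the number of connections with queued data (always
 * at least one packet).
 */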
3532 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3533                                      int *quote)
3534 {
3535         struct hci_conn_hash *h = &hdev->conn_hash;
3536         struct hci_conn *conn = NULL, *c;
3537         unsigned int num = 0, min = ~0;
3538
3539         /* We don't have to lock the device here. Connections are
3540          * always added and removed with the TX task disabled. */
3541
3542         rcu_read_lock();
3543
3544         list_for_each_entry_rcu(c, &h->list, list) {
3545                 if (c->type != type || skb_queue_empty(&c->data_q))
3546                         continue;
3547
3548                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3549                         continue;
3550
3551                 num++;
3552
3553                 if (c->sent < min) {
3554                         min  = c->sent;
3555                         conn = c;
3556                 }
3557
3558                 if (hci_conn_num(hdev, type) == num)
3559                         break;
3560         }
3561
3562         rcu_read_unlock();
3563
3564         if (conn) {
3565                 int cnt, q;
3566
3567                 switch (conn->type) {
3568                 case ACL_LINK:
3569                         cnt = hdev->acl_cnt;
3570                         break;
3571                 case SCO_LINK:
3572                 case ESCO_LINK:
3573                         cnt = hdev->sco_cnt;
3574                         break;
3575                 case LE_LINK:
3576                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3577                         break;
3578                 default:
3579                         cnt = 0;
3580                         BT_ERR("Unknown link type");
3581                 }
3582
3583                 q = cnt / num;
3584                 *quote = q ? q : 1;
3585         } else
3586                 *quote = 0;
3587
3588         BT_DBG("conn %p quote %d", conn, *quote);
3589         return conn;
3590 }
3591
3592 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3593 {
3594         struct hci_conn_hash *h = &hdev->conn_hash;
3595         struct hci_conn *c;
3596
3597         BT_ERR("%s link tx timeout", hdev->name);
3598
3599         rcu_read_lock();
3600
3601         /* Kill stalled connections */
3602         list_for_each_entry_rcu(c, &h->list, list) {
3603                 if (c->type == type && c->sent) {
3604                         BT_ERR("%s killing stalled connection %pMR",
3605                                hdev->name, &c->dst);
3606                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3607                 }
3608         }
3609
3610         rcu_read_unlock();
3611 }
3612
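/* Channel-level, priority-aware variant of hci_low_sent(): only channels
 * whose head-of-queue skb carries the highest priority seen in this pass
 * compete, and among those the channel on the least-loaded connection
 * wins. The available credits are split across the competing channels.
 */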
3613 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3614                                       int *quote)
3615 {
3616         struct hci_conn_hash *h = &hdev->conn_hash;
3617         struct hci_chan *chan = NULL;
3618         unsigned int num = 0, min = ~0, cur_prio = 0;
3619         struct hci_conn *conn;
3620         int cnt, q, conn_num = 0;
3621
3622         BT_DBG("%s", hdev->name);
3623
3624         rcu_read_lock();
3625
3626         list_for_each_entry_rcu(conn, &h->list, list) {
3627                 struct hci_chan *tmp;
3628
3629                 if (conn->type != type)
3630                         continue;
3631
3632                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3633                         continue;
3634
3635                 conn_num++;
3636
3637                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3638                         struct sk_buff *skb;
3639
3640                         if (skb_queue_empty(&tmp->data_q))
3641                                 continue;
3642
3643                         skb = skb_peek(&tmp->data_q);
3644                         if (skb->priority < cur_prio)
3645                                 continue;
3646
3647                         if (skb->priority > cur_prio) {
3648                                 num = 0;
3649                                 min = ~0;
3650                                 cur_prio = skb->priority;
3651                         }
3652
3653                         num++;
3654
3655                         if (conn->sent < min) {
3656                                 min  = conn->sent;
3657                                 chan = tmp;
3658                         }
3659                 }
3660
3661                 if (hci_conn_num(hdev, type) == conn_num)
3662                         break;
3663         }
3664
3665         rcu_read_unlock();
3666
3667         if (!chan)
3668                 return NULL;
3669
3670         switch (chan->conn->type) {
3671         case ACL_LINK:
3672                 cnt = hdev->acl_cnt;
3673                 break;
3674         case AMP_LINK:
3675                 cnt = hdev->block_cnt;
3676                 break;
3677         case SCO_LINK:
3678         case ESCO_LINK:
3679                 cnt = hdev->sco_cnt;
3680                 break;
3681         case LE_LINK:
3682                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3683                 break;
3684         default:
3685                 cnt = 0;
3686                 BT_ERR("Unknown link type");
3687         }
3688
3689         q = cnt / num;
3690         *quote = q ? q : 1;
3691         BT_DBG("chan %p quote %d", chan, *quote);
3692         return chan;
3693 }
3694
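/* Anti-starvation pass, run after a TX round has consumed credits:
 * channels that got to send have their per-round counter reset, while
 * channels that sent nothing get their head skb promoted towards
 * HCI_PRIO_MAX - 1 so lower-priority traffic eventually wins a turn.
 */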
3695 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3696 {
3697         struct hci_conn_hash *h = &hdev->conn_hash;
3698         struct hci_conn *conn;
3699         int num = 0;
3700
3701         BT_DBG("%s", hdev->name);
3702
3703         rcu_read_lock();
3704
3705         list_for_each_entry_rcu(conn, &h->list, list) {
3706                 struct hci_chan *chan;
3707
3708                 if (conn->type != type)
3709                         continue;
3710
3711                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3712                         continue;
3713
3714                 num++;
3715
3716                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3717                         struct sk_buff *skb;
3718
3719                         if (chan->sent) {
3720                                 chan->sent = 0;
3721                                 continue;
3722                         }
3723
3724                         if (skb_queue_empty(&chan->data_q))
3725                                 continue;
3726
3727                         skb = skb_peek(&chan->data_q);
3728                         if (skb->priority >= HCI_PRIO_MAX - 1)
3729                                 continue;
3730
3731                         skb->priority = HCI_PRIO_MAX - 1;
3732
3733                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3734                                skb->priority);
3735                 }
3736
3737                 if (hci_conn_num(hdev, type) == num)
3738                         break;
3739         }
3740
3741         rcu_read_unlock();
3742
3743 }
3744
3745 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3746 {
3747         /* Calculate count of blocks used by this packet */
3748         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3749 }
3750
3751 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3752 {
3753         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3754                 /* ACL tx timeout must be longer than the maximum
3755                  * link supervision timeout (40.9 seconds) */
3756                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3757                                        HCI_ACL_TX_TIMEOUT))
3758                         hci_link_tx_to(hdev, ACL_LINK);
3759         }
3760 }
3761
3762 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3763 {
3764         unsigned int cnt = hdev->acl_cnt;
3765         struct hci_chan *chan;
3766         struct sk_buff *skb;
3767         int quote;
3768
3769         __check_timeout(hdev, cnt);
3770
3771         while (hdev->acl_cnt &&
3772                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3773                 u32 priority = (skb_peek(&chan->data_q))->priority;
3774                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3775                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3776                                skb->len, skb->priority);
3777
3778                         /* Stop if priority has changed */
3779                         if (skb->priority < priority)
3780                                 break;
3781
3782                         skb = skb_dequeue(&chan->data_q);
3783
3784                         hci_conn_enter_active_mode(chan->conn,
3785                                                    bt_cb(skb)->force_active);
3786
3787                         hci_send_frame(hdev, skb);
3788                         hdev->acl_last_tx = jiffies;
3789
3790                         hdev->acl_cnt--;
3791                         chan->sent++;
3792                         chan->conn->sent++;
3793                 }
3794         }
3795
3796         if (cnt != hdev->acl_cnt)
3797                 hci_prio_recalculate(hdev, ACL_LINK);
3798 }
3799
3800 static void hci_sched_acl_blk(struct hci_dev *hdev)
3801 {
3802         unsigned int cnt = hdev->block_cnt;
3803         struct hci_chan *chan;
3804         struct sk_buff *skb;
3805         int quote;
3806         u8 type;
3807
3808         __check_timeout(hdev, cnt);
3809
3810         BT_DBG("%s", hdev->name);
3811
3812         if (hdev->dev_type == HCI_AMP)
3813                 type = AMP_LINK;
3814         else
3815                 type = ACL_LINK;
3816
3817         while (hdev->block_cnt > 0 &&
3818                (chan = hci_chan_sent(hdev, type, &quote))) {
3819                 u32 priority = (skb_peek(&chan->data_q))->priority;
3820                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3821                         int blocks;
3822
3823                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3824                                skb->len, skb->priority);
3825
3826                         /* Stop if priority has changed */
3827                         if (skb->priority < priority)
3828                                 break;
3829
3830                         skb = skb_dequeue(&chan->data_q);
3831
3832                         blocks = __get_blocks(hdev, skb);
3833                         if (blocks > hdev->block_cnt)
3834                                 return;
3835
3836                         hci_conn_enter_active_mode(chan->conn,
3837                                                    bt_cb(skb)->force_active);
3838
3839                         hci_send_frame(hdev, skb);
3840                         hdev->acl_last_tx = jiffies;
3841
3842                         hdev->block_cnt -= blocks;
3843                         quote -= blocks;
3844
3845                         chan->sent += blocks;
3846                         chan->conn->sent += blocks;
3847                 }
3848         }
3849
3850         if (cnt != hdev->block_cnt)
3851                 hci_prio_recalculate(hdev, type);
3852 }
3853
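/* ACL scheduling comes in two flavours, selected by the controller's flow
 * control mode: classic packet-based accounting (one credit per packet)
 * or AMP-style block-based accounting (credits per data block, see
 * __get_blocks()).
 */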
3854 static void hci_sched_acl(struct hci_dev *hdev)
3855 {
3856         BT_DBG("%s", hdev->name);
3857
3858         /* No ACL links to schedule on a BR/EDR controller */
3859         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3860                 return;
3861
3862         /* No AMP links to schedule on an AMP controller */
3863         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3864                 return;
3865
3866         switch (hdev->flow_ctl_mode) {
3867         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3868                 hci_sched_acl_pkt(hdev);
3869                 break;
3870
3871         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3872                 hci_sched_acl_blk(hdev);
3873                 break;
3874         }
3875 }
3876
3877 /* Schedule SCO */
3878 static void hci_sched_sco(struct hci_dev *hdev)
3879 {
3880         struct hci_conn *conn;
3881         struct sk_buff *skb;
3882         int quote;
3883
3884         BT_DBG("%s", hdev->name);
3885
3886         if (!hci_conn_num(hdev, SCO_LINK))
3887                 return;
3888
3889         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3890                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3891                         BT_DBG("skb %p len %d", skb, skb->len);
3892                         hci_send_frame(hdev, skb);
3893
3894                         conn->sent++;
3895                         if (conn->sent == ~0)
3896                                 conn->sent = 0;
3897                 }
3898         }
3899 }
3900
3901 static void hci_sched_esco(struct hci_dev *hdev)
3902 {
3903         struct hci_conn *conn;
3904         struct sk_buff *skb;
3905         int quote;
3906
3907         BT_DBG("%s", hdev->name);
3908
3909         if (!hci_conn_num(hdev, ESCO_LINK))
3910                 return;
3911
3912         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3913                                                      &quote))) {
3914                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3915                         BT_DBG("skb %p len %d", skb, skb->len);
3916                         hci_send_frame(hdev, skb);
3917
3918                         conn->sent++;
3919                         if (conn->sent == ~0)
3920                                 conn->sent = 0;
3921                 }
3922         }
3923 }
3924
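/* LE scheduling. Controllers without a dedicated LE buffer pool report
 * zero LE credits, in which case LE traffic shares the ACL credit pool
 * (note the le_pkts/le_cnt versus acl_cnt fallback below).
 */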
3925 static void hci_sched_le(struct hci_dev *hdev)
3926 {
3927         struct hci_chan *chan;
3928         struct sk_buff *skb;
3929         int quote, cnt, tmp;
3930
3931         BT_DBG("%s", hdev->name);
3932
3933         if (!hci_conn_num(hdev, LE_LINK))
3934                 return;
3935
3936         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3937                 /* LE tx timeout must be longer than the maximum
3938                  * link supervision timeout (40.9 seconds) */
3939                 if (!hdev->le_cnt && hdev->le_pkts &&
3940                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3941                         hci_link_tx_to(hdev, LE_LINK);
3942         }
3943
3944         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3945         tmp = cnt;
3946         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3947                 u32 priority = (skb_peek(&chan->data_q))->priority;
3948                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3949                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3950                                skb->len, skb->priority);
3951
3952                         /* Stop if priority has changed */
3953                         if (skb->priority < priority)
3954                                 break;
3955
3956                         skb = skb_dequeue(&chan->data_q);
3957
3958                         hci_send_frame(hdev, skb);
3959                         hdev->le_last_tx = jiffies;
3960
3961                         cnt--;
3962                         chan->sent++;
3963                         chan->conn->sent++;
3964                 }
3965         }
3966
3967         if (hdev->le_pkts)
3968                 hdev->le_cnt = cnt;
3969         else
3970                 hdev->acl_cnt = cnt;
3971
3972         if (cnt != tmp)
3973                 hci_prio_recalculate(hdev, LE_LINK);
3974 }
3975
3976 static void hci_tx_work(struct work_struct *work)
3977 {
3978         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3979         struct sk_buff *skb;
3980
3981         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3982                hdev->sco_cnt, hdev->le_cnt);
3983
3984         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3985                 /* Schedule queues and send frames to the HCI driver */
3986                 hci_sched_acl(hdev);
3987                 hci_sched_sco(hdev);
3988                 hci_sched_esco(hdev);
3989                 hci_sched_le(hdev);
3990         }
3991
3992         /* Send next queued raw (unknown type) packet */
3993         while ((skb = skb_dequeue(&hdev->raw_q)))
3994                 hci_send_frame(hdev, skb);
3995 }
3996
3997 /* ----- HCI RX task (incoming data processing) ----- */
3998
3999 /* ACL data packet */
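/* The 16-bit handle field packs a 12-bit connection handle together with
 * the 2-bit packet boundary and 2-bit broadcast flags; hci_handle() and
 * hci_flags() below split them apart.
 */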
4000 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4001 {
4002         struct hci_acl_hdr *hdr = (void *) skb->data;
4003         struct hci_conn *conn;
4004         __u16 handle, flags;
4005
4006         skb_pull(skb, HCI_ACL_HDR_SIZE);
4007
4008         handle = __le16_to_cpu(hdr->handle);
4009         flags  = hci_flags(handle);
4010         handle = hci_handle(handle);
4011
4012         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4013                handle, flags);
4014
4015         hdev->stat.acl_rx++;
4016
4017         hci_dev_lock(hdev);
4018         conn = hci_conn_hash_lookup_handle(hdev, handle);
4019         hci_dev_unlock(hdev);
4020
4021         if (conn) {
4022                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4023
4024                 /* Send to upper protocol */
4025                 l2cap_recv_acldata(conn, skb, flags);
4026                 return;
4027         } else {
4028                 BT_ERR("%s ACL packet for unknown connection handle %d",
4029                        hdev->name, handle);
4030         }
4031
4032         kfree_skb(skb);
4033 }
4034
4035 /* SCO data packet */
4036 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4037 {
4038         struct hci_sco_hdr *hdr = (void *) skb->data;
4039         struct hci_conn *conn;
4040         __u16 handle;
4041
4042         skb_pull(skb, HCI_SCO_HDR_SIZE);
4043
4044         handle = __le16_to_cpu(hdr->handle);
4045
4046         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4047
4048         hdev->stat.sco_rx++;
4049
4050         hci_dev_lock(hdev);
4051         conn = hci_conn_hash_lookup_handle(hdev, handle);
4052         hci_dev_unlock(hdev);
4053
4054         if (conn) {
4055                 /* Send to upper protocol */
4056                 sco_recv_scodata(conn, skb);
4057                 return;
4058         } else {
4059                 BT_ERR("%s SCO packet for unknown connection handle %d",
4060                        hdev->name, handle);
4061         }
4062
4063         kfree_skb(skb);
4064 }
4065
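/* Support for serialized HCI requests: a request is a batch of commands
 * queued together, with req.start set on the first one. The current
 * request is complete once the head of cmd_q (if any) starts a new
 * request.
 */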
4066 static bool hci_req_is_complete(struct hci_dev *hdev)
4067 {
4068         struct sk_buff *skb;
4069
4070         skb = skb_peek(&hdev->cmd_q);
4071         if (!skb)
4072                 return true;
4073
4074         return bt_cb(skb)->req.start;
4075 }
4076
4077 static void hci_resend_last(struct hci_dev *hdev)
4078 {
4079         struct hci_command_hdr *sent;
4080         struct sk_buff *skb;
4081         u16 opcode;
4082
4083         if (!hdev->sent_cmd)
4084                 return;
4085
4086         sent = (void *) hdev->sent_cmd->data;
4087         opcode = __le16_to_cpu(sent->opcode);
4088         if (opcode == HCI_OP_RESET)
4089                 return;
4090
4091         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4092         if (!skb)
4093                 return;
4094
4095         skb_queue_head(&hdev->cmd_q, skb);
4096         queue_work(hdev->workqueue, &hdev->cmd_work);
4097 }
4098
4099 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4100 {
4101         hci_req_complete_t req_complete = NULL;
4102         struct sk_buff *skb;
4103         unsigned long flags;
4104
4105         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4106
4107         /* If the completed command doesn't match the last one that was
4108          * sent, we need to handle it specially.
4109          */
4110         if (!hci_sent_cmd_data(hdev, opcode)) {
4111                 /* Some CSR-based controllers generate a spontaneous
4112                  * reset complete event during init and any pending
4113                  * command will never be completed. In such a case we
4114                  * need to resend whatever was the last sent
4115                  * command.
4116                  */
4117                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4118                         hci_resend_last(hdev);
4119
4120                 return;
4121         }
4122
4123         /* If the command succeeded and there are still more commands in
4124          * this request, the request is not yet complete.
4125          */
4126         if (!status && !hci_req_is_complete(hdev))
4127                 return;
4128
4129         /* If this was the last command in a request, the complete
4130          * callback would be found in hdev->sent_cmd instead of the
4131          * command queue (hdev->cmd_q).
4132          */
4133         if (hdev->sent_cmd) {
4134                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4135
4136                 if (req_complete) {
4137                         /* We must set the complete callback to NULL to
4138                          * avoid calling the callback more than once if
4139                          * this function gets called again.
4140                          */
4141                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4142
4143                         goto call_complete;
4144                 }
4145         }
4146
4147         /* Remove all pending commands belonging to this request */
4148         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4149         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4150                 if (bt_cb(skb)->req.start) {
4151                         __skb_queue_head(&hdev->cmd_q, skb);
4152                         break;
4153                 }
4154
4155                 req_complete = bt_cb(skb)->req.complete;
4156                 kfree_skb(skb);
4157         }
4158         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4159
4160 call_complete:
4161         if (req_complete)
4162                 req_complete(hdev, status);
4163 }
4164
4165 static void hci_rx_work(struct work_struct *work)
4166 {
4167         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4168         struct sk_buff *skb;
4169
4170         BT_DBG("%s", hdev->name);
4171
4172         while ((skb = skb_dequeue(&hdev->rx_q))) {
4173                 /* Send copy to monitor */
4174                 hci_send_to_monitor(hdev, skb);
4175
4176                 if (atomic_read(&hdev->promisc)) {
4177                         /* Send copy to the sockets */
4178                         hci_send_to_sock(hdev, skb);
4179                 }
4180
4181                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4182                         kfree_skb(skb);
4183                         continue;
4184                 }
4185
4186                 if (test_bit(HCI_INIT, &hdev->flags)) {
4187                         /* Don't process data packets in this state. */
4188                         switch (bt_cb(skb)->pkt_type) {
4189                         case HCI_ACLDATA_PKT:
4190                         case HCI_SCODATA_PKT:
4191                                 kfree_skb(skb);
4192                                 continue;
4193                         }
4194                 }
4195
4196                 /* Process frame */
4197                 switch (bt_cb(skb)->pkt_type) {
4198                 case HCI_EVENT_PKT:
4199                         BT_DBG("%s Event packet", hdev->name);
4200                         hci_event_packet(hdev, skb);
4201                         break;
4202
4203                 case HCI_ACLDATA_PKT:
4204                         BT_DBG("%s ACL data packet", hdev->name);
4205                         hci_acldata_packet(hdev, skb);
4206                         break;
4207
4208                 case HCI_SCODATA_PKT:
4209                         BT_DBG("%s SCO data packet", hdev->name);
4210                         hci_scodata_packet(hdev, skb);
4211                         break;
4212
4213                 default:
4214                         kfree_skb(skb);
4215                         break;
4216                 }
4217         }
4218 }
4219
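/* Command scheduler: sends one queued command whenever the controller has
 * a free command credit (cmd_cnt). A clone of the command is kept in
 * hdev->sent_cmd for hci_sent_cmd_data(), and the HCI_CMD_TIMEOUT
 * watchdog is armed (unless an HCI reset is in flight) in case the
 * controller never answers; if cloning fails the command is requeued and
 * the work rescheduled.
 */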
4220 static void hci_cmd_work(struct work_struct *work)
4221 {
4222         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4223         struct sk_buff *skb;
4224
4225         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4226                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4227
4228         /* Send queued commands */
4229         if (atomic_read(&hdev->cmd_cnt)) {
4230                 skb = skb_dequeue(&hdev->cmd_q);
4231                 if (!skb)
4232                         return;
4233
4234                 kfree_skb(hdev->sent_cmd);
4235
4236                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4237                 if (hdev->sent_cmd) {
4238                         atomic_dec(&hdev->cmd_cnt);
4239                         hci_send_frame(hdev, skb);
4240                         if (test_bit(HCI_RESET, &hdev->flags))
4241                                 cancel_delayed_work(&hdev->cmd_timer);
4242                         else
4243                                 schedule_delayed_work(&hdev->cmd_timer,
4244                                                       HCI_CMD_TIMEOUT);
4245                 } else {
4246                         skb_queue_head(&hdev->cmd_q, skb);
4247                         queue_work(hdev->workqueue, &hdev->cmd_work);
4248                 }
4249         }
4250 }