/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        /* When the diagnostic flags are not persistent and the transport
         * is not active, then there is no need for the vendor callback.
         *
         * Instead just store the desired value. If needed the setting
         * will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !test_bit(HCI_RUNNING, &hdev->flags))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}
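
/* Usage sketch (illustrative, not part of the original file): with
 * debugfs mounted at the usual location, the two files created above
 * show up under /sys/kernel/debug/bluetooth/hciX/ and take boolean
 * input parsed by strtobool(), e.g. from a shell:
 *
 *        cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *        echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */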

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Block size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
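
/* For reference, the accept-timeout value above works out as follows:
 * HCI timing parameters are counted in baseband slots of 0.625 ms, so
 * 0x7d00 = 32000 slots and 32000 * 0.625 ms = 20000 ms, matching the
 * ~20 seconds mentioned in the comment.
 */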

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
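
/* For reference, the mask layout used above: events[n] |= (1 << b)
 * sets event-mask bit (8 * n + b), which for the low event codes
 * corresponds to the HCI event with code (8 * n + b + 1). For example,
 * events[0] |= 0x10 is bit 4 and enables Disconnection Complete
 * (event code 0x05).
 */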

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However some controllers list
                 * the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to at least 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
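
/* The policy bitfield built above advertises which link-policy modes
 * (role switch, hold, sniff and park) remote devices are allowed to
 * negotiate; each HCI_LP_* constant is one bit of the Write Default
 * Link Policy Settings command parameter.
 */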

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}
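
/* The scan value passed in through opt is the Write Scan Enable
 * bitfield: bit 0 enables inquiry scan (discoverable) and bit 1
 * enables page scan (connectable), so e.g. 0x03 enables both.
 */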

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
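
/* Usage sketch (illustrative only): every successful hci_dev_get()
 * must be balanced by a hci_dev_put() once the reference is no longer
 * needed:
 *
 *        struct hci_dev *hdev = hci_dev_get(0);
 *
 *        if (hdev) {
 *                BT_DBG("found %s", hdev->name);
 *                hci_dev_put(hdev);
 *        }
 */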

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
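
/* The insertion above keeps the resolve list ordered: entries with a
 * name lookup already pending stay at the head, and the rest are kept
 * in ascending abs(rssi) order. Since RSSI is in negative dBm, a
 * smaller absolute value means a stronger signal, so name resolution
 * is attempted for the closest devices first.
 */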

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}
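
/* Notes on the returned flags: MGMT_DEV_FOUND_CONFIRM_NAME is set
 * when the remote name is still unknown (or no cache entry could be
 * allocated), telling userspace that name resolution should be
 * confirmed; MGMT_DEV_FOUND_LEGACY_PAIRING marks devices without SSP.
 */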

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
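
/* Userspace reaches hci_inquiry() through the HCIINQUIRY ioctl on an
 * HCI socket. A hedged caller-side sketch (simplified; hci_sock_fd is
 * an assumed, already opened AF_BLUETOOTH socket, and the general
 * inquiry access code 0x9e8b33 is stored little-endian in lap[]):
 *
 *        struct {
 *                struct hci_inquiry_req ir;
 *                struct inquiry_info info[255];
 *        } buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *                          .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *        if (ioctl(hci_sock_fd, HCIINQUIRY, &buf) < 0)
 *                perror("HCIINQUIRY");
 */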

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
                        if (!ret && hdev->post_init)
                                ret = hdev->post_init(hdev);
                }
        }

        /* If the HCI Reset command is clearing all diagnostic settings,
         * then they need to be reprogrammed after the init procedure
         * has completed.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
                ret = hdev->set_diag(hdev, true);

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
                hci_leds_update_powered(hdev, true);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hci_dev_test_flag(hdev, HCI_MGMT) &&
                    hdev->dev_type == HCI_BREDR) {
                        ret = __hci_req_hci_power_on(hdev);
                        mgmt_power_on(hdev, ret);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                clear_bit(HCI_RUNNING, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}
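
/* hci_dev_open() backs the HCIDEVUP ioctl. A minimal caller sketch
 * (illustrative; error handling mostly elided):
 *
 *        int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *        if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *                perror("HCIDEVUP hci0");
 */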
1494
1495 /* This function requires the caller holds hdev->lock */
1496 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1497 {
1498         struct hci_conn_params *p;
1499
1500         list_for_each_entry(p, &hdev->le_conn_params, list) {
1501                 if (p->conn) {
1502                         hci_conn_drop(p->conn);
1503                         hci_conn_put(p->conn);
1504                         p->conn = NULL;
1505                 }
1506                 list_del_init(&p->action);
1507         }
1508
1509         BT_DBG("All LE pending actions cleared");
1510 }
1511
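/* Common power-down path used by hci_dev_close(), rfkill and the
 * power_off work: flush pending work, stop discovery, drop all
 * connections and queues, optionally send HCI_Reset (when the
 * HCI_QUIRK_RESET_ON_CLOSE quirk is set) and finally call the
 * driver's close() callback. Calling this on an already-down device
 * is a no-op that returns 0.
 */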
1512 int hci_dev_do_close(struct hci_dev *hdev)
1513 {
1514         bool auto_off;
1515
1516         BT_DBG("%s %p", hdev->name, hdev);
1517
1518         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1519             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1520             test_bit(HCI_UP, &hdev->flags)) {
1521                 /* Execute vendor specific shutdown routine */
1522                 if (hdev->shutdown)
1523                         hdev->shutdown(hdev);
1524         }
1525
1526         cancel_delayed_work(&hdev->power_off);
1527
1528         hci_request_cancel_all(hdev);
1529         hci_req_sync_lock(hdev);
1530
1531         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1532                 cancel_delayed_work_sync(&hdev->cmd_timer);
1533                 hci_req_sync_unlock(hdev);
1534                 return 0;
1535         }
1536
1537         hci_leds_update_powered(hdev, false);
1538
1539         /* Flush RX and TX works */
1540         flush_work(&hdev->tx_work);
1541         flush_work(&hdev->rx_work);
1542
1543         if (hdev->discov_timeout > 0) {
1544                 hdev->discov_timeout = 0;
1545                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1546                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1547         }
1548
1549         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1550                 cancel_delayed_work(&hdev->service_cache);
1551
1552         if (hci_dev_test_flag(hdev, HCI_MGMT))
1553                 cancel_delayed_work_sync(&hdev->rpa_expired);
1554
1555         /* Avoid potential lockdep warnings from the *_flush() calls by
1556          * ensuring the workqueue is empty up front.
1557          */
1558         drain_workqueue(hdev->workqueue);
1559
1560         hci_dev_lock(hdev);
1561
1562         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1563
1564         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1565
1566         if (!auto_off && hdev->dev_type == HCI_BREDR &&
1567             hci_dev_test_flag(hdev, HCI_MGMT))
1568                 __mgmt_power_off(hdev);
1569
1570         hci_inquiry_cache_flush(hdev);
1571         hci_pend_le_actions_clear(hdev);
1572         hci_conn_hash_flush(hdev);
1573         hci_dev_unlock(hdev);
1574
1575         smp_unregister(hdev);
1576
1577         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1578
1579         if (hdev->flush)
1580                 hdev->flush(hdev);
1581
1582         /* Reset device */
1583         skb_queue_purge(&hdev->cmd_q);
1584         atomic_set(&hdev->cmd_cnt, 1);
1585         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1586             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1587                 set_bit(HCI_INIT, &hdev->flags);
1588                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1589                 clear_bit(HCI_INIT, &hdev->flags);
1590         }
1591
1592         /* Flush cmd work */
1593         flush_work(&hdev->cmd_work);
1594
1595         /* Drop queues */
1596         skb_queue_purge(&hdev->rx_q);
1597         skb_queue_purge(&hdev->cmd_q);
1598         skb_queue_purge(&hdev->raw_q);
1599
1600         /* Drop last sent command */
1601         if (hdev->sent_cmd) {
1602                 cancel_delayed_work_sync(&hdev->cmd_timer);
1603                 kfree_skb(hdev->sent_cmd);
1604                 hdev->sent_cmd = NULL;
1605         }
1606
1607         clear_bit(HCI_RUNNING, &hdev->flags);
1608         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1609
1610         /* After this point our queues are empty
1611          * and no tasks are scheduled. */
1612         hdev->close(hdev);
1613
1614         /* Clear flags */
1615         hdev->flags &= BIT(HCI_RAW);
1616         hci_dev_clear_volatile_flags(hdev);
1617
1618         /* Controller radio is available but is currently powered down */
1619         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1620
1621         memset(hdev->eir, 0, sizeof(hdev->eir));
1622         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1623         bacpy(&hdev->random_addr, BDADDR_ANY);
1624
1625         hci_req_sync_unlock(hdev);
1626
1627         hci_dev_put(hdev);
1628         return 0;
1629 }
1630
1631 int hci_dev_close(__u16 dev)
1632 {
1633         struct hci_dev *hdev;
1634         int err;
1635
1636         hdev = hci_dev_get(dev);
1637         if (!hdev)
1638                 return -ENODEV;
1639
1640         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1641                 err = -EBUSY;
1642                 goto done;
1643         }
1644
1645         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1646                 cancel_delayed_work(&hdev->power_off);
1647
1648         err = hci_dev_do_close(hdev);
1649
1650 done:
1651         hci_dev_put(hdev);
1652         return err;
1653 }
1654
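/* Soft-reset a running controller: purge the RX and command queues,
 * flush the inquiry cache and connection hash, reset the flow-control
 * counters and synchronously issue HCI_Reset. Note that, unlike
 * hci_dev_do_close(), raw_q is not purged here.
 */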
1655 static int hci_dev_do_reset(struct hci_dev *hdev)
1656 {
1657         int ret;
1658
1659         BT_DBG("%s %p", hdev->name, hdev);
1660
1661         hci_req_sync_lock(hdev);
1662
1663         /* Drop queues */
1664         skb_queue_purge(&hdev->rx_q);
1665         skb_queue_purge(&hdev->cmd_q);
1666
1667         /* Avoid potential lockdep warnings from the *_flush() calls by
1668          * ensuring the workqueue is empty up front.
1669          */
1670         drain_workqueue(hdev->workqueue);
1671
1672         hci_dev_lock(hdev);
1673         hci_inquiry_cache_flush(hdev);
1674         hci_conn_hash_flush(hdev);
1675         hci_dev_unlock(hdev);
1676
1677         if (hdev->flush)
1678                 hdev->flush(hdev);
1679
1680         atomic_set(&hdev->cmd_cnt, 1);
1681         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1682
1683         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1684
1685         hci_req_sync_unlock(hdev);
1686         return ret;
1687 }
1688
1689 int hci_dev_reset(__u16 dev)
1690 {
1691         struct hci_dev *hdev;
1692         int err;
1693
1694         hdev = hci_dev_get(dev);
1695         if (!hdev)
1696                 return -ENODEV;
1697
1698         if (!test_bit(HCI_UP, &hdev->flags)) {
1699                 err = -ENETDOWN;
1700                 goto done;
1701         }
1702
1703         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1704                 err = -EBUSY;
1705                 goto done;
1706         }
1707
1708         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1709                 err = -EOPNOTSUPP;
1710                 goto done;
1711         }
1712
1713         err = hci_dev_do_reset(hdev);
1714
1715 done:
1716         hci_dev_put(hdev);
1717         return err;
1718 }
1719
1720 int hci_dev_reset_stat(__u16 dev)
1721 {
1722         struct hci_dev *hdev;
1723         int ret = 0;
1724
1725         hdev = hci_dev_get(dev);
1726         if (!hdev)
1727                 return -ENODEV;
1728
1729         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1730                 ret = -EBUSY;
1731                 goto done;
1732         }
1733
1734         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1735                 ret = -EOPNOTSUPP;
1736                 goto done;
1737         }
1738
1739         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1740
1741 done:
1742         hci_dev_put(hdev);
1743         return ret;
1744 }
1745
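/* Keep the mgmt-visible connectable/discoverable flags in sync after
 * a scan mode change made through the legacy HCISETSCAN ioctl. The
 * scan parameter is the Write_Scan_Enable bitmask:
 *
 *	scan & SCAN_PAGE	->  HCI_CONNECTABLE
 *	scan & SCAN_INQUIRY	->  HCI_DISCOVERABLE
 *
 * If either flag actually changed and mgmt is in use, a New Settings
 * event is emitted.
 */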
1746 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1747 {
1748         bool conn_changed, discov_changed;
1749
1750         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1751
1752         if ((scan & SCAN_PAGE))
1753                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1754                                                           HCI_CONNECTABLE);
1755         else
1756                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1757                                                            HCI_CONNECTABLE);
1758
1759         if ((scan & SCAN_INQUIRY)) {
1760                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1761                                                             HCI_DISCOVERABLE);
1762         } else {
1763                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1764                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1765                                                              HCI_DISCOVERABLE);
1766         }
1767
1768         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1769                 return;
1770
1771         if (conn_changed || discov_changed) {
1772                 /* In case this was disabled through mgmt */
1773                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1774
1775                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1776                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1777
1778                 mgmt_new_settings(hdev);
1779         }
1780 }
1781
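/* Handler for the legacy HCISET* ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, HCISETLINKPOL, HCISETLINKMODE, HCISETPTYPE, HCISETACLMTU
 * and HCISETSCOMTU). These only apply to BR/EDR capable controllers
 * that are fully configured and not bound to a user channel.
 */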
1782 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1783 {
1784         struct hci_dev *hdev;
1785         struct hci_dev_req dr;
1786         int err = 0;
1787
1788         if (copy_from_user(&dr, arg, sizeof(dr)))
1789                 return -EFAULT;
1790
1791         hdev = hci_dev_get(dr.dev_id);
1792         if (!hdev)
1793                 return -ENODEV;
1794
1795         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1796                 err = -EBUSY;
1797                 goto done;
1798         }
1799
1800         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1801                 err = -EOPNOTSUPP;
1802                 goto done;
1803         }
1804
1805         if (hdev->dev_type != HCI_BREDR) {
1806                 err = -EOPNOTSUPP;
1807                 goto done;
1808         }
1809
1810         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1811                 err = -EOPNOTSUPP;
1812                 goto done;
1813         }
1814
1815         switch (cmd) {
1816         case HCISETAUTH:
1817                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1818                                    HCI_INIT_TIMEOUT, NULL);
1819                 break;
1820
1821         case HCISETENCRYPT:
1822                 if (!lmp_encrypt_capable(hdev)) {
1823                         err = -EOPNOTSUPP;
1824                         break;
1825                 }
1826
1827                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1828                         /* Auth must be enabled first */
1829                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1830                                            HCI_INIT_TIMEOUT, NULL);
1831                         if (err)
1832                                 break;
1833                 }
1834
1835                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1836                                    HCI_INIT_TIMEOUT, NULL);
1837                 break;
1838
1839         case HCISETSCAN:
1840                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1841                                    HCI_INIT_TIMEOUT, NULL);
1842
1843                 /* Ensure that the connectable and discoverable states
1844                  * get correctly modified as this was a non-mgmt change.
1845                  */
1846                 if (!err)
1847                         hci_update_scan_state(hdev, dr.dev_opt);
1848                 break;
1849
1850         case HCISETLINKPOL:
1851                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1852                                    HCI_INIT_TIMEOUT, NULL);
1853                 break;
1854
1855         case HCISETLINKMODE:
1856                 hdev->link_mode = ((__u16) dr.dev_opt) &
1857                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1858                 break;
1859
1860         case HCISETPTYPE:
1861                 hdev->pkt_type = (__u16) dr.dev_opt;
1862                 break;
1863
1864         case HCISETACLMTU:
1865                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1866                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1867                 break;
1868
1869         case HCISETSCOMTU:
1870                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1871                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1872                 break;
1873
1874         default:
1875                 err = -EINVAL;
1876                 break;
1877         }
1878
1879 done:
1880         hci_dev_put(hdev);
1881         return err;
1882 }
1883
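/* HCIGETDEVLIST handler: return up to dev_num (dev_id, flags) pairs
 * to userspace. The argument layout is roughly:
 *
 *	struct hci_dev_list_req {
 *		__u16 dev_num;
 *		struct hci_dev_req dev_req[0];	- variable length
 *	};
 */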
1884 int hci_get_dev_list(void __user *arg)
1885 {
1886         struct hci_dev *hdev;
1887         struct hci_dev_list_req *dl;
1888         struct hci_dev_req *dr;
1889         int n = 0, size, err;
1890         __u16 dev_num;
1891
1892         if (get_user(dev_num, (__u16 __user *) arg))
1893                 return -EFAULT;
1894
1895         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1896                 return -EINVAL;
1897
1898         size = sizeof(*dl) + dev_num * sizeof(*dr);
1899
1900         dl = kzalloc(size, GFP_KERNEL);
1901         if (!dl)
1902                 return -ENOMEM;
1903
1904         dr = dl->dev_req;
1905
1906         read_lock(&hci_dev_list_lock);
1907         list_for_each_entry(hdev, &hci_dev_list, list) {
1908                 unsigned long flags = hdev->flags;
1909
1910                 /* When auto-off is configured the transport is
1911                  * running, but still report the device as down in
1912                  * that case.
1913                  */
1914                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1915                         flags &= ~BIT(HCI_UP);
1916
1917                 (dr + n)->dev_id  = hdev->id;
1918                 (dr + n)->dev_opt = flags;
1919
1920                 if (++n >= dev_num)
1921                         break;
1922         }
1923         read_unlock(&hci_dev_list_lock);
1924
1925         dl->dev_num = n;
1926         size = sizeof(*dl) + n * sizeof(*dr);
1927
1928         err = copy_to_user(arg, dl, size);
1929         kfree(dl);
1930
1931         return err ? -EFAULT : 0;
1932 }
1933
1934 int hci_get_dev_info(void __user *arg)
1935 {
1936         struct hci_dev *hdev;
1937         struct hci_dev_info di;
1938         unsigned long flags;
1939         int err = 0;
1940
1941         if (copy_from_user(&di, arg, sizeof(di)))
1942                 return -EFAULT;
1943
1944         hdev = hci_dev_get(di.dev_id);
1945         if (!hdev)
1946                 return -ENODEV;
1947
1948         /* When auto-off is configured the transport is
1949          * running, but still report the device as down in
1950          * that case.
1951          */
1952         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1953                 flags = hdev->flags & ~BIT(HCI_UP);
1954         else
1955                 flags = hdev->flags;
1956
1957         strcpy(di.name, hdev->name);
1958         di.bdaddr   = hdev->bdaddr;
1959         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1960         di.flags    = flags;
1961         di.pkt_type = hdev->pkt_type;
1962         if (lmp_bredr_capable(hdev)) {
1963                 di.acl_mtu  = hdev->acl_mtu;
1964                 di.acl_pkts = hdev->acl_pkts;
1965                 di.sco_mtu  = hdev->sco_mtu;
1966                 di.sco_pkts = hdev->sco_pkts;
1967         } else {
1968                 di.acl_mtu  = hdev->le_mtu;
1969                 di.acl_pkts = hdev->le_pkts;
1970                 di.sco_mtu  = 0;
1971                 di.sco_pkts = 0;
1972         }
1973         di.link_policy = hdev->link_policy;
1974         di.link_mode   = hdev->link_mode;
1975
1976         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1977         memcpy(&di.features, &hdev->features, sizeof(di.features));
1978
1979         if (copy_to_user(arg, &di, sizeof(di)))
1980                 err = -EFAULT;
1981
1982         hci_dev_put(hdev);
1983
1984         return err;
1985 }
1986
1987 /* ---- Interface to HCI drivers ---- */
1988
1989 static int hci_rfkill_set_block(void *data, bool blocked)
1990 {
1991         struct hci_dev *hdev = data;
1992
1993         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1994
1995         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1996                 return -EBUSY;
1997
1998         if (blocked) {
1999                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2000                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2001                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2002                         hci_dev_do_close(hdev);
2003         } else {
2004                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2005         }
2006
2007         return 0;
2008 }
2009
2010 static const struct rfkill_ops hci_rfkill_ops = {
2011         .set_block = hci_rfkill_set_block,
2012 };
2013
2014 static void hci_power_on(struct work_struct *work)
2015 {
2016         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2017         int err;
2018
2019         BT_DBG("%s", hdev->name);
2020
2021         if (test_bit(HCI_UP, &hdev->flags) &&
2022             hci_dev_test_flag(hdev, HCI_MGMT) &&
2023             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2024                 hci_req_sync_lock(hdev);
2025                 err = __hci_req_hci_power_on(hdev);
2026                 hci_req_sync_unlock(hdev);
2027                 mgmt_power_on(hdev, err);
2028                 return;
2029         }
2030
2031         err = hci_dev_do_open(hdev);
2032         if (err < 0) {
2033                 hci_dev_lock(hdev);
2034                 mgmt_set_powered_failed(hdev, err);
2035                 hci_dev_unlock(hdev);
2036                 return;
2037         }
2038
2039         /* During the HCI setup phase, a few error conditions are
2040          * ignored and they need to be checked now. If they are still
2041          * valid, it is important to turn the device back off.
2042          */
2043         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2044             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2045             (hdev->dev_type == HCI_BREDR &&
2046              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2047              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2048                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2049                 hci_dev_do_close(hdev);
2050         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2051                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2052                                    HCI_AUTO_OFF_TIMEOUT);
2053         }
2054
2055         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2056                 /* For unconfigured devices, set the HCI_RAW flag
2057                  * so that userspace can easily identify them.
2058                  */
2059                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2060                         set_bit(HCI_RAW, &hdev->flags);
2061
2062                 /* For fully configured devices, this will send
2063                  * the Index Added event. For unconfigured devices,
2064                  * it will send the Unconfigured Index Added event.
2065                  *
2066                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2067                  * and no event will be sent.
2068                  */
2069                 mgmt_index_added(hdev);
2070         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2071                 /* Now that the controller is configured, it is
2072                  * important to clear the HCI_RAW flag.
2073                  */
2074                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2075                         clear_bit(HCI_RAW, &hdev->flags);
2076
2077                 /* Powering on the controller with HCI_CONFIG set only
2078                  * happens with the transition from unconfigured to
2079                  * configured. This will send the Index Added event.
2080                  */
2081                 mgmt_index_added(hdev);
2082         }
2083 }
2084
2085 static void hci_power_off(struct work_struct *work)
2086 {
2087         struct hci_dev *hdev = container_of(work, struct hci_dev,
2088                                             power_off.work);
2089
2090         BT_DBG("%s", hdev->name);
2091
2092         hci_dev_do_close(hdev);
2093 }
2094
2095 static void hci_error_reset(struct work_struct *work)
2096 {
2097         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2098
2099         BT_DBG("%s", hdev->name);
2100
2101         if (hdev->hw_error)
2102                 hdev->hw_error(hdev, hdev->hw_error_code);
2103         else
2104                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2105                        hdev->hw_error_code);
2106
2107         if (hci_dev_do_close(hdev))
2108                 return;
2109
2110         hci_dev_do_open(hdev);
2111 }
2112
2113 void hci_uuids_clear(struct hci_dev *hdev)
2114 {
2115         struct bt_uuid *uuid, *tmp;
2116
2117         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2118                 list_del(&uuid->list);
2119                 kfree(uuid);
2120         }
2121 }
2122
2123 void hci_link_keys_clear(struct hci_dev *hdev)
2124 {
2125         struct link_key *key;
2126
2127         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2128                 list_del_rcu(&key->list);
2129                 kfree_rcu(key, rcu);
2130         }
2131 }
2132
2133 void hci_smp_ltks_clear(struct hci_dev *hdev)
2134 {
2135         struct smp_ltk *k;
2136
2137         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2138                 list_del_rcu(&k->list);
2139                 kfree_rcu(k, rcu);
2140         }
2141 }
2142
2143 void hci_smp_irks_clear(struct hci_dev *hdev)
2144 {
2145         struct smp_irk *k;
2146
2147         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2148                 list_del_rcu(&k->list);
2149                 kfree_rcu(k, rcu);
2150         }
2151 }
2152
2153 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2154 {
2155         struct link_key *k;
2156
2157         rcu_read_lock();
2158         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2159                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2160                         rcu_read_unlock();
2161                         return k;
2162                 }
2163         }
2164         rcu_read_unlock();
2165
2166         return NULL;
2167 }
2168
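/* Decide whether a freshly created BR/EDR link key should be stored
 * persistently. The decision is based on the key type and on both
 * sides' declared authentication requirements, where 0x00/0x01 mean
 * no bonding and 0x02/0x03 mean dedicated bonding.
 */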
2169 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2170                                u8 key_type, u8 old_key_type)
2171 {
2172         /* Legacy key */
2173         if (key_type < 0x03)
2174                 return true;
2175
2176         /* Debug keys are insecure so don't store them persistently */
2177         if (key_type == HCI_LK_DEBUG_COMBINATION)
2178                 return false;
2179
2180         /* Changed combination key and there's no previous one */
2181         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2182                 return false;
2183
2184         /* Security mode 3 case */
2185         if (!conn)
2186                 return true;
2187
2188         /* BR/EDR key derived using SC from an LE link */
2189         if (conn->type == LE_LINK)
2190                 return true;
2191
2192         /* Neither local nor remote side had no-bonding as a requirement */
2193         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2194                 return true;
2195
2196         /* Local side had dedicated bonding as requirement */
2197         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2198                 return true;
2199
2200         /* Remote side had dedicated bonding as requirement */
2201         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2202                 return true;
2203
2204         /* If none of the above criteria match, then don't store the key
2205          * persistently */
2206         return false;
2207 }
2208
2209 static u8 ltk_role(u8 type)
2210 {
2211         if (type == SMP_LTK)
2212                 return HCI_ROLE_MASTER;
2213
2214         return HCI_ROLE_SLAVE;
2215 }
2216
2217 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2218                              u8 addr_type, u8 role)
2219 {
2220         struct smp_ltk *k;
2221
2222         rcu_read_lock();
2223         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2224                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2225                         continue;
2226
2227                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2228                         rcu_read_unlock();
2229                         return k;
2230                 }
2231         }
2232         rcu_read_unlock();
2233
2234         return NULL;
2235 }
2236
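/* Resolve a Resolvable Private Address to its IRK using a two-pass
 * lookup: first try to match an RPA that was already resolved and
 * cached, then fall back to running smp_irk_matches() against every
 * stored IRK, caching the matching RPA for the next lookup.
 */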
2237 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2238 {
2239         struct smp_irk *irk;
2240
2241         rcu_read_lock();
2242         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2243                 if (!bacmp(&irk->rpa, rpa)) {
2244                         rcu_read_unlock();
2245                         return irk;
2246                 }
2247         }
2248
2249         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2250                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2251                         bacpy(&irk->rpa, rpa);
2252                         rcu_read_unlock();
2253                         return irk;
2254                 }
2255         }
2256         rcu_read_unlock();
2257
2258         return NULL;
2259 }
2260
2261 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2262                                      u8 addr_type)
2263 {
2264         struct smp_irk *irk;
2265
2266         /* Identity Address must be public or static random */
2267         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2268                 return NULL;
2269
2270         rcu_read_lock();
2271         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2272                 if (addr_type == irk->addr_type &&
2273                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2274                         rcu_read_unlock();
2275                         return irk;
2276                 }
2277         }
2278         rcu_read_unlock();
2279
2280         return NULL;
2281 }
2282
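/* Store a BR/EDR link key, reusing any existing entry for this
 * bdaddr. If persistent is non-NULL it is set to whether the key
 * should survive a power cycle (see hci_persistent_key above).
 */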
2283 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2284                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2285                                   u8 pin_len, bool *persistent)
2286 {
2287         struct link_key *key, *old_key;
2288         u8 old_key_type;
2289
2290         old_key = hci_find_link_key(hdev, bdaddr);
2291         if (old_key) {
2292                 old_key_type = old_key->type;
2293                 key = old_key;
2294         } else {
2295                 old_key_type = conn ? conn->key_type : 0xff;
2296                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2297                 if (!key)
2298                         return NULL;
2299                 list_add_rcu(&key->list, &hdev->link_keys);
2300         }
2301
2302         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2303
2304         /* Some buggy controller combinations generate a changed
2305          * combination key for legacy pairing even when there's no
2306          * previous key */
2307         if (type == HCI_LK_CHANGED_COMBINATION &&
2308             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2309                 type = HCI_LK_COMBINATION;
2310                 if (conn)
2311                         conn->key_type = type;
2312         }
2313
2314         bacpy(&key->bdaddr, bdaddr);
2315         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2316         key->pin_len = pin_len;
2317
2318         if (type == HCI_LK_CHANGED_COMBINATION)
2319                 key->type = old_key_type;
2320         else
2321                 key->type = type;
2322
2323         if (persistent)
2324                 *persistent = hci_persistent_key(hdev, conn, type,
2325                                                  old_key_type);
2326
2327         return key;
2328 }
2329
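/* Store an SMP Long Term Key, reusing an existing entry matching the
 * address and role where possible. The role is derived from the key
 * type via ltk_role(): SMP_LTK means master, anything else slave.
 */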
2330 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2331                             u8 addr_type, u8 type, u8 authenticated,
2332                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2333 {
2334         struct smp_ltk *key, *old_key;
2335         u8 role = ltk_role(type);
2336
2337         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2338         if (old_key)
2339                 key = old_key;
2340         else {
2341                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2342                 if (!key)
2343                         return NULL;
2344                 list_add_rcu(&key->list, &hdev->long_term_keys);
2345         }
2346
2347         bacpy(&key->bdaddr, bdaddr);
2348         key->bdaddr_type = addr_type;
2349         memcpy(key->val, tk, sizeof(key->val));
2350         key->authenticated = authenticated;
2351         key->ediv = ediv;
2352         key->rand = rand;
2353         key->enc_size = enc_size;
2354         key->type = type;
2355
2356         return key;
2357 }
2358
2359 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2360                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2361 {
2362         struct smp_irk *irk;
2363
2364         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2365         if (!irk) {
2366                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2367                 if (!irk)
2368                         return NULL;
2369
2370                 bacpy(&irk->bdaddr, bdaddr);
2371                 irk->addr_type = addr_type;
2372
2373                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2374         }
2375
2376         memcpy(irk->val, val, 16);
2377         bacpy(&irk->rpa, rpa);
2378
2379         return irk;
2380 }
2381
2382 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2383 {
2384         struct link_key *key;
2385
2386         key = hci_find_link_key(hdev, bdaddr);
2387         if (!key)
2388                 return -ENOENT;
2389
2390         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2391
2392         list_del_rcu(&key->list);
2393         kfree_rcu(key, rcu);
2394
2395         return 0;
2396 }
2397
2398 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2399 {
2400         struct smp_ltk *k;
2401         int removed = 0;
2402
2403         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2404                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2405                         continue;
2406
2407                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2408
2409                 list_del_rcu(&k->list);
2410                 kfree_rcu(k, rcu);
2411                 removed++;
2412         }
2413
2414         return removed ? 0 : -ENOENT;
2415 }
2416
2417 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2418 {
2419         struct smp_irk *k;
2420
2421         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2422                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2423                         continue;
2424
2425                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2426
2427                 list_del_rcu(&k->list);
2428                 kfree_rcu(k, rcu);
2429         }
2430 }
2431
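/* Check whether a bond exists with the given device. For BR/EDR this
 * is simply link key presence; for LE the address is first resolved
 * through any matching IRK to its identity address and then looked
 * up in the LTK list.
 */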
2432 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2433 {
2434         struct smp_ltk *k;
2435         struct smp_irk *irk;
2436         u8 addr_type;
2437
2438         if (type == BDADDR_BREDR) {
2439                 if (hci_find_link_key(hdev, bdaddr))
2440                         return true;
2441                 return false;
2442         }
2443
2444         /* Convert to HCI addr type which struct smp_ltk uses */
2445         if (type == BDADDR_LE_PUBLIC)
2446                 addr_type = ADDR_LE_DEV_PUBLIC;
2447         else
2448                 addr_type = ADDR_LE_DEV_RANDOM;
2449
2450         irk = hci_get_irk(hdev, bdaddr, addr_type);
2451         if (irk) {
2452                 bdaddr = &irk->bdaddr;
2453                 addr_type = irk->addr_type;
2454         }
2455
2456         rcu_read_lock();
2457         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2458                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2459                         rcu_read_unlock();
2460                         return true;
2461                 }
2462         }
2463         rcu_read_unlock();
2464
2465         return false;
2466 }
2467
2468 /* HCI command timer function */
2469 static void hci_cmd_timeout(struct work_struct *work)
2470 {
2471         struct hci_dev *hdev = container_of(work, struct hci_dev,
2472                                             cmd_timer.work);
2473
2474         if (hdev->sent_cmd) {
2475                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2476                 u16 opcode = __le16_to_cpu(sent->opcode);
2477
2478                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2479         } else {
2480                 BT_ERR("%s command tx timeout", hdev->name);
2481         }
2482
2483         atomic_set(&hdev->cmd_cnt, 1);
2484         queue_work(hdev->workqueue, &hdev->cmd_work);
2485 }
2486
2487 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2488                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2489 {
2490         struct oob_data *data;
2491
2492         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2493                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2494                         continue;
2495                 if (data->bdaddr_type != bdaddr_type)
2496                         continue;
2497                 return data;
2498         }
2499
2500         return NULL;
2501 }
2502
2503 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2504                                u8 bdaddr_type)
2505 {
2506         struct oob_data *data;
2507
2508         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2509         if (!data)
2510                 return -ENOENT;
2511
2512         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2513
2514         list_del(&data->list);
2515         kfree(data);
2516
2517         return 0;
2518 }
2519
2520 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2521 {
2522         struct oob_data *data, *n;
2523
2524         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2525                 list_del(&data->list);
2526                 kfree(data);
2527         }
2528 }
2529
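/* Store or update remote OOB pairing data. The present field encodes
 * which hash/randomizer pairs were supplied:
 *
 *	0x01	P-192 values only
 *	0x02	P-256 values only
 *	0x03	both P-192 and P-256 values
 */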
2530 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2531                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2532                             u8 *hash256, u8 *rand256)
2533 {
2534         struct oob_data *data;
2535
2536         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2537         if (!data) {
2538                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2539                 if (!data)
2540                         return -ENOMEM;
2541
2542                 bacpy(&data->bdaddr, bdaddr);
2543                 data->bdaddr_type = bdaddr_type;
2544                 list_add(&data->list, &hdev->remote_oob_data);
2545         }
2546
2547         if (hash192 && rand192) {
2548                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2549                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2550                 if (hash256 && rand256)
2551                         data->present = 0x03;
2552         } else {
2553                 memset(data->hash192, 0, sizeof(data->hash192));
2554                 memset(data->rand192, 0, sizeof(data->rand192));
2555                 if (hash256 && rand256)
2556                         data->present = 0x02;
2557                 else
2558                         data->present = 0x00;
2559         }
2560
2561         if (hash256 && rand256) {
2562                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2563                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2564         } else {
2565                 memset(data->hash256, 0, sizeof(data->hash256));
2566                 memset(data->rand256, 0, sizeof(data->rand256));
2567                 if (hash192 && rand192)
2568                         data->present = 0x01;
2569         }
2570
2571         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2572
2573         return 0;
2574 }
2575
2576 /* This function requires the caller holds hdev->lock */
2577 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2578 {
2579         struct adv_info *adv_instance;
2580
2581         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2582                 if (adv_instance->instance == instance)
2583                         return adv_instance;
2584         }
2585
2586         return NULL;
2587 }
2588
2589 /* This function requires the caller holds hdev->lock */
2590 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2591 {
2592         struct adv_info *cur_instance;
2593
2594         cur_instance = hci_find_adv_instance(hdev, instance);
2595         if (!cur_instance)
2596                 return NULL;
2597
2598         if (cur_instance == list_last_entry(&hdev->adv_instances,
2599                                             struct adv_info, list))
2600                 return list_first_entry(&hdev->adv_instances,
2601                                                  struct adv_info, list);
2602         else
2603                 return list_next_entry(cur_instance, list);
2604 }
2605
2606 /* This function requires the caller holds hdev->lock */
2607 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2608 {
2609         struct adv_info *adv_instance;
2610
2611         adv_instance = hci_find_adv_instance(hdev, instance);
2612         if (!adv_instance)
2613                 return -ENOENT;
2614
2615         BT_DBG("%s removing %d", hdev->name, instance);
2616
2617         if (hdev->cur_adv_instance == instance) {
2618                 if (hdev->adv_instance_timeout) {
2619                         cancel_delayed_work(&hdev->adv_instance_expire);
2620                         hdev->adv_instance_timeout = 0;
2621                 }
2622                 hdev->cur_adv_instance = 0x00;
2623         }
2624
2625         list_del(&adv_instance->list);
2626         kfree(adv_instance);
2627
2628         hdev->adv_instance_cnt--;
2629
2630         return 0;
2631 }
2632
2633 /* This function requires the caller holds hdev->lock */
2634 void hci_adv_instances_clear(struct hci_dev *hdev)
2635 {
2636         struct adv_info *adv_instance, *n;
2637
2638         if (hdev->adv_instance_timeout) {
2639                 cancel_delayed_work(&hdev->adv_instance_expire);
2640                 hdev->adv_instance_timeout = 0;
2641         }
2642
2643         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2644                 list_del(&adv_instance->list);
2645                 kfree(adv_instance);
2646         }
2647
2648         hdev->adv_instance_cnt = 0;
2649         hdev->cur_adv_instance = 0x00;
2650 }
2651
2652 /* This function requires the caller holds hdev->lock */
2653 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2654                          u16 adv_data_len, u8 *adv_data,
2655                          u16 scan_rsp_len, u8 *scan_rsp_data,
2656                          u16 timeout, u16 duration)
2657 {
2658         struct adv_info *adv_instance;
2659
2660         adv_instance = hci_find_adv_instance(hdev, instance);
2661         if (adv_instance) {
2662                 memset(adv_instance->adv_data, 0,
2663                        sizeof(adv_instance->adv_data));
2664                 memset(adv_instance->scan_rsp_data, 0,
2665                        sizeof(adv_instance->scan_rsp_data));
2666         } else {
2667                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2668                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2669                         return -EOVERFLOW;
2670
2671                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2672                 if (!adv_instance)
2673                         return -ENOMEM;
2674
2675                 adv_instance->pending = true;
2676                 adv_instance->instance = instance;
2677                 list_add(&adv_instance->list, &hdev->adv_instances);
2678                 hdev->adv_instance_cnt++;
2679         }
2680
2681         adv_instance->flags = flags;
2682         adv_instance->adv_data_len = adv_data_len;
2683         adv_instance->scan_rsp_len = scan_rsp_len;
2684
2685         if (adv_data_len)
2686                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2687
2688         if (scan_rsp_len)
2689                 memcpy(adv_instance->scan_rsp_data,
2690                        scan_rsp_data, scan_rsp_len);
2691
2692         adv_instance->timeout = timeout;
2693         adv_instance->remaining_time = timeout;
2694
2695         if (duration == 0)
2696                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2697         else
2698                 adv_instance->duration = duration;
2699
2700         BT_DBG("%s for %d", hdev->name, instance);
2701
2702         return 0;
2703 }
2704
2705 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2706                                          bdaddr_t *bdaddr, u8 type)
2707 {
2708         struct bdaddr_list *b;
2709
2710         list_for_each_entry(b, bdaddr_list, list) {
2711                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2712                         return b;
2713         }
2714
2715         return NULL;
2716 }
2717
2718 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2719 {
2720         struct bdaddr_list *b, *n;
2721
2722         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2723                 list_del(&b->list);
2724                 kfree(b);
2725         }
2726 }
2727
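/* Add an address to a bdaddr list. Returns -EBADF for BDADDR_ANY,
 * -EEXIST if the entry is already present, -ENOMEM on allocation
 * failure and 0 on success.
 */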
2728 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2729 {
2730         struct bdaddr_list *entry;
2731
2732         if (!bacmp(bdaddr, BDADDR_ANY))
2733                 return -EBADF;
2734
2735         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2736                 return -EEXIST;
2737
2738         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2739         if (!entry)
2740                 return -ENOMEM;
2741
2742         bacpy(&entry->bdaddr, bdaddr);
2743         entry->bdaddr_type = type;
2744
2745         list_add(&entry->list, list);
2746
2747         return 0;
2748 }
2749
2750 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2751 {
2752         struct bdaddr_list *entry;
2753
2754         if (!bacmp(bdaddr, BDADDR_ANY)) {
2755                 hci_bdaddr_list_clear(list);
2756                 return 0;
2757         }
2758
2759         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2760         if (!entry)
2761                 return -ENOENT;
2762
2763         list_del(&entry->list);
2764         kfree(entry);
2765
2766         return 0;
2767 }
2768
2769 /* This function requires the caller holds hdev->lock */
2770 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2771                                                bdaddr_t *addr, u8 addr_type)
2772 {
2773         struct hci_conn_params *params;
2774
2775         list_for_each_entry(params, &hdev->le_conn_params, list) {
2776                 if (bacmp(&params->addr, addr) == 0 &&
2777                     params->addr_type == addr_type) {
2778                         return params;
2779                 }
2780         }
2781
2782         return NULL;
2783 }
2784
2785 /* This function requires the caller holds hdev->lock */
2786 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2787                                                   bdaddr_t *addr, u8 addr_type)
2788 {
2789         struct hci_conn_params *param;
2790
2791         list_for_each_entry(param, list, action) {
2792                 if (bacmp(&param->addr, addr) == 0 &&
2793                     param->addr_type == addr_type)
2794                         return param;
2795         }
2796
2797         return NULL;
2798 }
2799
2800 /* This function requires the caller holds hdev->lock */
2801 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2802                                             bdaddr_t *addr, u8 addr_type)
2803 {
2804         struct hci_conn_params *params;
2805
2806         params = hci_conn_params_lookup(hdev, addr, addr_type);
2807         if (params)
2808                 return params;
2809
2810         params = kzalloc(sizeof(*params), GFP_KERNEL);
2811         if (!params) {
2812                 BT_ERR("Out of memory");
2813                 return NULL;
2814         }
2815
2816         bacpy(&params->addr, addr);
2817         params->addr_type = addr_type;
2818
2819         list_add(&params->list, &hdev->le_conn_params);
2820         INIT_LIST_HEAD(&params->action);
2821
2822         params->conn_min_interval = hdev->le_conn_min_interval;
2823         params->conn_max_interval = hdev->le_conn_max_interval;
2824         params->conn_latency = hdev->le_conn_latency;
2825         params->supervision_timeout = hdev->le_supv_timeout;
2826         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2827
2828         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2829
2830         return params;
2831 }
2832
2833 static void hci_conn_params_free(struct hci_conn_params *params)
2834 {
2835         if (params->conn) {
2836                 hci_conn_drop(params->conn);
2837                 hci_conn_put(params->conn);
2838         }
2839
2840         list_del(&params->action);
2841         list_del(&params->list);
2842         kfree(params);
2843 }
2844
2845 /* This function requires the caller holds hdev->lock */
2846 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2847 {
2848         struct hci_conn_params *params;
2849
2850         params = hci_conn_params_lookup(hdev, addr, addr_type);
2851         if (!params)
2852                 return;
2853
2854         hci_conn_params_free(params);
2855
2856         hci_update_background_scan(hdev);
2857
2858         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2859 }
2860
2861 /* This function requires the caller holds hdev->lock */
2862 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2863 {
2864         struct hci_conn_params *params, *tmp;
2865
2866         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2867                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2868                         continue;
2869
2870                 /* If trying to establish a one-time connection to a disabled
2871                  * device, leave the params, but mark them as just once.
2872                  */
2873                 if (params->explicit_connect) {
2874                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2875                         continue;
2876                 }
2877
2878                 list_del(&params->list);
2879                 kfree(params);
2880         }
2881
2882         BT_DBG("All LE disabled connection parameters were removed");
2883 }
2884
2885 /* This function requires the caller holds hdev->lock */
2886 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2887 {
2888         struct hci_conn_params *params, *tmp;
2889
2890         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2891                 hci_conn_params_free(params);
2892
2893         BT_DBG("All LE connection parameters were removed");
2894 }
2895
2896 /* Copy the Identity Address of the controller.
2897  *
2898  * If the controller has a public BD_ADDR, then by default use that one.
2899  * If this is an LE-only controller without a public address, default to
2900  * the static random address.
2901  *
2902  * For debugging purposes it is possible to force controllers with a
2903  * public address to use the static random address instead.
2904  *
2905  * In case BR/EDR has been disabled on a dual-mode controller and
2906  * userspace has configured a static address, then that address
2907  * becomes the identity address instead of the public BR/EDR address.
2908  */
2909 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2910                                u8 *bdaddr_type)
2911 {
2912         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2913             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2914             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2915              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2916                 bacpy(bdaddr, &hdev->static_addr);
2917                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2918         } else {
2919                 bacpy(bdaddr, &hdev->bdaddr);
2920                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2921         }
2922 }
2923
2924 /* Alloc HCI device */
2925 struct hci_dev *hci_alloc_dev(void)
2926 {
2927         struct hci_dev *hdev;
2928
2929         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2930         if (!hdev)
2931                 return NULL;
2932
2933         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2934         hdev->esco_type = (ESCO_HV1);
2935         hdev->link_mode = (HCI_LM_ACCEPT);
2936         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2937         hdev->io_capability = 0x03;     /* No Input No Output */
2938         hdev->manufacturer = 0xffff;    /* Default to internal use */
2939         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2940         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2941         hdev->adv_instance_cnt = 0;
2942         hdev->cur_adv_instance = 0x00;
2943         hdev->adv_instance_timeout = 0;
2944
2945         hdev->sniff_max_interval = 800;
2946         hdev->sniff_min_interval = 80;
2947
2948         hdev->le_adv_channel_map = 0x07;
2949         hdev->le_adv_min_interval = 0x0800;
2950         hdev->le_adv_max_interval = 0x0800;
2951         hdev->le_scan_interval = 0x0060;
2952         hdev->le_scan_window = 0x0030;
2953         hdev->le_conn_min_interval = 0x0028;
2954         hdev->le_conn_max_interval = 0x0038;
2955         hdev->le_conn_latency = 0x0000;
2956         hdev->le_supv_timeout = 0x002a;
2957         hdev->le_def_tx_len = 0x001b;
2958         hdev->le_def_tx_time = 0x0148;
2959         hdev->le_max_tx_len = 0x001b;
2960         hdev->le_max_tx_time = 0x0148;
2961         hdev->le_max_rx_len = 0x001b;
2962         hdev->le_max_rx_time = 0x0148;
2963
2964         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2965         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2966         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2967         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2968
2969         mutex_init(&hdev->lock);
2970         mutex_init(&hdev->req_lock);
2971
2972         INIT_LIST_HEAD(&hdev->mgmt_pending);
2973         INIT_LIST_HEAD(&hdev->blacklist);
2974         INIT_LIST_HEAD(&hdev->whitelist);
2975         INIT_LIST_HEAD(&hdev->uuids);
2976         INIT_LIST_HEAD(&hdev->link_keys);
2977         INIT_LIST_HEAD(&hdev->long_term_keys);
2978         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2979         INIT_LIST_HEAD(&hdev->remote_oob_data);
2980         INIT_LIST_HEAD(&hdev->le_white_list);
2981         INIT_LIST_HEAD(&hdev->le_conn_params);
2982         INIT_LIST_HEAD(&hdev->pend_le_conns);
2983         INIT_LIST_HEAD(&hdev->pend_le_reports);
2984         INIT_LIST_HEAD(&hdev->conn_hash.list);
2985         INIT_LIST_HEAD(&hdev->adv_instances);
2986
2987         INIT_WORK(&hdev->rx_work, hci_rx_work);
2988         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2989         INIT_WORK(&hdev->tx_work, hci_tx_work);
2990         INIT_WORK(&hdev->power_on, hci_power_on);
2991         INIT_WORK(&hdev->error_reset, hci_error_reset);
2992
2993         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2994
2995         skb_queue_head_init(&hdev->rx_q);
2996         skb_queue_head_init(&hdev->cmd_q);
2997         skb_queue_head_init(&hdev->raw_q);
2998
2999         init_waitqueue_head(&hdev->req_wait_q);
3000
3001         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3002
3003         hci_request_setup(hdev);
3004
3005         hci_init_sysfs(hdev);
3006         discovery_init(hdev);
3007
3008         return hdev;
3009 }
3010 EXPORT_SYMBOL(hci_alloc_dev);
3011
3012 /* Free HCI device */
3013 void hci_free_dev(struct hci_dev *hdev)
3014 {
3015         /* will free via device release */
3016         put_device(&hdev->dev);
3017 }
3018 EXPORT_SYMBOL(hci_free_dev);
3019
3020 /* Register HCI device */
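/*
 * A minimal driver-side sketch of the alloc/register/free life cycle
 * (illustrative only; my_open, my_close and my_send stand in for the
 * driver's real callbacks and error handling is elided):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */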
3021 int hci_register_dev(struct hci_dev *hdev)
3022 {
3023         int id, error;
3024
3025         if (!hdev->open || !hdev->close || !hdev->send)
3026                 return -EINVAL;
3027
3028         /* Do not allow HCI_AMP devices to register at index 0,
3029          * so the index can be used as the AMP controller ID.
3030          */
3031         switch (hdev->dev_type) {
3032         case HCI_BREDR:
3033                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3034                 break;
3035         case HCI_AMP:
3036                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3037                 break;
3038         default:
3039                 return -EINVAL;
3040         }
3041
3042         if (id < 0)
3043                 return id;
3044
3045         sprintf(hdev->name, "hci%d", id);
3046         hdev->id = id;
3047
3048         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3049
3050         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3051                                           WQ_MEM_RECLAIM, 1, hdev->name);
3052         if (!hdev->workqueue) {
3053                 error = -ENOMEM;
3054                 goto err;
3055         }
3056
3057         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3058                                               WQ_MEM_RECLAIM, 1, hdev->name);
3059         if (!hdev->req_workqueue) {
3060                 destroy_workqueue(hdev->workqueue);
3061                 error = -ENOMEM;
3062                 goto err;
3063         }
3064
3065         if (!IS_ERR_OR_NULL(bt_debugfs))
3066                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3067
3068         dev_set_name(&hdev->dev, "%s", hdev->name);
3069
3070         error = device_add(&hdev->dev);
3071         if (error < 0)
3072                 goto err_wqueue;
3073
3074         hci_leds_init(hdev);
3075
3076         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3077                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3078                                     hdev);
3079         if (hdev->rfkill) {
3080                 if (rfkill_register(hdev->rfkill) < 0) {
3081                         rfkill_destroy(hdev->rfkill);
3082                         hdev->rfkill = NULL;
3083                 }
3084         }
3085
3086         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3087                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3088
3089         hci_dev_set_flag(hdev, HCI_SETUP);
3090         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3091
3092         if (hdev->dev_type == HCI_BREDR) {
3093                 /* Assume BR/EDR support until proven otherwise (such as
3094                  * through reading supported features during init).
3095                  */
3096                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3097         }
3098
3099         write_lock(&hci_dev_list_lock);
3100         list_add(&hdev->list, &hci_dev_list);
3101         write_unlock(&hci_dev_list_lock);
3102
3103         /* Devices that are marked for raw-only usage are unconfigured
3104          * and should not be included in normal operation.
3105          */
3106         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3107                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3108
3109         hci_sock_dev_event(hdev, HCI_DEV_REG);
3110         hci_dev_hold(hdev);
3111
3112         queue_work(hdev->req_workqueue, &hdev->power_on);
3113
3114         return id;
3115
3116 err_wqueue:
3117         destroy_workqueue(hdev->workqueue);
3118         destroy_workqueue(hdev->req_workqueue);
3119 err:
3120         ida_simple_remove(&hci_index_ida, hdev->id);
3121
3122         return error;
3123 }
3124 EXPORT_SYMBOL(hci_register_dev);
3125
3126 /* Unregister HCI device */
3127 void hci_unregister_dev(struct hci_dev *hdev)
3128 {
3129         int id;
3130
3131         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3132
3133         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3134
3135         id = hdev->id;
3136
3137         write_lock(&hci_dev_list_lock);
3138         list_del(&hdev->list);
3139         write_unlock(&hci_dev_list_lock);
3140
3141         hci_dev_do_close(hdev);
3142
3143         cancel_work_sync(&hdev->power_on);
3144
3145         if (!test_bit(HCI_INIT, &hdev->flags) &&
3146             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3147             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3148                 hci_dev_lock(hdev);
3149                 mgmt_index_removed(hdev);
3150                 hci_dev_unlock(hdev);
3151         }
3152
3153         /* mgmt_index_removed should take care of emptying the
3154          * pending list */
3155         BUG_ON(!list_empty(&hdev->mgmt_pending));
3156
3157         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3158
3159         if (hdev->rfkill) {
3160                 rfkill_unregister(hdev->rfkill);
3161                 rfkill_destroy(hdev->rfkill);
3162         }
3163
3164         device_del(&hdev->dev);
3165
3166         debugfs_remove_recursive(hdev->debugfs);
3167
3168         destroy_workqueue(hdev->workqueue);
3169         destroy_workqueue(hdev->req_workqueue);
3170
3171         hci_dev_lock(hdev);
3172         hci_bdaddr_list_clear(&hdev->blacklist);
3173         hci_bdaddr_list_clear(&hdev->whitelist);
3174         hci_uuids_clear(hdev);
3175         hci_link_keys_clear(hdev);
3176         hci_smp_ltks_clear(hdev);
3177         hci_smp_irks_clear(hdev);
3178         hci_remote_oob_data_clear(hdev);
3179         hci_adv_instances_clear(hdev);
3180         hci_bdaddr_list_clear(&hdev->le_white_list);
3181         hci_conn_params_clear_all(hdev);
3182         hci_discovery_filter_clear(hdev);
3183         hci_dev_unlock(hdev);
3184
3185         hci_dev_put(hdev);
3186
3187         ida_simple_remove(&hci_index_ida, id);
3188 }
3189 EXPORT_SYMBOL(hci_unregister_dev);
3190
3191 /* Suspend HCI device */
3192 int hci_suspend_dev(struct hci_dev *hdev)
3193 {
3194         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3195         return 0;
3196 }
3197 EXPORT_SYMBOL(hci_suspend_dev);
3198
3199 /* Resume HCI device */
3200 int hci_resume_dev(struct hci_dev *hdev)
3201 {
3202         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3203         return 0;
3204 }
3205 EXPORT_SYMBOL(hci_resume_dev);
3206
3207 /* Reset HCI device */
3208 int hci_reset_dev(struct hci_dev *hdev)
3209 {
3210         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3211         struct sk_buff *skb;
3212
3213         skb = bt_skb_alloc(3, GFP_ATOMIC);
3214         if (!skb)
3215                 return -ENOMEM;
3216
3217         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3218         memcpy(skb_put(skb, 3), hw_err, 3);
3219
3220         /* Send Hardware Error to upper stack */
3221         return hci_recv_frame(hdev, skb);
3222 }
3223 EXPORT_SYMBOL(hci_reset_dev);
3224
3225 /* Receive frame from HCI drivers */
3226 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3227 {
3228         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3229                       !test_bit(HCI_INIT, &hdev->flags))) {
3230                 kfree_skb(skb);
3231                 return -ENXIO;
3232         }
3233
3234         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3235             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3236             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3237                 kfree_skb(skb);
3238                 return -EINVAL;
3239         }
3240
3241         /* Incoming skb */
3242         bt_cb(skb)->incoming = 1;
3243
3244         /* Time stamp */
3245         __net_timestamp(skb);
3246
3247         skb_queue_tail(&hdev->rx_q, skb);
3248         queue_work(hdev->workqueue, &hdev->rx_work);
3249
3250         return 0;
3251 }
3252 EXPORT_SYMBOL(hci_recv_frame);
3253
3254 /* Receive diagnostic message from HCI drivers */
3255 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3256 {
3257         /* Mark as diagnostic packet */
3258         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3259
3260         /* Time stamp */
3261         __net_timestamp(skb);
3262
3263         skb_queue_tail(&hdev->rx_q, skb);
3264         queue_work(hdev->workqueue, &hdev->rx_work);
3265
3266         return 0;
3267 }
3268 EXPORT_SYMBOL(hci_recv_diag);
3269
3270 /* ---- Interface to upper protocols ---- */
3271
3272 int hci_register_cb(struct hci_cb *cb)
3273 {
3274         BT_DBG("%p name %s", cb, cb->name);
3275
3276         mutex_lock(&hci_cb_list_lock);
3277         list_add_tail(&cb->list, &hci_cb_list);
3278         mutex_unlock(&hci_cb_list_lock);
3279
3280         return 0;
3281 }
3282 EXPORT_SYMBOL(hci_register_cb);
3283
3284 int hci_unregister_cb(struct hci_cb *cb)
3285 {
3286         BT_DBG("%p name %s", cb, cb->name);
3287
3288         mutex_lock(&hci_cb_list_lock);
3289         list_del(&cb->list);
3290         mutex_unlock(&hci_cb_list_lock);
3291
3292         return 0;
3293 }
3294 EXPORT_SYMBOL(hci_unregister_cb);
3295
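     /* Send a single frame to the driver, mirroring a copy to the
      * monitor socket (and to raw sockets in promiscuous mode) before
      * the skb is handed to the driver's send callback.
      */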
3296 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3297 {
3298         int err;
3299
3300         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3301                skb->len);
3302
3303         /* Time stamp */
3304         __net_timestamp(skb);
3305
3306         /* Send copy to monitor */
3307         hci_send_to_monitor(hdev, skb);
3308
3309         if (atomic_read(&hdev->promisc)) {
3310                 /* Send copy to the sockets */
3311                 hci_send_to_sock(hdev, skb);
3312         }
3313
3314         /* Get rid of skb owner, prior to sending to the driver. */
3315         skb_orphan(skb);
3316
3317         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3318                 kfree_skb(skb);
3319                 return;
3320         }
3321
3322         err = hdev->send(hdev, skb);
3323         if (err < 0) {
3324                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3325                 kfree_skb(skb);
3326         }
3327 }
3328
3329 /* Send HCI command */
3330 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3331                  const void *param)
3332 {
3333         struct sk_buff *skb;
3334
3335         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3336
3337         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3338         if (!skb) {
3339                 BT_ERR("%s no memory for command", hdev->name);
3340                 return -ENOMEM;
3341         }
3342
3343         /* Stand-alone HCI commands must be flagged as
3344          * single-command requests.
3345          */
3346         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3347
3348         skb_queue_tail(&hdev->cmd_q, skb);
3349         queue_work(hdev->workqueue, &hdev->cmd_work);
3350
3351         return 0;
3352 }
3353
3354 /* Get data from the previously sent command */
3355 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3356 {
3357         struct hci_command_hdr *hdr;
3358
3359         if (!hdev->sent_cmd)
3360                 return NULL;
3361
3362         hdr = (void *) hdev->sent_cmd->data;
3363
3364         if (hdr->opcode != cpu_to_le16(opcode))
3365                 return NULL;
3366
3367         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3368
3369         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3370 }
3371
3372 /* Send HCI command and wait for Command Complete event */
3373 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3374                              const void *param, u32 timeout)
3375 {
3376         struct sk_buff *skb;
3377
3378         if (!test_bit(HCI_UP, &hdev->flags))
3379                 return ERR_PTR(-ENETDOWN);
3380
3381         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3382
3383         hci_req_sync_lock(hdev);
3384         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3385         hci_req_sync_unlock(hdev);
3386
3387         return skb;
3388 }
3389 EXPORT_SYMBOL(hci_cmd_sync);
3390
3391 /* Send ACL data */
3392 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3393 {
3394         struct hci_acl_hdr *hdr;
3395         int len = skb->len;
3396
3397         skb_push(skb, HCI_ACL_HDR_SIZE);
3398         skb_reset_transport_header(skb);
3399         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3400         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3401         hdr->dlen   = cpu_to_le16(len);
3402 }
3403
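     /* Add the ACL header and queue the skb on the channel's data
      * queue. Fragment lists are queued atomically so the TX scheduler
      * never sees a partially queued packet.
      */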
3404 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3405                           struct sk_buff *skb, __u16 flags)
3406 {
3407         struct hci_conn *conn = chan->conn;
3408         struct hci_dev *hdev = conn->hdev;
3409         struct sk_buff *list;
3410
3411         skb->len = skb_headlen(skb);
3412         skb->data_len = 0;
3413
3414         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3415
3416         switch (hdev->dev_type) {
3417         case HCI_BREDR:
3418                 hci_add_acl_hdr(skb, conn->handle, flags);
3419                 break;
3420         case HCI_AMP:
3421                 hci_add_acl_hdr(skb, chan->handle, flags);
3422                 break;
3423         default:
3424                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3425                 return;
3426         }
3427
3428         list = skb_shinfo(skb)->frag_list;
3429         if (!list) {
3430                 /* Non fragmented */
3431                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3432
3433                 skb_queue_tail(queue, skb);
3434         } else {
3435                 /* Fragmented */
3436                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3437
3438                 skb_shinfo(skb)->frag_list = NULL;
3439
3440                 /* Queue all fragments atomically. We need to use spin_lock_bh
3441                  * here because with 6LoWPAN links this function can be
3442                  * called from softirq context, and using a normal spin lock
3443                  * could cause deadlocks.
3444                  */
3445                 spin_lock_bh(&queue->lock);
3446
3447                 __skb_queue_tail(queue, skb);
3448
3449                 flags &= ~ACL_START;
3450                 flags |= ACL_CONT;
3451                 do {
3452                         skb = list; list = list->next;
3453
3454                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3455                         hci_add_acl_hdr(skb, conn->handle, flags);
3456
3457                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3458
3459                         __skb_queue_tail(queue, skb);
3460                 } while (list);
3461
3462                 spin_unlock_bh(&queue->lock);
3463         }
3464 }
3465
3466 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3467 {
3468         struct hci_dev *hdev = chan->conn->hdev;
3469
3470         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3471
3472         hci_queue_acl(chan, &chan->data_q, skb, flags);
3473
3474         queue_work(hdev->workqueue, &hdev->tx_work);
3475 }
3476
3477 /* Send SCO data */
3478 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3479 {
3480         struct hci_dev *hdev = conn->hdev;
3481         struct hci_sco_hdr hdr;
3482
3483         BT_DBG("%s len %d", hdev->name, skb->len);
3484
3485         hdr.handle = cpu_to_le16(conn->handle);
3486         hdr.dlen   = skb->len;
3487
3488         skb_push(skb, HCI_SCO_HDR_SIZE);
3489         skb_reset_transport_header(skb);
3490         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3491
3492         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3493
3494         skb_queue_tail(&conn->data_q, skb);
3495         queue_work(hdev->workqueue, &hdev->tx_work);
3496 }
3497
3498 /* ---- HCI TX task (outgoing data) ---- */
3499
3500 /* HCI Connection scheduler */
3501 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3502                                      int *quote)
3503 {
3504         struct hci_conn_hash *h = &hdev->conn_hash;
3505         struct hci_conn *conn = NULL, *c;
3506         unsigned int num = 0, min = ~0;
3507
3508         /* We don't have to lock the device here. Connections are always
3509          * added and removed with the TX task disabled. */
3510
3511         rcu_read_lock();
3512
3513         list_for_each_entry_rcu(c, &h->list, list) {
3514                 if (c->type != type || skb_queue_empty(&c->data_q))
3515                         continue;
3516
3517                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3518                         continue;
3519
3520                 num++;
3521
3522                 if (c->sent < min) {
3523                         min  = c->sent;
3524                         conn = c;
3525                 }
3526
3527                 if (hci_conn_num(hdev, type) == num)
3528                         break;
3529         }
3530
3531         rcu_read_unlock();
3532
3533         if (conn) {
3534                 int cnt, q;
3535
3536                 switch (conn->type) {
3537                 case ACL_LINK:
3538                         cnt = hdev->acl_cnt;
3539                         break;
3540                 case SCO_LINK:
3541                 case ESCO_LINK:
3542                         cnt = hdev->sco_cnt;
3543                         break;
3544                 case LE_LINK:
3545                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3546                         break;
3547                 default:
3548                         cnt = 0;
3549                         BT_ERR("Unknown link type");
3550                 }
3551
3552                 q = cnt / num;
3553                 *quote = q ? q : 1;
3554         } else
3555                 *quote = 0;
3556
3557         BT_DBG("conn %p quote %d", conn, *quote);
3558         return conn;
3559 }
3560
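     /* Handle a link TX timeout by disconnecting every connection of
      * the given type that still has unacknowledged packets.
      */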
3561 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3562 {
3563         struct hci_conn_hash *h = &hdev->conn_hash;
3564         struct hci_conn *c;
3565
3566         BT_ERR("%s link tx timeout", hdev->name);
3567
3568         rcu_read_lock();
3569
3570         /* Kill stalled connections */
3571         list_for_each_entry_rcu(c, &h->list, list) {
3572                 if (c->type == type && c->sent) {
3573                         BT_ERR("%s killing stalled connection %pMR",
3574                                hdev->name, &c->dst);
3575                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3576                 }
3577         }
3578
3579         rcu_read_unlock();
3580 }
3581
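     /* HCI channel scheduler: pick a channel with the highest-priority
      * pending data (preferring the least busy connection) and compute
      * its fair share of the controller's free buffers.
      */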
3582 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3583                                       int *quote)
3584 {
3585         struct hci_conn_hash *h = &hdev->conn_hash;
3586         struct hci_chan *chan = NULL;
3587         unsigned int num = 0, min = ~0, cur_prio = 0;
3588         struct hci_conn *conn;
3589         int cnt, q, conn_num = 0;
3590
3591         BT_DBG("%s", hdev->name);
3592
3593         rcu_read_lock();
3594
3595         list_for_each_entry_rcu(conn, &h->list, list) {
3596                 struct hci_chan *tmp;
3597
3598                 if (conn->type != type)
3599                         continue;
3600
3601                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3602                         continue;
3603
3604                 conn_num++;
3605
3606                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3607                         struct sk_buff *skb;
3608
3609                         if (skb_queue_empty(&tmp->data_q))
3610                                 continue;
3611
3612                         skb = skb_peek(&tmp->data_q);
3613                         if (skb->priority < cur_prio)
3614                                 continue;
3615
3616                         if (skb->priority > cur_prio) {
3617                                 num = 0;
3618                                 min = ~0;
3619                                 cur_prio = skb->priority;
3620                         }
3621
3622                         num++;
3623
3624                         if (conn->sent < min) {
3625                                 min  = conn->sent;
3626                                 chan = tmp;
3627                         }
3628                 }
3629
3630                 if (hci_conn_num(hdev, type) == conn_num)
3631                         break;
3632         }
3633
3634         rcu_read_unlock();
3635
3636         if (!chan)
3637                 return NULL;
3638
3639         switch (chan->conn->type) {
3640         case ACL_LINK:
3641                 cnt = hdev->acl_cnt;
3642                 break;
3643         case AMP_LINK:
3644                 cnt = hdev->block_cnt;
3645                 break;
3646         case SCO_LINK:
3647         case ESCO_LINK:
3648                 cnt = hdev->sco_cnt;
3649                 break;
3650         case LE_LINK:
3651                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3652                 break;
3653         default:
3654                 cnt = 0;
3655                 BT_ERR("Unknown link type");
3656         }
3657
3658         q = cnt / num;
3659         *quote = q ? q : 1;
3660         BT_DBG("chan %p quote %d", chan, *quote);
3661         return chan;
3662 }
3663
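     /* Promote starved channels: any channel of this type that sent
      * nothing in the last round gets the head of its queue bumped to
      * HCI_PRIO_MAX - 1 so it is not starved by higher priorities.
      */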
3664 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3665 {
3666         struct hci_conn_hash *h = &hdev->conn_hash;
3667         struct hci_conn *conn;
3668         int num = 0;
3669
3670         BT_DBG("%s", hdev->name);
3671
3672         rcu_read_lock();
3673
3674         list_for_each_entry_rcu(conn, &h->list, list) {
3675                 struct hci_chan *chan;
3676
3677                 if (conn->type != type)
3678                         continue;
3679
3680                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3681                         continue;
3682
3683                 num++;
3684
3685                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3686                         struct sk_buff *skb;
3687
3688                         if (chan->sent) {
3689                                 chan->sent = 0;
3690                                 continue;
3691                         }
3692
3693                         if (skb_queue_empty(&chan->data_q))
3694                                 continue;
3695
3696                         skb = skb_peek(&chan->data_q);
3697                         if (skb->priority >= HCI_PRIO_MAX - 1)
3698                                 continue;
3699
3700                         skb->priority = HCI_PRIO_MAX - 1;
3701
3702                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3703                                skb->priority);
3704                 }
3705
3706                 if (hci_conn_num(hdev, type) == num)
3707                         break;
3708         }
3709
3710         rcu_read_unlock();
3712 }
3713
3714 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3715 {
3716         /* Calculate count of blocks used by this packet */
3717         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3718 }
3719
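     /* Run ACL TX timeout handling when the controller has reported no
      * free buffers for longer than HCI_ACL_TX_TIMEOUT.
      */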
3720 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3721 {
3722         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3723                 /* ACL tx timeout must be longer than the maximum
3724                  * link supervision timeout (40.9 seconds) */
3725                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3726                                        HCI_ACL_TX_TIMEOUT))
3727                         hci_link_tx_to(hdev, ACL_LINK);
3728         }
3729 }
3730
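     /* Packet-based ACL scheduling: send up to the quoted number of
      * packets per channel while the head priority stays stable.
      */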
3731 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3732 {
3733         unsigned int cnt = hdev->acl_cnt;
3734         struct hci_chan *chan;
3735         struct sk_buff *skb;
3736         int quote;
3737
3738         __check_timeout(hdev, cnt);
3739
3740         while (hdev->acl_cnt &&
3741                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3742                 u32 priority = (skb_peek(&chan->data_q))->priority;
3743                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3744                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3745                                skb->len, skb->priority);
3746
3747                         /* Stop if priority has changed */
3748                         if (skb->priority < priority)
3749                                 break;
3750
3751                         skb = skb_dequeue(&chan->data_q);
3752
3753                         hci_conn_enter_active_mode(chan->conn,
3754                                                    bt_cb(skb)->force_active);
3755
3756                         hci_send_frame(hdev, skb);
3757                         hdev->acl_last_tx = jiffies;
3758
3759                         hdev->acl_cnt--;
3760                         chan->sent++;
3761                         chan->conn->sent++;
3762                 }
3763         }
3764
3765         if (cnt != hdev->acl_cnt)
3766                 hci_prio_recalculate(hdev, ACL_LINK);
3767 }
3768
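     /* Block-based ACL scheduling: accounting is done in data blocks
      * rather than packets, so each frame consumes as many blocks as
      * its length requires.
      */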
3769 static void hci_sched_acl_blk(struct hci_dev *hdev)
3770 {
3771         unsigned int cnt = hdev->block_cnt;
3772         struct hci_chan *chan;
3773         struct sk_buff *skb;
3774         int quote;
3775         u8 type;
3776
3777         __check_timeout(hdev, cnt);
3778
3779         BT_DBG("%s", hdev->name);
3780
3781         if (hdev->dev_type == HCI_AMP)
3782                 type = AMP_LINK;
3783         else
3784                 type = ACL_LINK;
3785
3786         while (hdev->block_cnt > 0 &&
3787                (chan = hci_chan_sent(hdev, type, &quote))) {
3788                 u32 priority = (skb_peek(&chan->data_q))->priority;
3789                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3790                         int blocks;
3791
3792                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3793                                skb->len, skb->priority);
3794
3795                         /* Stop if priority has changed */
3796                         if (skb->priority < priority)
3797                                 break;
3798
3799                         skb = skb_dequeue(&chan->data_q);
3800
3801                         blocks = __get_blocks(hdev, skb);
3802                         if (blocks > hdev->block_cnt)
3803                                 return;
3804
3805                         hci_conn_enter_active_mode(chan->conn,
3806                                                    bt_cb(skb)->force_active);
3807
3808                         hci_send_frame(hdev, skb);
3809                         hdev->acl_last_tx = jiffies;
3810
3811                         hdev->block_cnt -= blocks;
3812                         quote -= blocks;
3813
3814                         chan->sent += blocks;
3815                         chan->conn->sent += blocks;
3816                 }
3817         }
3818
3819         if (cnt != hdev->block_cnt)
3820                 hci_prio_recalculate(hdev, type);
3821 }
3822
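     /* Schedule ACL according to the controller's flow control mode */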
3823 static void hci_sched_acl(struct hci_dev *hdev)
3824 {
3825         BT_DBG("%s", hdev->name);
3826
3827         /* No ACL link over BR/EDR controller */
3828         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3829                 return;
3830
3831         /* No AMP link over AMP controller */
3832         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3833                 return;
3834
3835         switch (hdev->flow_ctl_mode) {
3836         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3837                 hci_sched_acl_pkt(hdev);
3838                 break;
3839
3840         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3841                 hci_sched_acl_blk(hdev);
3842                 break;
3843         }
3844 }
3845
3846 /* Schedule SCO */
3847 static void hci_sched_sco(struct hci_dev *hdev)
3848 {
3849         struct hci_conn *conn;
3850         struct sk_buff *skb;
3851         int quote;
3852
3853         BT_DBG("%s", hdev->name);
3854
3855         if (!hci_conn_num(hdev, SCO_LINK))
3856                 return;
3857
3858         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3859                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3860                         BT_DBG("skb %p len %d", skb, skb->len);
3861                         hci_send_frame(hdev, skb);
3862
3863                         conn->sent++;
3864                         if (conn->sent == ~0)
3865                                 conn->sent = 0;
3866                 }
3867         }
3868 }
3869
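     /* Schedule eSCO */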
3870 static void hci_sched_esco(struct hci_dev *hdev)
3871 {
3872         struct hci_conn *conn;
3873         struct sk_buff *skb;
3874         int quote;
3875
3876         BT_DBG("%s", hdev->name);
3877
3878         if (!hci_conn_num(hdev, ESCO_LINK))
3879                 return;
3880
3881         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3882                                                      &quote))) {
3883                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3884                         BT_DBG("skb %p len %d", skb, skb->len);
3885                         hci_send_frame(hdev, skb);
3886
3887                         conn->sent++;
3888                         if (conn->sent == ~0)
3889                                 conn->sent = 0;
3890                 }
3891         }
3892 }
3893
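     /* Schedule LE data, using the ACL buffer pool on controllers
      * without a dedicated LE buffer count.
      */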
3894 static void hci_sched_le(struct hci_dev *hdev)
3895 {
3896         struct hci_chan *chan;
3897         struct sk_buff *skb;
3898         int quote, cnt, tmp;
3899
3900         BT_DBG("%s", hdev->name);
3901
3902         if (!hci_conn_num(hdev, LE_LINK))
3903                 return;
3904
3905         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3906                 /* LE tx timeout must be longer than the maximum
3907                  * link supervision timeout (40.9 seconds) */
3908                 if (!hdev->le_cnt && hdev->le_pkts &&
3909                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3910                         hci_link_tx_to(hdev, LE_LINK);
3911         }
3912
3913         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3914         tmp = cnt;
3915         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3916                 u32 priority = (skb_peek(&chan->data_q))->priority;
3917                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3918                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3919                                skb->len, skb->priority);
3920
3921                         /* Stop if priority has changed */
3922                         if (skb->priority < priority)
3923                                 break;
3924
3925                         skb = skb_dequeue(&chan->data_q);
3926
3927                         hci_send_frame(hdev, skb);
3928                         hdev->le_last_tx = jiffies;
3929
3930                         cnt--;
3931                         chan->sent++;
3932                         chan->conn->sent++;
3933                 }
3934         }
3935
3936         if (hdev->le_pkts)
3937                 hdev->le_cnt = cnt;
3938         else
3939                 hdev->acl_cnt = cnt;
3940
3941         if (cnt != tmp)
3942                 hci_prio_recalculate(hdev, LE_LINK);
3943 }
3944
3945 static void hci_tx_work(struct work_struct *work)
3946 {
3947         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3948         struct sk_buff *skb;
3949
3950         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3951                hdev->sco_cnt, hdev->le_cnt);
3952
3953         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3954                 /* Schedule queues and send stuff to HCI driver */
3955                 hci_sched_acl(hdev);
3956                 hci_sched_sco(hdev);
3957                 hci_sched_esco(hdev);
3958                 hci_sched_le(hdev);
3959         }
3960
3961         /* Send next queued raw (unknown type) packet */
3962         while ((skb = skb_dequeue(&hdev->raw_q)))
3963                 hci_send_frame(hdev, skb);
3964 }
3965
3966 /* ----- HCI RX task (incoming data processing) ----- */
3967
3968 /* ACL data packet */
3969 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3970 {
3971         struct hci_acl_hdr *hdr = (void *) skb->data;
3972         struct hci_conn *conn;
3973         __u16 handle, flags;
3974
3975         skb_pull(skb, HCI_ACL_HDR_SIZE);
3976
3977         handle = __le16_to_cpu(hdr->handle);
3978         flags  = hci_flags(handle);
3979         handle = hci_handle(handle);
3980
3981         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3982                handle, flags);
3983
3984         hdev->stat.acl_rx++;
3985
3986         hci_dev_lock(hdev);
3987         conn = hci_conn_hash_lookup_handle(hdev, handle);
3988         hci_dev_unlock(hdev);
3989
3990         if (conn) {
3991                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3992
3993                 /* Send to upper protocol */
3994                 l2cap_recv_acldata(conn, skb, flags);
3995                 return;
3996         } else {
3997                 BT_ERR("%s ACL packet for unknown connection handle %d",
3998                        hdev->name, handle);
3999         }
4000
4001         kfree_skb(skb);
4002 }
4003
4004 /* SCO data packet */
4005 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4006 {
4007         struct hci_sco_hdr *hdr = (void *) skb->data;
4008         struct hci_conn *conn;
4009         __u16 handle;
4010
4011         skb_pull(skb, HCI_SCO_HDR_SIZE);
4012
4013         handle = __le16_to_cpu(hdr->handle);
4014
4015         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4016
4017         hdev->stat.sco_rx++;
4018
4019         hci_dev_lock(hdev);
4020         conn = hci_conn_hash_lookup_handle(hdev, handle);
4021         hci_dev_unlock(hdev);
4022
4023         if (conn) {
4024                 /* Send to upper protocol */
4025                 sco_recv_scodata(conn, skb);
4026                 return;
4027         } else {
4028                 BT_ERR("%s SCO packet for unknown connection handle %d",
4029                        hdev->name, handle);
4030         }
4031
4032         kfree_skb(skb);
4033 }
4034
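     /* A request is complete once the next queued command starts a new
      * request (or the command queue is empty).
      */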
4035 static bool hci_req_is_complete(struct hci_dev *hdev)
4036 {
4037         struct sk_buff *skb;
4038
4039         skb = skb_peek(&hdev->cmd_q);
4040         if (!skb)
4041                 return true;
4042
4043         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4044 }
4045
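     /* Requeue a clone of the last sent command, except for HCI_Reset
      * which must never be resent.
      */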
4046 static void hci_resend_last(struct hci_dev *hdev)
4047 {
4048         struct hci_command_hdr *sent;
4049         struct sk_buff *skb;
4050         u16 opcode;
4051
4052         if (!hdev->sent_cmd)
4053                 return;
4054
4055         sent = (void *) hdev->sent_cmd->data;
4056         opcode = __le16_to_cpu(sent->opcode);
4057         if (opcode == HCI_OP_RESET)
4058                 return;
4059
4060         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4061         if (!skb)
4062                 return;
4063
4064         skb_queue_head(&hdev->cmd_q, skb);
4065         queue_work(hdev->workqueue, &hdev->cmd_work);
4066 }
4067
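     /* Look up the completion callbacks for the request that the given
      * command belongs to, flushing any remaining commands of an
      * aborted request from the command queue.
      */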
4068 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4069                           hci_req_complete_t *req_complete,
4070                           hci_req_complete_skb_t *req_complete_skb)
4071 {
4072         struct sk_buff *skb;
4073         unsigned long flags;
4074
4075         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4076
4077         /* If the completed command doesn't match the last one that was
4078          * sent we need to do special handling of it.
4079          */
4080         if (!hci_sent_cmd_data(hdev, opcode)) {
4081                 /* Some CSR based controllers generate a spontaneous
4082                  * reset complete event during init and any pending
4083                  * command will never be completed. In such a case we
4084                  * need to resend whatever was the last sent
4085                  * command.
4086                  */
4087                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4088                         hci_resend_last(hdev);
4089
4090                 return;
4091         }
4092
4093         /* If the command succeeded and there's still more commands in
4094          * this request the request is not yet complete.
4095          */
4096         if (!status && !hci_req_is_complete(hdev))
4097                 return;
4098
4099         /* If this was the last command in a request the complete
4100          * callback would be found in hdev->sent_cmd instead of the
4101          * command queue (hdev->cmd_q).
4102          */
4103         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4104                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4105                 return;
4106         }
4107
4108         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4109                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4110                 return;
4111         }
4112
4113         /* Remove all pending commands belonging to this request */
4114         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4115         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4116                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4117                         __skb_queue_head(&hdev->cmd_q, skb);
4118                         break;
4119                 }
4120
4121                 *req_complete = bt_cb(skb)->hci.req_complete;
4122                 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4123                 kfree_skb(skb);
4124         }
4125         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4126 }
4127
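     /* RX work: drain the receive queue, mirroring each packet to the
      * monitor (and raw sockets) before dispatching it by packet type.
      */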
4128 static void hci_rx_work(struct work_struct *work)
4129 {
4130         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4131         struct sk_buff *skb;
4132
4133         BT_DBG("%s", hdev->name);
4134
4135         while ((skb = skb_dequeue(&hdev->rx_q))) {
4136                 /* Send copy to monitor */
4137                 hci_send_to_monitor(hdev, skb);
4138
4139                 if (atomic_read(&hdev->promisc)) {
4140                         /* Send copy to the sockets */
4141                         hci_send_to_sock(hdev, skb);
4142                 }
4143
4144                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4145                         kfree_skb(skb);
4146                         continue;
4147                 }
4148
4149                 if (test_bit(HCI_INIT, &hdev->flags)) {
4150                         /* Don't process data packets in this state. */
4151                         switch (hci_skb_pkt_type(skb)) {
4152                         case HCI_ACLDATA_PKT:
4153                         case HCI_SCODATA_PKT:
4154                                 kfree_skb(skb);
4155                                 continue;
4156                         }
4157                 }
4158
4159                 /* Process frame */
4160                 switch (hci_skb_pkt_type(skb)) {
4161                 case HCI_EVENT_PKT:
4162                         BT_DBG("%s Event packet", hdev->name);
4163                         hci_event_packet(hdev, skb);
4164                         break;
4165
4166                 case HCI_ACLDATA_PKT:
4167                         BT_DBG("%s ACL data packet", hdev->name);
4168                         hci_acldata_packet(hdev, skb);
4169                         break;
4170
4171                 case HCI_SCODATA_PKT:
4172                         BT_DBG("%s SCO data packet", hdev->name);
4173                         hci_scodata_packet(hdev, skb);
4174                         break;
4175
4176                 default:
4177                         kfree_skb(skb);
4178                         break;
4179                 }
4180         }
4181 }
4182
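     /* CMD work: send the next queued command once the controller has
      * a free command slot, keeping a clone in hdev->sent_cmd and
      * arming the command timeout unless a reset is in progress.
      */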
4183 static void hci_cmd_work(struct work_struct *work)
4184 {
4185         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4186         struct sk_buff *skb;
4187
4188         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4189                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4190
4191         /* Send queued commands */
4192         if (atomic_read(&hdev->cmd_cnt)) {
4193                 skb = skb_dequeue(&hdev->cmd_q);
4194                 if (!skb)
4195                         return;
4196
4197                 kfree_skb(hdev->sent_cmd);
4198
4199                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4200                 if (hdev->sent_cmd) {
4201                         atomic_dec(&hdev->cmd_cnt);
4202                         hci_send_frame(hdev, skb);
4203                         if (test_bit(HCI_RESET, &hdev->flags))
4204                                 cancel_delayed_work(&hdev->cmd_timer);
4205                         else
4206                                 schedule_delayed_work(&hdev->cmd_timer,
4207                                                       HCI_CMD_TIMEOUT);
4208                 } else {
4209                         skb_queue_head(&hdev->cmd_q, skb);
4210                         queue_work(hdev->workqueue, &hdev->cmd_work);
4211                 }
4212         }
4213 }