/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
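
/* Usage sketch (editorial addition, not part of the original file): a
 * driver could issue a synchronous HCI command and consume the Command
 * Complete parameters like this, assuming hdev is an initialized
 * controller and the caller may sleep:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	(skb->data now points at the command's return parameters)
 *	kfree_skb(skb);
 */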

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
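
/* Usage sketch (editorial addition): hci_req_sync() drives a
 * caller-supplied request builder. A hypothetical builder that just
 * toggles page scan would look like:
 *
 *	static void example_scan_req(struct hci_request *req,
 *				     unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_req_sync(hdev, example_scan_req, SCAN_PAGE,
 *			   HCI_CMD_TIMEOUT);
 */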

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
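
/* Note (editorial addition): the return values follow the Write Inquiry
 * Mode command in the Bluetooth Core Specification: 0x00 = standard
 * inquiry results, 0x01 = inquiry results with RSSI, 0x02 = extended
 * inquiry results. The manufacturer/revision checks above appear to
 * whitelist controllers that handle RSSI results without advertising
 * the corresponding LMP feature bit.
 */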

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}
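
/* Note (editorial addition): the event mask is a little-endian 64-bit
 * bitfield, so for the classic events handled here event code N maps to
 * events[(N - 1) / 8], bit (N - 1) % 8. For example, Inquiry Result
 * with RSSI is event 0x22 (34), which lands in events[4] as bit 1,
 * i.e. the 0x02 set above.
 */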

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the Read
         * Local Supported Commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only send the command if it is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes
         * this command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}
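
/* Summary (editorial addition): controller bring-up runs up to four
 * synchronous request stages: init1 resets the controller and reads basic
 * identity (features, version, address); init2 performs BR/EDR and LE
 * capability setup plus the event mask; init3 applies feature-dependent
 * configuration (stored link keys, link policy, LE support, extended
 * feature pages); init4 sets event mask page 2 and reads synchronization
 * train parameters when supported.
 */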

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
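
/* Usage sketch (editorial addition): every successful hci_dev_get() must
 * be balanced by hci_dev_put() once the caller is done with the device:
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */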

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
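
/* Note (editorial addition): this keeps the resolve list ordered by
 * ascending |RSSI|, so entries with the strongest signal (RSSI closest
 * to zero) sit at the head and get their names resolved first.
 */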

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
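
/* Usage sketch (editorial addition, userspace side): hci_inquiry() backs
 * the HCIINQUIRY ioctl on a raw HCI socket. A minimal caller, assuming
 * dev_id 0, the GIAC LAP and the exported <bluetooth/hci.h> definitions,
 * might look roughly like:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *			  .flags = IREQ_CACHE_FLUSH,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(dd, HCIINQUIRY, &buf);
 */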

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                if (lmp_le_br_capable(hdev))
                        flags |= LE_AD_SIM_LE_BREDR_CTRL;
                if (lmp_host_le_br_capable(hdev))
                        flags |= LE_AD_SIM_LE_BREDR_HOST;
        } else {
                flags |= LE_AD_NO_BREDR;
        }

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}
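
/* Note (editorial addition): each block written above is a standard
 * advertising data structure: a length octet covering type + payload, a
 * type octet (EIR_FLAGS, EIR_TX_POWER, EIR_NAME_*), then the payload.
 * For instance, a general-discoverable LE-only device would start with
 * the three octets 02 01 06 (length 2, type Flags, LE_AD_GENERAL |
 * LE_AD_NO_BREDR).
 */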

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        /* Check for rfkill but allow the HCI setup stage to proceed
         * (which in itself doesn't cause any RF activity).
         */
        if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        flush_workqueue(hdev->req_workqueue);

        err = hci_dev_do_open(hdev);

        hci_dev_put(hdev);

        return err;
}
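
/* Note (editorial addition): hci_dev_open() is the backend for the
 * HCIDEVUP ioctl; from userspace the equivalent of "hciconfig hci0 up"
 * is roughly:
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(dd, HCIDEVUP, 0);
 */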

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags)) {
                ret = -ENETDOWN;
                goto done;
        }

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}
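
/* Note (editorial addition): struct hci_dev_list_req is a variable-length
 * object: a dev_num header immediately followed by dev_num struct
 * hci_dev_req entries. That is why the buffer is sized as
 * sizeof(*dl) + dev_num * sizeof(*dr) and dev_num is fetched with
 * get_user() before the full structure is copied back.
 */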

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}
1666
1667 /* ---- Interface to HCI drivers ---- */
1668
1669 static int hci_rfkill_set_block(void *data, bool blocked)
1670 {
1671         struct hci_dev *hdev = data;
1672
1673         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1674
1675         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1676                 return -EBUSY;
1677
1678         if (blocked) {
1679                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
1680                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1681                         hci_dev_do_close(hdev);
1682         } else {
1683                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1684         }
1685
1686         return 0;
1687 }
1688
1689 static const struct rfkill_ops hci_rfkill_ops = {
1690         .set_block = hci_rfkill_set_block,
1691 };
1692
1693 static void hci_power_on(struct work_struct *work)
1694 {
1695         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1696         int err;
1697
1698         BT_DBG("%s", hdev->name);
1699
1700         err = hci_dev_do_open(hdev);
1701         if (err < 0) {
1702                 mgmt_set_powered_failed(hdev, err);
1703                 return;
1704         }
1705
1706         if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1707                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1708                 hci_dev_do_close(hdev);
1709         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1710                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1711                                    HCI_AUTO_OFF_TIMEOUT);
1712         }
1713
1714         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1715                 mgmt_index_added(hdev);
1716 }
1717
1718 static void hci_power_off(struct work_struct *work)
1719 {
1720         struct hci_dev *hdev = container_of(work, struct hci_dev,
1721                                             power_off.work);
1722
1723         BT_DBG("%s", hdev->name);
1724
1725         hci_dev_do_close(hdev);
1726 }
1727
1728 static void hci_discov_off(struct work_struct *work)
1729 {
1730         struct hci_dev *hdev;
1731         u8 scan = SCAN_PAGE;
1732
1733         hdev = container_of(work, struct hci_dev, discov_off.work);
1734
1735         BT_DBG("%s", hdev->name);
1736
1737         hci_dev_lock(hdev);
1738
1739         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1740
1741         hdev->discov_timeout = 0;
1742
1743         hci_dev_unlock(hdev);
1744 }
1745
1746 int hci_uuids_clear(struct hci_dev *hdev)
1747 {
1748         struct bt_uuid *uuid, *tmp;
1749
1750         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1751                 list_del(&uuid->list);
1752                 kfree(uuid);
1753         }
1754
1755         return 0;
1756 }
1757
1758 int hci_link_keys_clear(struct hci_dev *hdev)
1759 {
1760         struct list_head *p, *n;
1761
1762         list_for_each_safe(p, n, &hdev->link_keys) {
1763                 struct link_key *key;
1764
1765                 key = list_entry(p, struct link_key, list);
1766
1767                 list_del(p);
1768                 kfree(key);
1769         }
1770
1771         return 0;
1772 }
1773
1774 int hci_smp_ltks_clear(struct hci_dev *hdev)
1775 {
1776         struct smp_ltk *k, *tmp;
1777
1778         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1779                 list_del(&k->list);
1780                 kfree(k);
1781         }
1782
1783         return 0;
1784 }
1785
1786 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1787 {
1788         struct link_key *k;
1789
1790         list_for_each_entry(k, &hdev->link_keys, list)
1791                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1792                         return k;
1793
1794         return NULL;
1795 }
1796
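     /* Decide whether a new link key should be stored persistently.
      * Legacy keys and keys created with some form of bonding are kept;
      * debug keys and spurious changed-combination keys are discarded
      * together with the connection.
      */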
1797 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1798                                u8 key_type, u8 old_key_type)
1799 {
1800         /* Legacy key */
1801         if (key_type < 0x03)
1802                 return true;
1803
1804         /* Debug keys are insecure so don't store them persistently */
1805         if (key_type == HCI_LK_DEBUG_COMBINATION)
1806                 return false;
1807
1808         /* Changed combination key and there's no previous one */
1809         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1810                 return false;
1811
1812         /* Security mode 3 case */
1813         if (!conn)
1814                 return true;
1815
1816         /* Neither the local nor the remote side had no-bonding as a requirement */
1817         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1818                 return true;
1819
1820         /* Local side had dedicated bonding as requirement */
1821         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1822                 return true;
1823
1824         /* Remote side had dedicated bonding as requirement */
1825         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1826                 return true;
1827
1828         /* If none of the above criteria match, then don't store the key
1829          * persistently */
1830         return false;
1831 }
1832
1833 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1834 {
1835         struct smp_ltk *k;
1836
1837         list_for_each_entry(k, &hdev->long_term_keys, list) {
1838                 if (k->ediv != ediv ||
1839                     memcmp(rand, k->rand, sizeof(k->rand)))
1840                         continue;
1841
1842                 return k;
1843         }
1844
1845         return NULL;
1846 }
1847
1848 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1849                                      u8 addr_type)
1850 {
1851         struct smp_ltk *k;
1852
1853         list_for_each_entry(k, &hdev->long_term_keys, list)
1854                 if (addr_type == k->bdaddr_type &&
1855                     bacmp(bdaddr, &k->bdaddr) == 0)
1856                         return k;
1857
1858         return NULL;
1859 }
1860
1861 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1862                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1863 {
1864         struct link_key *key, *old_key;
1865         u8 old_key_type;
1866         bool persistent;
1867
1868         old_key = hci_find_link_key(hdev, bdaddr);
1869         if (old_key) {
1870                 old_key_type = old_key->type;
1871                 key = old_key;
1872         } else {
1873                 old_key_type = conn ? conn->key_type : 0xff;
1874                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1875                 if (!key)
1876                         return -ENOMEM;
1877                 list_add(&key->list, &hdev->link_keys);
1878         }
1879
1880         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1881
1882         /* Some buggy controller combinations generate a changed
1883          * combination key for legacy pairing even when there's no
1884          * previous key */
1885         if (type == HCI_LK_CHANGED_COMBINATION &&
1886             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1887                 type = HCI_LK_COMBINATION;
1888                 if (conn)
1889                         conn->key_type = type;
1890         }
1891
1892         bacpy(&key->bdaddr, bdaddr);
1893         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1894         key->pin_len = pin_len;
1895
1896         if (type == HCI_LK_CHANGED_COMBINATION)
1897                 key->type = old_key_type;
1898         else
1899                 key->type = type;
1900
1901         if (!new_key)
1902                 return 0;
1903
1904         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1905
1906         mgmt_new_link_key(hdev, key, persistent);
1907
1908         if (conn)
1909                 conn->flush_key = !persistent;
1910
1911         return 0;
1912 }
1913
1914 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1915                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
1916                 __le16 ediv, u8 rand[8])
1917 {
1918         struct smp_ltk *key, *old_key;
1919
1920         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1921                 return 0;
1922
1923         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1924         if (old_key)
1925                 key = old_key;
1926         else {
1927                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1928                 if (!key)
1929                         return -ENOMEM;
1930                 list_add(&key->list, &hdev->long_term_keys);
1931         }
1932
1933         bacpy(&key->bdaddr, bdaddr);
1934         key->bdaddr_type = addr_type;
1935         memcpy(key->val, tk, sizeof(key->val));
1936         key->authenticated = authenticated;
1937         key->ediv = ediv;
1938         key->enc_size = enc_size;
1939         key->type = type;
1940         memcpy(key->rand, rand, sizeof(key->rand));
1941
1942         if (!new_key)
1943                 return 0;
1944
1945         if (type & HCI_SMP_LTK)
1946                 mgmt_new_ltk(hdev, key, 1);
1947
1948         return 0;
1949 }
1950
1951 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1952 {
1953         struct link_key *key;
1954
1955         key = hci_find_link_key(hdev, bdaddr);
1956         if (!key)
1957                 return -ENOENT;
1958
1959         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1960
1961         list_del(&key->list);
1962         kfree(key);
1963
1964         return 0;
1965 }
1966
1967 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1968 {
1969         struct smp_ltk *k, *tmp;
1970
1971         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1972                 if (bacmp(bdaddr, &k->bdaddr))
1973                         continue;
1974
1975                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1976
1977                 list_del(&k->list);
1978                 kfree(k);
1979         }
1980
1981         return 0;
1982 }
1983
1984 /* HCI command timer function */
1985 static void hci_cmd_timeout(unsigned long arg)
1986 {
1987         struct hci_dev *hdev = (void *) arg;
1988
1989         if (hdev->sent_cmd) {
1990                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1991                 u16 opcode = __le16_to_cpu(sent->opcode);
1992
1993                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1994         } else {
1995                 BT_ERR("%s command tx timeout", hdev->name);
1996         }
1997
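             /* Allow the next queued command to go out even though the
              * controller never answered this one.
              */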
1998         atomic_set(&hdev->cmd_cnt, 1);
1999         queue_work(hdev->workqueue, &hdev->cmd_work);
2000 }
2001
2002 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2003                                           bdaddr_t *bdaddr)
2004 {
2005         struct oob_data *data;
2006
2007         list_for_each_entry(data, &hdev->remote_oob_data, list)
2008                 if (bacmp(bdaddr, &data->bdaddr) == 0)
2009                         return data;
2010
2011         return NULL;
2012 }
2013
2014 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2015 {
2016         struct oob_data *data;
2017
2018         data = hci_find_remote_oob_data(hdev, bdaddr);
2019         if (!data)
2020                 return -ENOENT;
2021
2022         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2023
2024         list_del(&data->list);
2025         kfree(data);
2026
2027         return 0;
2028 }
2029
2030 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2031 {
2032         struct oob_data *data, *n;
2033
2034         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2035                 list_del(&data->list);
2036                 kfree(data);
2037         }
2038
2039         return 0;
2040 }
2041
2042 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2043                             u8 *randomizer)
2044 {
2045         struct oob_data *data;
2046
2047         data = hci_find_remote_oob_data(hdev, bdaddr);
2048
2049         if (!data) {
2050                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2051                 if (!data)
2052                         return -ENOMEM;
2053
2054                 bacpy(&data->bdaddr, bdaddr);
2055                 list_add(&data->list, &hdev->remote_oob_data);
2056         }
2057
2058         memcpy(data->hash, hash, sizeof(data->hash));
2059         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2060
2061         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2062
2063         return 0;
2064 }
2065
2066 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
2067 {
2068         struct bdaddr_list *b;
2069
2070         list_for_each_entry(b, &hdev->blacklist, list)
2071                 if (bacmp(bdaddr, &b->bdaddr) == 0)
2072                         return b;
2073
2074         return NULL;
2075 }
2076
2077 int hci_blacklist_clear(struct hci_dev *hdev)
2078 {
2079         struct list_head *p, *n;
2080
2081         list_for_each_safe(p, n, &hdev->blacklist) {
2082                 struct bdaddr_list *b;
2083
2084                 b = list_entry(p, struct bdaddr_list, list);
2085
2086                 list_del(p);
2087                 kfree(b);
2088         }
2089
2090         return 0;
2091 }
2092
2093 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2094 {
2095         struct bdaddr_list *entry;
2096
2097         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2098                 return -EBADF;
2099
2100         if (hci_blacklist_lookup(hdev, bdaddr))
2101                 return -EEXIST;
2102
2103         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2104         if (!entry)
2105                 return -ENOMEM;
2106
2107         bacpy(&entry->bdaddr, bdaddr);
2108
2109         list_add(&entry->list, &hdev->blacklist);
2110
2111         return mgmt_device_blocked(hdev, bdaddr, type);
2112 }
2113
2114 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2115 {
2116         struct bdaddr_list *entry;
2117
2118         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2119                 return hci_blacklist_clear(hdev);
2120
2121         entry = hci_blacklist_lookup(hdev, bdaddr);
2122         if (!entry)
2123                 return -ENOENT;
2124
2125         list_del(&entry->list);
2126         kfree(entry);
2127
2128         return mgmt_device_unblocked(hdev, bdaddr, type);
2129 }
2130
2131 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2132 {
2133         if (status) {
2134                 BT_ERR("Failed to start inquiry: status %d", status);
2135
2136                 hci_dev_lock(hdev);
2137                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2138                 hci_dev_unlock(hdev);
2139                 return;
2140         }
2141 }
2142
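     /* Once LE scanning has been disabled, interleaved discovery
      * continues with a classic BR/EDR inquiry; LE-only discovery
      * simply stops.
      */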
2143 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2144 {
2145         /* General inquiry access code (GIAC) */
2146         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2147         struct hci_request req;
2148         struct hci_cp_inquiry cp;
2149         int err;
2150
2151         if (status) {
2152                 BT_ERR("Failed to disable LE scanning: status %d", status);
2153                 return;
2154         }
2155
2156         switch (hdev->discovery.type) {
2157         case DISCOV_TYPE_LE:
2158                 hci_dev_lock(hdev);
2159                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2160                 hci_dev_unlock(hdev);
2161                 break;
2162
2163         case DISCOV_TYPE_INTERLEAVED:
2164                 hci_req_init(&req, hdev);
2165
2166                 memset(&cp, 0, sizeof(cp));
2167                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2168                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2169                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2170
2171                 hci_dev_lock(hdev);
2172
2173                 hci_inquiry_cache_flush(hdev);
2174
2175                 err = hci_req_run(&req, inquiry_complete);
2176                 if (err) {
2177                         BT_ERR("Inquiry request failed: err %d", err);
2178                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2179                 }
2180
2181                 hci_dev_unlock(hdev);
2182                 break;
2183         }
2184 }
2185
2186 static void le_scan_disable_work(struct work_struct *work)
2187 {
2188         struct hci_dev *hdev = container_of(work, struct hci_dev,
2189                                             le_scan_disable.work);
2190         struct hci_cp_le_set_scan_enable cp;
2191         struct hci_request req;
2192         int err;
2193
2194         BT_DBG("%s", hdev->name);
2195
2196         hci_req_init(&req, hdev);
2197
2198         memset(&cp, 0, sizeof(cp));
2199         cp.enable = LE_SCAN_DISABLE;
2200         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2201
2202         err = hci_req_run(&req, le_scan_disable_work_complete);
2203         if (err)
2204                 BT_ERR("Disable LE scanning request failed: err %d", err);
2205 }
2206
2207 /* Alloc HCI device */
2208 struct hci_dev *hci_alloc_dev(void)
2209 {
2210         struct hci_dev *hdev;
2211
2212         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2213         if (!hdev)
2214                 return NULL;
2215
2216         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2217         hdev->esco_type = (ESCO_HV1);
2218         hdev->link_mode = (HCI_LM_ACCEPT);
2219         hdev->io_capability = 0x03; /* No Input No Output */
2220         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2221         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2222
2223         hdev->sniff_max_interval = 800;
2224         hdev->sniff_min_interval = 80;
2225
2226         mutex_init(&hdev->lock);
2227         mutex_init(&hdev->req_lock);
2228
2229         INIT_LIST_HEAD(&hdev->mgmt_pending);
2230         INIT_LIST_HEAD(&hdev->blacklist);
2231         INIT_LIST_HEAD(&hdev->uuids);
2232         INIT_LIST_HEAD(&hdev->link_keys);
2233         INIT_LIST_HEAD(&hdev->long_term_keys);
2234         INIT_LIST_HEAD(&hdev->remote_oob_data);
2235         INIT_LIST_HEAD(&hdev->conn_hash.list);
2236
2237         INIT_WORK(&hdev->rx_work, hci_rx_work);
2238         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2239         INIT_WORK(&hdev->tx_work, hci_tx_work);
2240         INIT_WORK(&hdev->power_on, hci_power_on);
2241
2242         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2243         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2244         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2245
2246         skb_queue_head_init(&hdev->rx_q);
2247         skb_queue_head_init(&hdev->cmd_q);
2248         skb_queue_head_init(&hdev->raw_q);
2249
2250         init_waitqueue_head(&hdev->req_wait_q);
2251
2252         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2253
2254         hci_init_sysfs(hdev);
2255         discovery_init(hdev);
2256
2257         return hdev;
2258 }
2259 EXPORT_SYMBOL(hci_alloc_dev);
2260
2261 /* Free HCI device */
2262 void hci_free_dev(struct hci_dev *hdev)
2263 {
2264         /* Will be freed via the device release callback */
2265         put_device(&hdev->dev);
2266 }
2267 EXPORT_SYMBOL(hci_free_dev);
2268
2269 /* Register HCI device */
2270 int hci_register_dev(struct hci_dev *hdev)
2271 {
2272         int id, error;
2273
2274         if (!hdev->open || !hdev->close)
2275                 return -EINVAL;
2276
2277         /* Do not allow HCI_AMP devices to register at index 0,
2278          * so the index can be used as the AMP controller ID.
2279          */
2280         switch (hdev->dev_type) {
2281         case HCI_BREDR:
2282                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2283                 break;
2284         case HCI_AMP:
2285                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2286                 break;
2287         default:
2288                 return -EINVAL;
2289         }
2290
2291         if (id < 0)
2292                 return id;
2293
2294         sprintf(hdev->name, "hci%d", id);
2295         hdev->id = id;
2296
2297         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2298
2299         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2300                                           WQ_MEM_RECLAIM, 1, hdev->name);
2301         if (!hdev->workqueue) {
2302                 error = -ENOMEM;
2303                 goto err;
2304         }
2305
2306         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2307                                               WQ_MEM_RECLAIM, 1, hdev->name);
2308         if (!hdev->req_workqueue) {
2309                 destroy_workqueue(hdev->workqueue);
2310                 error = -ENOMEM;
2311                 goto err;
2312         }
2313
2314         error = hci_add_sysfs(hdev);
2315         if (error < 0)
2316                 goto err_wqueue;
2317
2318         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2319                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2320                                     hdev);
2321         if (hdev->rfkill) {
2322                 if (rfkill_register(hdev->rfkill) < 0) {
2323                         rfkill_destroy(hdev->rfkill);
2324                         hdev->rfkill = NULL;
2325                 }
2326         }
2327
2328         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2329                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2330
2331         set_bit(HCI_SETUP, &hdev->dev_flags);
2332
2333         if (hdev->dev_type != HCI_AMP) {
2334                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2335                 /* Assume BR/EDR support until proven otherwise (such as
2336                  * through reading supported features during init).
2337                  */
2338                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2339         }
2340
2341         write_lock(&hci_dev_list_lock);
2342         list_add(&hdev->list, &hci_dev_list);
2343         write_unlock(&hci_dev_list_lock);
2344
2345         hci_notify(hdev, HCI_DEV_REG);
2346         hci_dev_hold(hdev);
2347
2348         queue_work(hdev->req_workqueue, &hdev->power_on);
2349
2350         return id;
2351
2352 err_wqueue:
2353         destroy_workqueue(hdev->workqueue);
2354         destroy_workqueue(hdev->req_workqueue);
2355 err:
2356         ida_simple_remove(&hci_index_ida, hdev->id);
2357
2358         return error;
2359 }
2360 EXPORT_SYMBOL(hci_register_dev);
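     /* Typical driver-side use of the lifecycle above (illustrative
      * sketch only; my_open, my_close and my_send stand in for a real
      * driver's callbacks and are not defined in this file):
      *
      *	hdev = hci_alloc_dev();
      *	if (!hdev)
      *		return -ENOMEM;
      *
      *	hdev->bus   = HCI_USB;
      *	hdev->open  = my_open;
      *	hdev->close = my_close;
      *	hdev->send  = my_send;
      *
      *	err = hci_register_dev(hdev);
      *	if (err < 0)
      *		hci_free_dev(hdev);
      */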
2361
2362 /* Unregister HCI device */
2363 void hci_unregister_dev(struct hci_dev *hdev)
2364 {
2365         int i, id;
2366
2367         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2368
2369         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2370
2371         id = hdev->id;
2372
2373         write_lock(&hci_dev_list_lock);
2374         list_del(&hdev->list);
2375         write_unlock(&hci_dev_list_lock);
2376
2377         hci_dev_do_close(hdev);
2378
2379         for (i = 0; i < NUM_REASSEMBLY; i++)
2380                 kfree_skb(hdev->reassembly[i]);
2381
2382         cancel_work_sync(&hdev->power_on);
2383
2384         if (!test_bit(HCI_INIT, &hdev->flags) &&
2385             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2386                 hci_dev_lock(hdev);
2387                 mgmt_index_removed(hdev);
2388                 hci_dev_unlock(hdev);
2389         }
2390
2391         /* mgmt_index_removed should take care of emptying the
2392          * pending list */
2393         BUG_ON(!list_empty(&hdev->mgmt_pending));
2394
2395         hci_notify(hdev, HCI_DEV_UNREG);
2396
2397         if (hdev->rfkill) {
2398                 rfkill_unregister(hdev->rfkill);
2399                 rfkill_destroy(hdev->rfkill);
2400         }
2401
2402         hci_del_sysfs(hdev);
2403
2404         destroy_workqueue(hdev->workqueue);
2405         destroy_workqueue(hdev->req_workqueue);
2406
2407         hci_dev_lock(hdev);
2408         hci_blacklist_clear(hdev);
2409         hci_uuids_clear(hdev);
2410         hci_link_keys_clear(hdev);
2411         hci_smp_ltks_clear(hdev);
2412         hci_remote_oob_data_clear(hdev);
2413         hci_dev_unlock(hdev);
2414
2415         hci_dev_put(hdev);
2416
2417         ida_simple_remove(&hci_index_ida, id);
2418 }
2419 EXPORT_SYMBOL(hci_unregister_dev);
2420
2421 /* Suspend HCI device */
2422 int hci_suspend_dev(struct hci_dev *hdev)
2423 {
2424         hci_notify(hdev, HCI_DEV_SUSPEND);
2425         return 0;
2426 }
2427 EXPORT_SYMBOL(hci_suspend_dev);
2428
2429 /* Resume HCI device */
2430 int hci_resume_dev(struct hci_dev *hdev)
2431 {
2432         hci_notify(hdev, HCI_DEV_RESUME);
2433         return 0;
2434 }
2435 EXPORT_SYMBOL(hci_resume_dev);
2436
2437 /* Receive frame from HCI drivers */
2438 int hci_recv_frame(struct sk_buff *skb)
2439 {
2440         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2441         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2442                       !test_bit(HCI_INIT, &hdev->flags))) {
2443                 kfree_skb(skb);
2444                 return -ENXIO;
2445         }
2446
2447         /* Incoming skb */
2448         bt_cb(skb)->incoming = 1;
2449
2450         /* Time stamp */
2451         __net_timestamp(skb);
2452
2453         skb_queue_tail(&hdev->rx_q, skb);
2454         queue_work(hdev->workqueue, &hdev->rx_work);
2455
2456         return 0;
2457 }
2458 EXPORT_SYMBOL(hci_recv_frame);
2459
2460 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2461                           int count, __u8 index)
2462 {
2463         int len = 0;
2464         int hlen = 0;
2465         int remain = count;
2466         struct sk_buff *skb;
2467         struct bt_skb_cb *scb;
2468
2469         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2470             index >= NUM_REASSEMBLY)
2471                 return -EILSEQ;
2472
2473         skb = hdev->reassembly[index];
2474
2475         if (!skb) {
2476                 switch (type) {
2477                 case HCI_ACLDATA_PKT:
2478                         len = HCI_MAX_FRAME_SIZE;
2479                         hlen = HCI_ACL_HDR_SIZE;
2480                         break;
2481                 case HCI_EVENT_PKT:
2482                         len = HCI_MAX_EVENT_SIZE;
2483                         hlen = HCI_EVENT_HDR_SIZE;
2484                         break;
2485                 case HCI_SCODATA_PKT:
2486                         len = HCI_MAX_SCO_SIZE;
2487                         hlen = HCI_SCO_HDR_SIZE;
2488                         break;
2489                 }
2490
2491                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2492                 if (!skb)
2493                         return -ENOMEM;
2494
2495                 scb = (void *) skb->cb;
2496                 scb->expect = hlen;
2497                 scb->pkt_type = type;
2498
2499                 skb->dev = (void *) hdev;
2500                 hdev->reassembly[index] = skb;
2501         }
2502
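             /* Copy bytes until scb->expect is satisfied: first the
              * packet header, then (once the header has revealed the
              * payload length) the payload itself.
              */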
2503         while (count) {
2504                 scb = (void *) skb->cb;
2505                 len = min_t(uint, scb->expect, count);
2506
2507                 memcpy(skb_put(skb, len), data, len);
2508
2509                 count -= len;
2510                 data += len;
2511                 scb->expect -= len;
2512                 remain = count;
2513
2514                 switch (type) {
2515                 case HCI_EVENT_PKT:
2516                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2517                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2518                                 scb->expect = h->plen;
2519
2520                                 if (skb_tailroom(skb) < scb->expect) {
2521                                         kfree_skb(skb);
2522                                         hdev->reassembly[index] = NULL;
2523                                         return -ENOMEM;
2524                                 }
2525                         }
2526                         break;
2527
2528                 case HCI_ACLDATA_PKT:
2529                 if (skb->len == HCI_ACL_HDR_SIZE) {
2530                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2531                                 scb->expect = __le16_to_cpu(h->dlen);
2532
2533                                 if (skb_tailroom(skb) < scb->expect) {
2534                                         kfree_skb(skb);
2535                                         hdev->reassembly[index] = NULL;
2536                                         return -ENOMEM;
2537                                 }
2538                         }
2539                         break;
2540
2541                 case HCI_SCODATA_PKT:
2542                         if (skb->len == HCI_SCO_HDR_SIZE) {
2543                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2544                                 scb->expect = h->dlen;
2545
2546                                 if (skb_tailroom(skb) < scb->expect) {
2547                                         kfree_skb(skb);
2548                                         hdev->reassembly[index] = NULL;
2549                                         return -ENOMEM;
2550                                 }
2551                         }
2552                         break;
2553                 }
2554
2555                 if (scb->expect == 0) {
2556                         /* Complete frame */
2557
2558                         bt_cb(skb)->pkt_type = type;
2559                         hci_recv_frame(skb);
2560
2561                         hdev->reassembly[index] = NULL;
2562                         return remain;
2563                 }
2564         }
2565
2566         return remain;
2567 }
2568
2569 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2570 {
2571         int rem = 0;
2572
2573         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2574                 return -EILSEQ;
2575
2576         while (count) {
2577                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2578                 if (rem < 0)
2579                         return rem;
2580
2581                 data += (count - rem);
2582                 count = rem;
2583         }
2584
2585         return rem;
2586 }
2587 EXPORT_SYMBOL(hci_recv_fragment);
2588
2589 #define STREAM_REASSEMBLY 0
2590
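     /* For byte-stream transports (e.g. UART) every frame starts with a
      * one-byte packet type indicator, which is stripped here before
      * the generic reassembly code takes over.
      */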
2591 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2592 {
2593         int type;
2594         int rem = 0;
2595
2596         while (count) {
2597                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2598
2599                 if (!skb) {
2600                         struct { char type; } *pkt;
2601
2602                         /* Start of the frame */
2603                         pkt = data;
2604                         type = pkt->type;
2605
2606                         data++;
2607                         count--;
2608                 } else
2609                         type = bt_cb(skb)->pkt_type;
2610
2611                 rem = hci_reassembly(hdev, type, data, count,
2612                                      STREAM_REASSEMBLY);
2613                 if (rem < 0)
2614                         return rem;
2615
2616                 data += (count - rem);
2617                 count = rem;
2618         }
2619
2620         return rem;
2621 }
2622 EXPORT_SYMBOL(hci_recv_stream_fragment);
2623
2624 /* ---- Interface to upper protocols ---- */
2625
2626 int hci_register_cb(struct hci_cb *cb)
2627 {
2628         BT_DBG("%p name %s", cb, cb->name);
2629
2630         write_lock(&hci_cb_list_lock);
2631         list_add(&cb->list, &hci_cb_list);
2632         write_unlock(&hci_cb_list_lock);
2633
2634         return 0;
2635 }
2636 EXPORT_SYMBOL(hci_register_cb);
2637
2638 int hci_unregister_cb(struct hci_cb *cb)
2639 {
2640         BT_DBG("%p name %s", cb, cb->name);
2641
2642         write_lock(&hci_cb_list_lock);
2643         list_del(&cb->list);
2644         write_unlock(&hci_cb_list_lock);
2645
2646         return 0;
2647 }
2648 EXPORT_SYMBOL(hci_unregister_cb);
2649
2650 static int hci_send_frame(struct sk_buff *skb)
2651 {
2652         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2653
2654         if (!hdev) {
2655                 kfree_skb(skb);
2656                 return -ENODEV;
2657         }
2658
2659         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2660
2661         /* Time stamp */
2662         __net_timestamp(skb);
2663
2664         /* Send copy to monitor */
2665         hci_send_to_monitor(hdev, skb);
2666
2667         if (atomic_read(&hdev->promisc)) {
2668                 /* Send copy to the sockets */
2669                 hci_send_to_sock(hdev, skb);
2670         }
2671
2672         /* Get rid of the skb owner prior to sending it to the driver. */
2673         skb_orphan(skb);
2674
2675         return hdev->send(skb);
2676 }
2677
2678 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2679 {
2680         skb_queue_head_init(&req->cmd_q);
2681         req->hdev = hdev;
2682         req->err = 0;
2683 }
2684
2685 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2686 {
2687         struct hci_dev *hdev = req->hdev;
2688         struct sk_buff *skb;
2689         unsigned long flags;
2690
2691         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2692
2693         /* If an error occurred during request building, remove all HCI
2694          * commands queued on the HCI request queue.
2695          */
2696         if (req->err) {
2697                 skb_queue_purge(&req->cmd_q);
2698                 return req->err;
2699         }
2700
2701         /* Do not allow empty requests */
2702         if (skb_queue_empty(&req->cmd_q))
2703                 return -ENODATA;
2704
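             /* Attach the completion callback to the last command in
              * the batch; it runs once the final command completes.
              */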
2705         skb = skb_peek_tail(&req->cmd_q);
2706         bt_cb(skb)->req.complete = complete;
2707
2708         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2709         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2710         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2711
2712         queue_work(hdev->workqueue, &hdev->cmd_work);
2713
2714         return 0;
2715 }
2716
2717 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2718                                        u32 plen, const void *param)
2719 {
2720         int len = HCI_COMMAND_HDR_SIZE + plen;
2721         struct hci_command_hdr *hdr;
2722         struct sk_buff *skb;
2723
2724         skb = bt_skb_alloc(len, GFP_ATOMIC);
2725         if (!skb)
2726                 return NULL;
2727
2728         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2729         hdr->opcode = cpu_to_le16(opcode);
2730         hdr->plen   = plen;
2731
2732         if (plen)
2733                 memcpy(skb_put(skb, plen), param, plen);
2734
2735         BT_DBG("skb len %d", skb->len);
2736
2737         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2738         skb->dev = (void *) hdev;
2739
2740         return skb;
2741 }
2742
2743 /* Send HCI command */
2744 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2745                  const void *param)
2746 {
2747         struct sk_buff *skb;
2748
2749         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2750
2751         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2752         if (!skb) {
2753                 BT_ERR("%s no memory for command", hdev->name);
2754                 return -ENOMEM;
2755         }
2756
2757         /* Stand-alone HCI commands must be flagged as
2758          * single-command requests.
2759          */
2760         bt_cb(skb)->req.start = true;
2761
2762         skb_queue_tail(&hdev->cmd_q, skb);
2763         queue_work(hdev->workqueue, &hdev->cmd_work);
2764
2765         return 0;
2766 }
2767
2768 /* Queue a command to an asynchronous HCI request */
2769 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2770                     const void *param, u8 event)
2771 {
2772         struct hci_dev *hdev = req->hdev;
2773         struct sk_buff *skb;
2774
2775         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2776
2777         /* If an error occurred during request building, there is no point in
2778          * queueing the HCI command. We can simply return.
2779          */
2780         if (req->err)
2781                 return;
2782
2783         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2784         if (!skb) {
2785                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2786                        hdev->name, opcode);
2787                 req->err = -ENOMEM;
2788                 return;
2789         }
2790
2791         if (skb_queue_empty(&req->cmd_q))
2792                 bt_cb(skb)->req.start = true;
2793
2794         bt_cb(skb)->req.event = event;
2795
2796         skb_queue_tail(&req->cmd_q, skb);
2797 }
2798
2799 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2800                  const void *param)
2801 {
2802         hci_req_add_ev(req, opcode, plen, param, 0);
2803 }
2804
2805 /* Get data from the previously sent command */
2806 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2807 {
2808         struct hci_command_hdr *hdr;
2809
2810         if (!hdev->sent_cmd)
2811                 return NULL;
2812
2813         hdr = (void *) hdev->sent_cmd->data;
2814
2815         if (hdr->opcode != cpu_to_le16(opcode))
2816                 return NULL;
2817
2818         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2819
2820         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2821 }
2822
2823 /* Send ACL data */
2824 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2825 {
2826         struct hci_acl_hdr *hdr;
2827         int len = skb->len;
2828
2829         skb_push(skb, HCI_ACL_HDR_SIZE);
2830         skb_reset_transport_header(skb);
2831         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2832         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2833         hdr->dlen   = cpu_to_le16(len);
2834 }
2835
2836 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2837                           struct sk_buff *skb, __u16 flags)
2838 {
2839         struct hci_conn *conn = chan->conn;
2840         struct hci_dev *hdev = conn->hdev;
2841         struct sk_buff *list;
2842
2843         skb->len = skb_headlen(skb);
2844         skb->data_len = 0;
2845
2846         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2847
2848         switch (hdev->dev_type) {
2849         case HCI_BREDR:
2850                 hci_add_acl_hdr(skb, conn->handle, flags);
2851                 break;
2852         case HCI_AMP:
2853                 hci_add_acl_hdr(skb, chan->handle, flags);
2854                 break;
2855         default:
2856                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2857                 return;
2858         }
2859
2860         list = skb_shinfo(skb)->frag_list;
2861         if (!list) {
2862                 /* Non-fragmented */
2863                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2864
2865                 skb_queue_tail(queue, skb);
2866         } else {
2867                 /* Fragmented */
2868                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2869
2870                 skb_shinfo(skb)->frag_list = NULL;
2871
2872                 /* Queue all fragments atomically */
2873                 spin_lock(&queue->lock);
2874
2875                 __skb_queue_tail(queue, skb);
2876
2877                 flags &= ~ACL_START;
2878                 flags |= ACL_CONT;
2879                 do {
2880                         skb = list; list = list->next;
2881
2882                         skb->dev = (void *) hdev;
2883                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2884                         hci_add_acl_hdr(skb, conn->handle, flags);
2885
2886                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2887
2888                         __skb_queue_tail(queue, skb);
2889                 } while (list);
2890
2891                 spin_unlock(&queue->lock);
2892         }
2893 }
2894
2895 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2896 {
2897         struct hci_dev *hdev = chan->conn->hdev;
2898
2899         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2900
2901         skb->dev = (void *) hdev;
2902
2903         hci_queue_acl(chan, &chan->data_q, skb, flags);
2904
2905         queue_work(hdev->workqueue, &hdev->tx_work);
2906 }
2907
2908 /* Send SCO data */
2909 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2910 {
2911         struct hci_dev *hdev = conn->hdev;
2912         struct hci_sco_hdr hdr;
2913
2914         BT_DBG("%s len %d", hdev->name, skb->len);
2915
2916         hdr.handle = cpu_to_le16(conn->handle);
2917         hdr.dlen   = skb->len;
2918
2919         skb_push(skb, HCI_SCO_HDR_SIZE);
2920         skb_reset_transport_header(skb);
2921         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2922
2923         skb->dev = (void *) hdev;
2924         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2925
2926         skb_queue_tail(&conn->data_q, skb);
2927         queue_work(hdev->workqueue, &hdev->tx_work);
2928 }
2929
2930 /* ---- HCI TX task (outgoing data) ---- */
2931
2932 /* HCI Connection scheduler */
2933 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2934                                      int *quote)
2935 {
2936         struct hci_conn_hash *h = &hdev->conn_hash;
2937         struct hci_conn *conn = NULL, *c;
2938         unsigned int num = 0, min = ~0;
2939
2940         /* We don't have to lock device here. Connections are always
2941          * added and removed with TX task disabled. */
2942
2943         rcu_read_lock();
2944
2945         list_for_each_entry_rcu(c, &h->list, list) {
2946                 if (c->type != type || skb_queue_empty(&c->data_q))
2947                         continue;
2948
2949                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2950                         continue;
2951
2952                 num++;
2953
2954                 if (c->sent < min) {
2955                         min  = c->sent;
2956                         conn = c;
2957                 }
2958
2959                 if (hci_conn_num(hdev, type) == num)
2960                         break;
2961         }
2962
2963         rcu_read_unlock();
2964
2965         if (conn) {
2966                 int cnt, q;
2967
2968                 switch (conn->type) {
2969                 case ACL_LINK:
2970                         cnt = hdev->acl_cnt;
2971                         break;
2972                 case SCO_LINK:
2973                 case ESCO_LINK:
2974                         cnt = hdev->sco_cnt;
2975                         break;
2976                 case LE_LINK:
2977                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2978                         break;
2979                 default:
2980                         cnt = 0;
2981                         BT_ERR("Unknown link type");
2982                 }
2983
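                     /* Share the available controller buffers evenly
                      * across active connections, but always allow at
                      * least one packet per round.
                      */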
2984                 q = cnt / num;
2985                 *quote = q ? q : 1;
2986         } else
2987                 *quote = 0;
2988
2989         BT_DBG("conn %p quote %d", conn, *quote);
2990         return conn;
2991 }
2992
2993 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2994 {
2995         struct hci_conn_hash *h = &hdev->conn_hash;
2996         struct hci_conn *c;
2997
2998         BT_ERR("%s link tx timeout", hdev->name);
2999
3000         rcu_read_lock();
3001
3002         /* Kill stalled connections */
3003         list_for_each_entry_rcu(c, &h->list, list) {
3004                 if (c->type == type && c->sent) {
3005                         BT_ERR("%s killing stalled connection %pMR",
3006                                hdev->name, &c->dst);
3007                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3008                 }
3009         }
3010
3011         rcu_read_unlock();
3012 }
3013
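     /* Pick the channel to service next: among the queues holding the
      * highest-priority skb, prefer the connection that has sent the
      * least.
      */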
3014 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3015                                       int *quote)
3016 {
3017         struct hci_conn_hash *h = &hdev->conn_hash;
3018         struct hci_chan *chan = NULL;
3019         unsigned int num = 0, min = ~0, cur_prio = 0;
3020         struct hci_conn *conn;
3021         int cnt, q, conn_num = 0;
3022
3023         BT_DBG("%s", hdev->name);
3024
3025         rcu_read_lock();
3026
3027         list_for_each_entry_rcu(conn, &h->list, list) {
3028                 struct hci_chan *tmp;
3029
3030                 if (conn->type != type)
3031                         continue;
3032
3033                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3034                         continue;
3035
3036                 conn_num++;
3037
3038                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3039                         struct sk_buff *skb;
3040
3041                         if (skb_queue_empty(&tmp->data_q))
3042                                 continue;
3043
3044                         skb = skb_peek(&tmp->data_q);
3045                         if (skb->priority < cur_prio)
3046                                 continue;
3047
3048                         if (skb->priority > cur_prio) {
3049                                 num = 0;
3050                                 min = ~0;
3051                                 cur_prio = skb->priority;
3052                         }
3053
3054                         num++;
3055
3056                         if (conn->sent < min) {
3057                                 min  = conn->sent;
3058                                 chan = tmp;
3059                         }
3060                 }
3061
3062                 if (hci_conn_num(hdev, type) == conn_num)
3063                         break;
3064         }
3065
3066         rcu_read_unlock();
3067
3068         if (!chan)
3069                 return NULL;
3070
3071         switch (chan->conn->type) {
3072         case ACL_LINK:
3073                 cnt = hdev->acl_cnt;
3074                 break;
3075         case AMP_LINK:
3076                 cnt = hdev->block_cnt;
3077                 break;
3078         case SCO_LINK:
3079         case ESCO_LINK:
3080                 cnt = hdev->sco_cnt;
3081                 break;
3082         case LE_LINK:
3083                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3084                 break;
3085         default:
3086                 cnt = 0;
3087                 BT_ERR("Unknown link type");
3088         }
3089
3090         q = cnt / num;
3091         *quote = q ? q : 1;
3092         BT_DBG("chan %p quote %d", chan, *quote);
3093         return chan;
3094 }
3095
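     /* Promote the head skb of channels that got no quota in the last
      * round so lower-priority traffic is not starved indefinitely.
      */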
3096 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3097 {
3098         struct hci_conn_hash *h = &hdev->conn_hash;
3099         struct hci_conn *conn;
3100         int num = 0;
3101
3102         BT_DBG("%s", hdev->name);
3103
3104         rcu_read_lock();
3105
3106         list_for_each_entry_rcu(conn, &h->list, list) {
3107                 struct hci_chan *chan;
3108
3109                 if (conn->type != type)
3110                         continue;
3111
3112                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3113                         continue;
3114
3115                 num++;
3116
3117                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3118                         struct sk_buff *skb;
3119
3120                         if (chan->sent) {
3121                                 chan->sent = 0;
3122                                 continue;
3123                         }
3124
3125                         if (skb_queue_empty(&chan->data_q))
3126                                 continue;
3127
3128                         skb = skb_peek(&chan->data_q);
3129                         if (skb->priority >= HCI_PRIO_MAX - 1)
3130                                 continue;
3131
3132                         skb->priority = HCI_PRIO_MAX - 1;
3133
3134                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3135                                skb->priority);
3136                 }
3137
3138                 if (hci_conn_num(hdev, type) == num)
3139                         break;
3140         }
3141
3142         rcu_read_unlock();
3143
3144 }
3145
3146 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3147 {
3148         /* Calculate count of blocks used by this packet */
3149         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3150 }
3151
3152 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3153 {
3154         if (!test_bit(HCI_RAW, &hdev->flags)) {
3155                 /* ACL tx timeout must be longer than maximum
3156                  * link supervision timeout (40.9 seconds) */
3157                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3158                                        HCI_ACL_TX_TIMEOUT))
3159                         hci_link_tx_to(hdev, ACL_LINK);
3160         }
3161 }
3162
3163 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3164 {
3165         unsigned int cnt = hdev->acl_cnt;
3166         struct hci_chan *chan;
3167         struct sk_buff *skb;
3168         int quote;
3169
3170         __check_timeout(hdev, cnt);
3171
3172         while (hdev->acl_cnt &&
3173                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3174                 u32 priority = (skb_peek(&chan->data_q))->priority;
3175                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3176                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3177                                skb->len, skb->priority);
3178
3179                         /* Stop if priority has changed */
3180                         if (skb->priority < priority)
3181                                 break;
3182
3183                         skb = skb_dequeue(&chan->data_q);
3184
3185                         hci_conn_enter_active_mode(chan->conn,
3186                                                    bt_cb(skb)->force_active);
3187
3188                         hci_send_frame(skb);
3189                         hdev->acl_last_tx = jiffies;
3190
3191                         hdev->acl_cnt--;
3192                         chan->sent++;
3193                         chan->conn->sent++;
3194                 }
3195         }
3196
3197         if (cnt != hdev->acl_cnt)
3198                 hci_prio_recalculate(hdev, ACL_LINK);
3199 }
3200
3201 static void hci_sched_acl_blk(struct hci_dev *hdev)
3202 {
3203         unsigned int cnt = hdev->block_cnt;
3204         struct hci_chan *chan;
3205         struct sk_buff *skb;
3206         int quote;
3207         u8 type;
3208
3209         __check_timeout(hdev, cnt);
3210
3211         BT_DBG("%s", hdev->name);
3212
3213         if (hdev->dev_type == HCI_AMP)
3214                 type = AMP_LINK;
3215         else
3216                 type = ACL_LINK;
3217
3218         while (hdev->block_cnt > 0 &&
3219                (chan = hci_chan_sent(hdev, type, &quote))) {
3220                 u32 priority = (skb_peek(&chan->data_q))->priority;
3221                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3222                         int blocks;
3223
3224                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3225                                skb->len, skb->priority);
3226
3227                         /* Stop if priority has changed */
3228                         if (skb->priority < priority)
3229                                 break;
3230
3231                         skb = skb_dequeue(&chan->data_q);
3232
3233                         blocks = __get_blocks(hdev, skb);
3234                         if (blocks > hdev->block_cnt)
3235                                 return;
3236
3237                         hci_conn_enter_active_mode(chan->conn,
3238                                                    bt_cb(skb)->force_active);
3239
3240                         hci_send_frame(skb);
3241                         hdev->acl_last_tx = jiffies;
3242
3243                         hdev->block_cnt -= blocks;
3244                         quote -= blocks;
3245
3246                         chan->sent += blocks;
3247                         chan->conn->sent += blocks;
3248                 }
3249         }
3250
3251         if (cnt != hdev->block_cnt)
3252                 hci_prio_recalculate(hdev, type);
3253 }
3254
3255 static void hci_sched_acl(struct hci_dev *hdev)
3256 {
3257         BT_DBG("%s", hdev->name);
3258
3259         /* No ACL links to schedule on a BR/EDR controller */
3260         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3261                 return;
3262
3263         /* No AMP links to schedule on an AMP controller */
3264         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3265                 return;
3266
3267         switch (hdev->flow_ctl_mode) {
3268         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3269                 hci_sched_acl_pkt(hdev);
3270                 break;
3271
3272         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3273                 hci_sched_acl_blk(hdev);
3274                 break;
3275         }
3276 }
3277
3278 /* Schedule SCO */
3279 static void hci_sched_sco(struct hci_dev *hdev)
3280 {
3281         struct hci_conn *conn;
3282         struct sk_buff *skb;
3283         int quote;
3284
3285         BT_DBG("%s", hdev->name);
3286
3287         if (!hci_conn_num(hdev, SCO_LINK))
3288                 return;
3289
3290         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3291                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3292                         BT_DBG("skb %p len %d", skb, skb->len);
3293                         hci_send_frame(skb);
3294
3295                         conn->sent++;
3296                         if (conn->sent == ~0)
3297                                 conn->sent = 0;
3298                 }
3299         }
3300 }
3301
3302 static void hci_sched_esco(struct hci_dev *hdev)
3303 {
3304         struct hci_conn *conn;
3305         struct sk_buff *skb;
3306         int quote;
3307
3308         BT_DBG("%s", hdev->name);
3309
3310         if (!hci_conn_num(hdev, ESCO_LINK))
3311                 return;
3312
3313         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3314                                                      &quote))) {
3315                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3316                         BT_DBG("skb %p len %d", skb, skb->len);
3317                         hci_send_frame(skb);
3318
3319                         conn->sent++;
3320                         if (conn->sent == ~0)
3321                                 conn->sent = 0;
3322                 }
3323         }
3324 }
3325
3326 static void hci_sched_le(struct hci_dev *hdev)
3327 {
3328         struct hci_chan *chan;
3329         struct sk_buff *skb;
3330         int quote, cnt, tmp;
3331
3332         BT_DBG("%s", hdev->name);
3333
3334         if (!hci_conn_num(hdev, LE_LINK))
3335                 return;
3336
3337         if (!test_bit(HCI_RAW, &hdev->flags)) {
3338                 /* LE tx timeout must be longer than maximum
3339                  * link supervision timeout (40.9 seconds) */
3340                 if (!hdev->le_cnt && hdev->le_pkts &&
3341                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3342                         hci_link_tx_to(hdev, LE_LINK);
3343         }
3344
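             /* Controllers without a dedicated LE buffer pool
              * (le_pkts == 0) share the ACL buffers instead.
              */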
3345         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3346         tmp = cnt;
3347         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3348                 u32 priority = (skb_peek(&chan->data_q))->priority;
3349                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3350                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3351                                skb->len, skb->priority);
3352
3353                         /* Stop if priority has changed */
3354                         if (skb->priority < priority)
3355                                 break;
3356
3357                         skb = skb_dequeue(&chan->data_q);
3358
3359                         hci_send_frame(skb);
3360                         hdev->le_last_tx = jiffies;
3361
3362                         cnt--;
3363                         chan->sent++;
3364                         chan->conn->sent++;
3365                 }
3366         }
3367
3368         if (hdev->le_pkts)
3369                 hdev->le_cnt = cnt;
3370         else
3371                 hdev->acl_cnt = cnt;
3372
3373         if (cnt != tmp)
3374                 hci_prio_recalculate(hdev, LE_LINK);
3375 }
3376
3377 static void hci_tx_work(struct work_struct *work)
3378 {
3379         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3380         struct sk_buff *skb;
3381
3382         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3383                hdev->sco_cnt, hdev->le_cnt);
3384
3385         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3386                 /* Run the schedulers and send pending frames to the HCI driver */
3387                 hci_sched_acl(hdev);
3388                 hci_sched_sco(hdev);
3389                 hci_sched_esco(hdev);
3390                 hci_sched_le(hdev);
3391         }
3392
3393         /* Send all queued raw (unknown type) packets */
3394         while ((skb = skb_dequeue(&hdev->raw_q)))
3395                 hci_send_frame(skb);
3396 }
3397
3398 /* ----- HCI RX task (incoming data processing) ----- */
3399
3400 /* ACL data packet: route to L2CAP via the connection handle */
3401 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3402 {
3403         struct hci_acl_hdr *hdr = (void *) skb->data;
3404         struct hci_conn *conn;
3405         __u16 handle, flags;
3406
3407         skb_pull(skb, HCI_ACL_HDR_SIZE);
3408
3409         handle = __le16_to_cpu(hdr->handle);
3410         flags  = hci_flags(handle);
3411         handle = hci_handle(handle);
3412
3413         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3414                handle, flags);
3415
3416         hdev->stat.acl_rx++;
3417
3418         hci_dev_lock(hdev);
3419         conn = hci_conn_hash_lookup_handle(hdev, handle);
3420         hci_dev_unlock(hdev);
3421
3422         if (conn) {
3423                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3424
3425                 /* Send to upper protocol */
3426                 l2cap_recv_acldata(conn, skb, flags);
3427                 return;
3428         } else {
3429                 BT_ERR("%s ACL packet for unknown connection handle %d",
3430                        hdev->name, handle);
3431         }
3432
3433         kfree_skb(skb);
3434 }
3435
3436 /* SCO data packet: route to the SCO layer via the connection handle */
3437 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3438 {
3439         struct hci_sco_hdr *hdr = (void *) skb->data;
3440         struct hci_conn *conn;
3441         __u16 handle;
3442
3443         skb_pull(skb, HCI_SCO_HDR_SIZE);
3444
3445         handle = __le16_to_cpu(hdr->handle);
3446
3447         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3448
3449         hdev->stat.sco_rx++;
3450
3451         hci_dev_lock(hdev);
3452         conn = hci_conn_hash_lookup_handle(hdev, handle);
3453         hci_dev_unlock(hdev);
3454
3455         if (conn) {
3456                 /* Send to upper protocol */
3457                 sco_recv_scodata(conn, skb);
3458                 return;
3459         } else {
3460                 BT_ERR("%s SCO packet for unknown connection handle %d",
3461                        hdev->name, handle);
3462         }
3463
3464         kfree_skb(skb);
3465 }
3466
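/* A request is finished when the head of the command queue starts a
 * new request (or the queue is empty): hci_req_run() queues a
 * request's commands contiguously and marks only the first one with
 * req.start.
 */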
3467 static bool hci_req_is_complete(struct hci_dev *hdev)
3468 {
3469         struct sk_buff *skb;
3470
3471         skb = skb_peek(&hdev->cmd_q);
3472         if (!skb)
3473                 return true;
3474
3475         return bt_cb(skb)->req.start;
3476 }
3477
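/* Requeue a clone of the last sent command at the head of the command
 * queue. Used to recover when a controller emits a spontaneous reset
 * complete event during init (see hci_req_cmd_complete() below); a
 * pending HCI_OP_RESET itself is deliberately never resent.
 */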
3478 static void hci_resend_last(struct hci_dev *hdev)
3479 {
3480         struct hci_command_hdr *sent;
3481         struct sk_buff *skb;
3482         u16 opcode;
3483
3484         if (!hdev->sent_cmd)
3485                 return;
3486
3487         sent = (void *) hdev->sent_cmd->data;
3488         opcode = __le16_to_cpu(sent->opcode);
3489         if (opcode == HCI_OP_RESET)
3490                 return;
3491
3492         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3493         if (!skb)
3494                 return;
3495
3496         skb_queue_head(&hdev->cmd_q, skb);
3497         queue_work(hdev->workqueue, &hdev->cmd_work);
3498 }
3499
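/* Invoked from event processing when a command completes or reports a
 * status. Determines whether the request the command belonged to is
 * now finished; if so, runs its completion callback exactly once and
 * discards any remaining queued commands from that request.
 */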
3500 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3501 {
3502         hci_req_complete_t req_complete = NULL;
3503         struct sk_buff *skb;
3504         unsigned long flags;
3505
3506         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3507
3508         /* If the completed command doesn't match the last one that was
3509          * sent, it needs special handling.
3510          */
3511         if (!hci_sent_cmd_data(hdev, opcode)) {
3512                 /* Some CSR-based controllers generate a spontaneous
3513                  * reset complete event during init, after which any
3514                  * pending command will never be completed. In that
3515                  * case we need to resend whatever the last sent
3516                  * command was.
3517                  */
3518                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3519                         hci_resend_last(hdev);
3520
3521                 return;
3522         }
3523
3524         /* If the command succeeded and there are still more commands in
3525          * this request, the request is not yet complete.
3526          */
3527         if (!status && !hci_req_is_complete(hdev))
3528                 return;
3529
3530         /* If this was the last command in a request, the complete
3531          * callback is found in hdev->sent_cmd instead of the command
3532          * queue (hdev->cmd_q).
3533          */
3534         if (hdev->sent_cmd) {
3535                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3536
3537                 if (req_complete) {
3538                         /* We must set the complete callback to NULL to
3539                          * avoid calling the callback more than once if
3540                          * this function gets called again.
3541                          */
3542                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
3543
3544                         goto call_complete;
3545                 }
3546         }
3547
3548         /* Remove all pending commands belonging to this request */
3549         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3550         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3551                 if (bt_cb(skb)->req.start) {
3552                         __skb_queue_head(&hdev->cmd_q, skb);
3553                         break;
3554                 }
3555
3556                 req_complete = bt_cb(skb)->req.complete;
3557                 kfree_skb(skb);
3558         }
3559         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3560
3561 call_complete:
3562         if (req_complete)
3563                 req_complete(hdev, status);
3564 }
3565
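/* RX work handler: drains hdev->rx_q, mirroring every frame to the
 * monitor interface and to promiscuous sockets before dispatching it
 * by packet type. Frames are not consumed by the kernel stack in raw
 * or user channel mode, and data packets are dropped during init.
 */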
3566 static void hci_rx_work(struct work_struct *work)
3567 {
3568         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3569         struct sk_buff *skb;
3570
3571         BT_DBG("%s", hdev->name);
3572
3573         while ((skb = skb_dequeue(&hdev->rx_q))) {
3574                 /* Send copy to monitor */
3575                 hci_send_to_monitor(hdev, skb);
3576
3577                 if (atomic_read(&hdev->promisc)) {
3578                         /* Send copy to the sockets */
3579                         hci_send_to_sock(hdev, skb);
3580                 }
3581
3582                 if (test_bit(HCI_RAW, &hdev->flags) ||
3583                     test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3584                         kfree_skb(skb);
3585                         continue;
3586                 }
3587
3588                 if (test_bit(HCI_INIT, &hdev->flags)) {
3589                         /* Don't process data packets in this state. */
3590                         switch (bt_cb(skb)->pkt_type) {
3591                         case HCI_ACLDATA_PKT:
3592                         case HCI_SCODATA_PKT:
3593                                 kfree_skb(skb);
3594                                 continue;
3595                         }
3596                 }
3597
3598                 /* Process frame */
3599                 switch (bt_cb(skb)->pkt_type) {
3600                 case HCI_EVENT_PKT:
3601                         BT_DBG("%s Event packet", hdev->name);
3602                         hci_event_packet(hdev, skb);
3603                         break;
3604
3605                 case HCI_ACLDATA_PKT:
3606                         BT_DBG("%s ACL data packet", hdev->name);
3607                         hci_acldata_packet(hdev, skb);
3608                         break;
3609
3610                 case HCI_SCODATA_PKT:
3611                         BT_DBG("%s SCO data packet", hdev->name);
3612                         hci_scodata_packet(hdev, skb);
3613                         break;
3614
3615                 default:
3616                         kfree_skb(skb);
3617                         break;
3618                 }
3619         }
3620 }
3621
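/* CMD work handler: if the controller has command credit (cmd_cnt),
 * the next queued command is sent and a clone is kept in
 * hdev->sent_cmd so the matching command complete event can be paired
 * with it. The command timer is parked while a reset is in flight.
 */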
3622 static void hci_cmd_work(struct work_struct *work)
3623 {
3624         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3625         struct sk_buff *skb;
3626
3627         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3628                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3629
3630         /* Send the next queued command if the controller will accept one */
3631         if (atomic_read(&hdev->cmd_cnt)) {
3632                 skb = skb_dequeue(&hdev->cmd_q);
3633                 if (!skb)
3634                         return;
3635
3636                 kfree_skb(hdev->sent_cmd);
3637
3638                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3639                 if (hdev->sent_cmd) {
3640                         atomic_dec(&hdev->cmd_cnt);
3641                         hci_send_frame(skb);
3642                         if (test_bit(HCI_RESET, &hdev->flags))
3643                                 del_timer(&hdev->cmd_timer);
3644                         else
3645                                 mod_timer(&hdev->cmd_timer,
3646                                           jiffies + HCI_CMD_TIMEOUT);
3647                 } else {
3648                         skb_queue_head(&hdev->cmd_q, skb);
3649                         queue_work(hdev->workqueue, &hdev->cmd_work);
3650                 }
3651         }
3652 }
3653
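/* Map an exported LE address type (BDADDR_LE_*) onto the internal
 * ADDR_LE_DEV_* constant, defaulting to the random address type for
 * anything unrecognised.
 */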
3654 u8 bdaddr_to_le(u8 bdaddr_type)
3655 {
3656         switch (bdaddr_type) {
3657         case BDADDR_LE_PUBLIC:
3658                 return ADDR_LE_DEV_PUBLIC;
3659
3660         default:
3661                 /* Fall back to the LE random address type */
3662                 return ADDR_LE_DEV_RANDOM;
3663         }
3664 }