cancel_delayed_work(&hdev->discov_off);
hdev->discov_timeout = 0;
clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
}
if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
hci_req_run(&req, NULL);
+ /* When the discoverable timeout triggers, then just make sure
+ * the limited discoverable flag is cleared. Even in the case
+ * of a timeout triggered from general discoverable, it is
+ * safe to unconditionally clear the flag.
+ */
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+
hdev->discov_timeout = 0;
hci_dev_unlock(hdev);
if (status) {
u8 mgmt_err = mgmt_status(status);
cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
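+ /* The limited discoverable flag gets set before the HCI request
+ * is issued, so make sure it is cleared again if the request
+ * failed.
+ */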
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
goto remove_cmd;
}
return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
status);
- if (cp->val != 0x00 && cp->val != 0x01)
+ if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_INVALID_PARAMS);
timeout = __le16_to_cpu(cp->timeout);
- if (!cp->val && timeout > 0)
+
+ /* Disabling discoverable requires that no timeout is set,
+ * and enabling limited discoverable requires a timeout.
+ */
+ if ((cp->val == 0x00 && timeout > 0) ||
+ (cp->val == 0x02 && timeout == 0))
return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_INVALID_PARAMS);
if (!hdev_is_powered(hdev)) {
bool changed = false;
+ /* Setting limited discoverable when powered off is
+ * not a valid operation since it requires a timeout
+ * and so there is no need to check HCI_LIMITED_DISCOVERABLE.
+ */
if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
changed = true;
goto failed;
}
- if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
+ /* If the current mode is the same, then just update the timeout
+ * value with the new value. And if only the timeout gets updated,
+ * then there is no need for any HCI transactions.
+ */
+ if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
+ (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
+ &hdev->dev_flags)) {
cancel_delayed_work(&hdev->discov_off);
hdev->discov_timeout = timeout;
goto failed;
}
+ /* Cancel any potential discoverable timeout that might be
+ * still active and store new timeout value. The arming of
+ * the timeout happens in the complete handler.
+ */
+ cancel_delayed_work(&hdev->discov_off);
+ hdev->discov_timeout = timeout;
+
hci_req_init(&req, hdev);
scan = SCAN_PAGE;
- if (cp->val)
+ if (cp->val) {
+ struct hci_cp_write_current_iac_lap hci_cp;
+
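+ /* The IAC LAPs are written LSB first: the Limited Inquiry Access
+ * Code (LIAC) is 0x9E8B00 and the General Inquiry Access Code
+ * (GIAC) is 0x9E8B33.
+ */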
+ if (cp->val == 0x02) {
+ /* Limited discoverable mode */
+ set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+
+ hci_cp.num_iac = 2;
+ hci_cp.iac_lap[0] = 0x00; /* LIAC */
+ hci_cp.iac_lap[1] = 0x8b;
+ hci_cp.iac_lap[2] = 0x9e;
+ hci_cp.iac_lap[3] = 0x33; /* GIAC */
+ hci_cp.iac_lap[4] = 0x8b;
+ hci_cp.iac_lap[5] = 0x9e;
+ } else {
+ /* General discoverable mode */
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+
+ hci_cp.num_iac = 1;
+ hci_cp.iac_lap[0] = 0x33; /* GIAC */
+ hci_cp.iac_lap[1] = 0x8b;
+ hci_cp.iac_lap[2] = 0x9e;
+ }
+
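+ /* The parameter length is one byte for num_iac plus three
+ * bytes for each IAC LAP.
+ */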
+ hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
+ (hci_cp.num_iac * 3) + 1, &hci_cp);
+
scan |= SCAN_INQUIRY;
- else
- cancel_delayed_work(&hdev->discov_off);
+ } else {
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ }
- hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
err = hci_req_run(&req, set_discoverable_complete);
if (err < 0)
mgmt_pending_remove(cmd);
- if (cp->val)
- hdev->discov_timeout = timeout;
-
failed:
hci_dev_unlock(hdev);
return err;
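
For reference, here is a rough, untested sketch (not part of the patch) of how a userspace management client might exercise the new limited discoverable mode over the mgmt control channel. It assumes the Set Discoverable opcode and command layout documented in doc/mgmt-api.txt, the installed <bluetooth/bluetooth.h> and <bluetooth/hci.h> headers, controller index 0 and a 60 second timeout; the fallback defines cover headers that predate the control channel constants.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

#ifndef HCI_DEV_NONE
#define HCI_DEV_NONE		0xffff
#endif
#ifndef HCI_CHANNEL_CONTROL
#define HCI_CHANNEL_CONTROL	3
#endif

/* Opcode and command layout as documented in doc/mgmt-api.txt */
#define MGMT_OP_SET_DISCOVERABLE	0x0006

struct mgmt_hdr {
	uint16_t opcode;
	uint16_t index;
	uint16_t len;
} __attribute__ ((packed));

struct mgmt_cp_set_discoverable {
	uint8_t val;
	uint16_t timeout;
} __attribute__ ((packed));

int main(void)
{
	struct {
		struct mgmt_hdr hdr;
		struct mgmt_cp_set_discoverable cp;
	} __attribute__ ((packed)) req;
	struct sockaddr_hci addr;
	int sk;

	sk = socket(PF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (sk < 0)
		return 1;

	/* Bind to the mgmt control channel instead of a raw HCI device */
	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = HCI_DEV_NONE;
	addr.hci_channel = HCI_CHANNEL_CONTROL;

	if (bind(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(sk);
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.hdr.opcode = htole16(MGMT_OP_SET_DISCOVERABLE);
	req.hdr.index = htole16(0);		/* controller hci0 */
	req.hdr.len = htole16(sizeof(req.cp));
	req.cp.val = 0x02;			/* limited discoverable */
	req.cp.timeout = htole16(60);		/* timeout is mandatory for 0x02 */

	if (write(sk, &req, sizeof(req)) < 0) {
		close(sk);
		return 1;
	}

	/* A real client would read the command complete event back here. */
	close(sk);
	return 0;
}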