Diffstat (limited to 'net/bluetooth/hci_sync.c')
| -rw-r--r-- | net/bluetooth/hci_sync.c | 3395 |
1 file changed, 2767 insertions, 628 deletions
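Much of this patch reworks the cmd_sync deferred-work machinery (hci_cmd_sync_submit/queue/run plus the lookup, cancel and dequeue helpers added below). As a rough usage sketch, not part of the patch itself, the following shows how a caller is expected to hand work to that machinery; my_sync_work, my_sync_destroy and my_queue_example are hypothetical names, and the exact includes are assumed.

/* Sketch only: queue a callback onto the cmd_sync work item.
 * Signatures match those used in this patch; helper names are made up.
 */
#include <linux/slab.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int my_sync_work(struct hci_dev *hdev, void *data)
{
	/* Runs from cmd_sync_work with hci_req_sync_lock() held by the
	 * dispatcher, so the __hci_cmd_sync_status() style helpers may be
	 * called directly here.
	 */
	return 0;
}

static void my_sync_destroy(struct hci_dev *hdev, void *data, int err)
{
	/* Called after the work runs, or with -ECANCELED if the entry is
	 * cancelled, so this is the place to release data.
	 */
	kfree(data);
}

static int my_queue_example(struct hci_dev *hdev)
{
	u8 *data = kmalloc(1, GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* Returns -ENETDOWN if hdev is not HCI_RUNNING, or -ENODEV if the
	 * device is being unregistered (checked under hdev->unregister_lock
	 * by hci_cmd_sync_submit()).
	 */
	return hci_cmd_sync_queue(hdev, my_sync_work, data, my_sync_destroy);
}

Because the destroy callback also fires on cancellation, callers can allocate per-request data up front and rely on it being freed exactly once, which is the pattern the patch itself uses in adv_timeout_expire().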
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 0feb68f12545..a9f5b1a68356 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -3,6 +3,7 @@ * BlueZ - Bluetooth protocol stack for Linux * * Copyright (C) 2021 Intel Corporation + * Copyright 2023 NXP */ #include <linux/property.h> @@ -11,7 +12,7 @@ #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> -#include "hci_request.h" +#include "hci_codec.h" #include "hci_debugfs.h" #include "smp.h" #include "eir.h" @@ -30,6 +31,10 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, hdev->req_result = result; hdev->req_status = HCI_REQ_DONE; + /* Free the request command so it is not used as response */ + kfree_skb(hdev->req_skb); + hdev->req_skb = NULL; + if (skb) { struct sock *sk = hci_skb_sk(skb); @@ -37,15 +42,14 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, if (sk) sock_put(sk); - hdev->req_skb = skb_get(skb); + hdev->req_rsp = skb_get(skb); } wake_up_interruptible(&hdev->req_wait_q); } -static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, - u32 plen, const void *param, - struct sock *sk) +struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, struct sock *sk) { int len = HCI_COMMAND_HDR_SIZE + plen; struct hci_command_hdr *hdr; @@ -108,7 +112,7 @@ static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen, skb_queue_tail(&req->cmd_q, skb); } -static int hci_cmd_sync_run(struct hci_request *req) +static int hci_req_sync_run(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct sk_buff *skb; @@ -141,6 +145,13 @@ static int hci_cmd_sync_run(struct hci_request *req) return 0; } +static void hci_request_init(struct hci_request *req, struct hci_dev *hdev) +{ + skb_queue_head_init(&req->cmd_q); + req->hdev = hdev; + req->err = 0; +} + /* This function requires the caller holds hdev->req_lock. */ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u8 event, u32 timeout, @@ -150,15 +161,15 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, struct sk_buff *skb; int err = 0; - bt_dev_dbg(hdev, "Opcode 0x%4x", opcode); + bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode); - hci_req_init(&req, hdev); + hci_request_init(&req, hdev); hci_cmd_sync_add(&req, opcode, plen, param, event, sk); hdev->req_status = HCI_REQ_PEND; - err = hci_cmd_sync_run(&req); + err = hci_req_sync_run(&req); if (err < 0) return ERR_PTR(err); @@ -185,8 +196,8 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, hdev->req_status = 0; hdev->req_result = 0; - skb = hdev->req_skb; - hdev->req_skb = NULL; + skb = hdev->req_rsp; + hdev->req_rsp = NULL; bt_dev_dbg(hdev, "end: err %d", err); @@ -195,6 +206,12 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, return ERR_PTR(err); } + /* If command return a status event skb will be set to NULL as there are + * no parameters. 
+ */ + if (!skb) + return ERR_PTR(-ENODATA); + return skb; } EXPORT_SYMBOL(__hci_cmd_sync_sk); @@ -244,19 +261,18 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen, u8 status; skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk); + + /* If command return a status event, skb will be set to -ENODATA */ + if (skb == ERR_PTR(-ENODATA)) + return 0; + if (IS_ERR(skb)) { - bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode, - PTR_ERR(skb)); + if (!event) + bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode, + PTR_ERR(skb)); return PTR_ERR(skb); } - /* If command return a status event skb will be set to NULL as there are - * no parameters, in case of failure IS_ERR(skb) would have be set to - * the actual error would be found with PTR_ERR(skb). - */ - if (!skb) - return 0; - status = skb->data[0]; kfree_skb(skb); @@ -273,43 +289,53 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen, } EXPORT_SYMBOL(__hci_cmd_sync_status); +int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout) +{ + int err; + + hci_req_sync_lock(hdev); + err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout); + hci_req_sync_unlock(hdev); + + return err; +} +EXPORT_SYMBOL(hci_cmd_sync_status); + static void hci_cmd_sync_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work); - struct hci_cmd_sync_work_entry *entry; - hci_cmd_sync_work_func_t func; - hci_cmd_sync_work_destroy_t destroy; - void *data; bt_dev_dbg(hdev, ""); - mutex_lock(&hdev->cmd_sync_work_lock); - entry = list_first_entry(&hdev->cmd_sync_work_list, - struct hci_cmd_sync_work_entry, list); - if (entry) { - list_del(&entry->list); - func = entry->func; - data = entry->data; - destroy = entry->destroy; - kfree(entry); - } else { - func = NULL; - data = NULL; - destroy = NULL; - } - mutex_unlock(&hdev->cmd_sync_work_lock); + /* Dequeue all entries and run them */ + while (1) { + struct hci_cmd_sync_work_entry *entry; - if (func) { - int err; + mutex_lock(&hdev->cmd_sync_work_lock); + entry = list_first_entry_or_null(&hdev->cmd_sync_work_list, + struct hci_cmd_sync_work_entry, + list); + if (entry) + list_del(&entry->list); + mutex_unlock(&hdev->cmd_sync_work_lock); + + if (!entry) + break; - hci_req_sync_lock(hdev); + bt_dev_dbg(hdev, "entry %p", entry); - err = func(hdev, data); + if (entry->func) { + int err; - if (destroy) - destroy(hdev, data, err); + hci_req_sync_lock(hdev); + err = entry->func(hdev, entry->data); + if (entry->destroy) + entry->destroy(hdev, entry->data, err); + hci_req_sync_unlock(hdev); + } - hci_req_sync_unlock(hdev); + kfree(entry); } } @@ -324,13 +350,302 @@ static void hci_cmd_sync_cancel_work(struct work_struct *work) wake_up_interruptible(&hdev->req_wait_q); } +static int hci_scan_disable_sync(struct hci_dev *hdev); +static int scan_disable_sync(struct hci_dev *hdev, void *data) +{ + return hci_scan_disable_sync(hdev); +} + +static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data) +{ + return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0); +} + +static void le_scan_disable(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + le_scan_disable.work); + int status; + + bt_dev_dbg(hdev, ""); + hci_dev_lock(hdev); + + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) + goto _return; + + status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL); + if (status) { + bt_dev_err(hdev, "failed to disable LE scan: %d", 
status); + goto _return; + } + + /* If we were running LE only scan, change discovery state. If + * we were running both LE and BR/EDR inquiry simultaneously, + * and BR/EDR inquiry is already finished, stop discovery, + * otherwise BR/EDR inquiry will stop discovery when finished. + * If we will resolve remote device name, do not change + * discovery state. + */ + + if (hdev->discovery.type == DISCOV_TYPE_LE) + goto discov_stopped; + + if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) + goto _return; + + if (hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) { + if (!test_bit(HCI_INQUIRY, &hdev->flags) && + hdev->discovery.state != DISCOVERY_RESOLVING) + goto discov_stopped; + + goto _return; + } + + status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL); + if (status) { + bt_dev_err(hdev, "inquiry failed: status %d", status); + goto discov_stopped; + } + + goto _return; + +discov_stopped: + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + +_return: + hci_dev_unlock(hdev); +} + +static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, + u8 filter_dup); + +static int reenable_adv_sync(struct hci_dev *hdev, void *data) +{ + bt_dev_dbg(hdev, ""); + + if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && + list_empty(&hdev->adv_instances)) + return 0; + + if (hdev->cur_adv_instance) { + return hci_schedule_adv_instance_sync(hdev, + hdev->cur_adv_instance, + true); + } else { + if (ext_adv_capable(hdev)) { + hci_start_ext_adv_sync(hdev, 0x00); + } else { + hci_update_adv_data_sync(hdev, 0x00); + hci_update_scan_rsp_data_sync(hdev, 0x00); + hci_enable_advertising_sync(hdev); + } + } + + return 0; +} + +static void reenable_adv(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + reenable_adv_work); + int status; + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL); + if (status) + bt_dev_err(hdev, "failed to reenable ADV: %d", status); + + hci_dev_unlock(hdev); +} + +static void cancel_adv_timeout(struct hci_dev *hdev) +{ + if (hdev->adv_instance_timeout) { + hdev->adv_instance_timeout = 0; + cancel_delayed_work(&hdev->adv_instance_expire); + } +} + +/* For a single instance: + * - force == true: The instance will be removed even when its remaining + * lifetime is not zero. + * - force == false: the instance will be deactivated but kept stored unless + * the remaining lifetime is zero. + * + * For instance == 0x00: + * - force == true: All instances will be removed regardless of their timeout + * setting. + * - force == false: Only instances that have a timeout will be removed. + */ +int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk, + u8 instance, bool force) +{ + struct adv_info *adv_instance, *n, *next_instance = NULL; + int err; + u8 rem_inst; + + /* Cancel any timeout concerning the removed instance(s). */ + if (!instance || hdev->cur_adv_instance == instance) + cancel_adv_timeout(hdev); + + /* Get the next instance to advertise BEFORE we remove + * the current one. This can be the same instance again + * if there is only one instance. 
+ */ + if (instance && hdev->cur_adv_instance == instance) + next_instance = hci_get_next_instance(hdev, instance); + + if (instance == 0x00) { + list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, + list) { + if (!(force || adv_instance->timeout)) + continue; + + rem_inst = adv_instance->instance; + err = hci_remove_adv_instance(hdev, rem_inst); + if (!err) + mgmt_advertising_removed(sk, hdev, rem_inst); + } + } else { + adv_instance = hci_find_adv_instance(hdev, instance); + + if (force || (adv_instance && adv_instance->timeout && + !adv_instance->remaining_time)) { + /* Don't advertise a removed instance. */ + if (next_instance && + next_instance->instance == instance) + next_instance = NULL; + + err = hci_remove_adv_instance(hdev, instance); + if (!err) + mgmt_advertising_removed(sk, hdev, instance); + } + } + + if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) + return 0; + + if (next_instance && !ext_adv_capable(hdev)) + return hci_schedule_adv_instance_sync(hdev, + next_instance->instance, + false); + + return 0; +} + +static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data) +{ + u8 instance = *(u8 *)data; + + kfree(data); + + hci_clear_adv_instance_sync(hdev, NULL, instance, false); + + if (list_empty(&hdev->adv_instances)) + return hci_disable_advertising_sync(hdev); + + return 0; +} + +static void adv_timeout_expire(struct work_struct *work) +{ + u8 *inst_ptr; + struct hci_dev *hdev = container_of(work, struct hci_dev, + adv_instance_expire.work); + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + hdev->adv_instance_timeout = 0; + + if (hdev->cur_adv_instance == 0x00) + goto unlock; + + inst_ptr = kmalloc(1, GFP_KERNEL); + if (!inst_ptr) + goto unlock; + + *inst_ptr = hdev->cur_adv_instance; + hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL); + +unlock: + hci_dev_unlock(hdev); +} + +static bool is_interleave_scanning(struct hci_dev *hdev) +{ + return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; +} + +static int hci_passive_scan_sync(struct hci_dev *hdev); + +static void interleave_scan_work(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + interleave_scan.work); + unsigned long timeout; + + if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { + timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); + } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { + timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); + } else { + bt_dev_err(hdev, "unexpected error"); + return; + } + + hci_passive_scan_sync(hdev); + + hci_dev_lock(hdev); + + switch (hdev->interleave_scan_state) { + case INTERLEAVE_SCAN_ALLOWLIST: + bt_dev_dbg(hdev, "next state: allowlist"); + hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; + break; + case INTERLEAVE_SCAN_NO_FILTER: + bt_dev_dbg(hdev, "next state: no filter"); + hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; + break; + case INTERLEAVE_SCAN_NONE: + bt_dev_err(hdev, "unexpected error"); + } + + hci_dev_unlock(hdev); + + /* Don't continue interleaving if it was canceled */ + if (is_interleave_scanning(hdev)) + queue_delayed_work(hdev->req_workqueue, + &hdev->interleave_scan, timeout); +} + void hci_cmd_sync_init(struct hci_dev *hdev) { INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); INIT_LIST_HEAD(&hdev->cmd_sync_work_list); mutex_init(&hdev->cmd_sync_work_lock); + mutex_init(&hdev->unregister_lock); INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); + 
INIT_WORK(&hdev->reenable_adv_work, reenable_adv); + INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable); + INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); + INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); +} + +static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev, + struct hci_cmd_sync_work_entry *entry, + int err) +{ + if (entry->destroy) + entry->destroy(hdev, entry->data, err); + + list_del(&entry->list); + kfree(entry); } void hci_cmd_sync_clear(struct hci_dev *hdev) @@ -338,17 +653,15 @@ void hci_cmd_sync_clear(struct hci_dev *hdev) struct hci_cmd_sync_work_entry *entry, *tmp; cancel_work_sync(&hdev->cmd_sync_work); + cancel_work_sync(&hdev->reenable_adv_work); - list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { - if (entry->destroy) - entry->destroy(hdev, entry->data, -ECANCELED); - - list_del(&entry->list); - kfree(entry); - } + mutex_lock(&hdev->cmd_sync_work_lock); + list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); + mutex_unlock(&hdev->cmd_sync_work_lock); } -void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err) +void hci_cmd_sync_cancel(struct hci_dev *hdev, int err) { bt_dev_dbg(hdev, "err 0x%2.2x", err); @@ -356,36 +669,53 @@ void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err) hdev->req_result = err; hdev->req_status = HCI_REQ_CANCELED; - cancel_delayed_work_sync(&hdev->cmd_timer); - cancel_delayed_work_sync(&hdev->ncmd_timer); - atomic_set(&hdev->cmd_cnt, 1); - - wake_up_interruptible(&hdev->req_wait_q); + queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work); } } +EXPORT_SYMBOL(hci_cmd_sync_cancel); -void hci_cmd_sync_cancel(struct hci_dev *hdev, int err) +/* Cancel ongoing command request synchronously: + * + * - Set result and mark status to HCI_REQ_CANCELED + * - Wakeup command sync thread + */ +void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err) { bt_dev_dbg(hdev, "err 0x%2.2x", err); if (hdev->req_status == HCI_REQ_PEND) { - hdev->req_result = err; + /* req_result is __u32 so error must be positive to be properly + * propagated. + */ + hdev->req_result = err < 0 ? 
-err : err; hdev->req_status = HCI_REQ_CANCELED; - queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work); + wake_up_interruptible(&hdev->req_wait_q); } } -EXPORT_SYMBOL(hci_cmd_sync_cancel); +EXPORT_SYMBOL(hci_cmd_sync_cancel_sync); -int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, - void *data, hci_cmd_sync_work_destroy_t destroy) +/* Submit HCI command to be run in as cmd_sync_work: + * + * - hdev must _not_ be unregistered + */ +int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) { struct hci_cmd_sync_work_entry *entry; + int err = 0; - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; + mutex_lock(&hdev->unregister_lock); + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + err = -ENODEV; + goto unlock; + } + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + err = -ENOMEM; + goto unlock; + } entry->func = func; entry->data = data; entry->destroy = destroy; @@ -396,10 +726,182 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); - return 0; +unlock: + mutex_unlock(&hdev->unregister_lock); + return err; +} +EXPORT_SYMBOL(hci_cmd_sync_submit); + +/* Queue HCI command: + * + * - hdev must be running + */ +int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + /* Only queue command if hdev is running which means it had been opened + * and is either on init phase or is already up. + */ + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -ENETDOWN; + + return hci_cmd_sync_submit(hdev, func, data, destroy); } EXPORT_SYMBOL(hci_cmd_sync_queue); +static struct hci_cmd_sync_work_entry * +_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { + if (func && entry->func != func) + continue; + + if (data && entry->data != data) + continue; + + if (destroy && entry->destroy != destroy) + continue; + + return entry; + } + + return NULL; +} + +/* Queue HCI command entry once: + * + * - Lookup if an entry already exist and only if it doesn't creates a new entry + * and queue it. + */ +int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy)) + return 0; + + return hci_cmd_sync_queue(hdev, func, data, destroy); +} +EXPORT_SYMBOL(hci_cmd_sync_queue_once); + +/* Run HCI command: + * + * - hdev must be running + * - if on cmd_sync_work then run immediately otherwise queue + */ +int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + /* Only queue command if hdev is running which means it had been opened + * and is either on init phase or is already up. + */ + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -ENETDOWN; + + /* If on cmd_sync_work then run immediately otherwise queue */ + if (current_work() == &hdev->cmd_sync_work) + return func(hdev, data); + + return hci_cmd_sync_submit(hdev, func, data, destroy); +} +EXPORT_SYMBOL(hci_cmd_sync_run); + +/* Run HCI command entry once: + * + * - Lookup if an entry already exist and only if it doesn't creates a new entry + * and run it. 
+ * - if on cmd_sync_work then run immediately otherwise queue + */ +int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy)) + return 0; + + return hci_cmd_sync_run(hdev, func, data, destroy); +} +EXPORT_SYMBOL(hci_cmd_sync_run_once); + +/* Lookup HCI command entry: + * + * - Return first entry that matches by function callback or data or + * destroy callback. + */ +struct hci_cmd_sync_work_entry * +hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry; + + mutex_lock(&hdev->cmd_sync_work_lock); + entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy); + mutex_unlock(&hdev->cmd_sync_work_lock); + + return entry; +} +EXPORT_SYMBOL(hci_cmd_sync_lookup_entry); + +/* Cancel HCI command entry */ +void hci_cmd_sync_cancel_entry(struct hci_dev *hdev, + struct hci_cmd_sync_work_entry *entry) +{ + mutex_lock(&hdev->cmd_sync_work_lock); + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); + mutex_unlock(&hdev->cmd_sync_work_lock); +} +EXPORT_SYMBOL(hci_cmd_sync_cancel_entry); + +/* Dequeue one HCI command entry: + * + * - Lookup and cancel first entry that matches. + */ +bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev, + hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry; + + mutex_lock(&hdev->cmd_sync_work_lock); + + entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy); + if (!entry) { + mutex_unlock(&hdev->cmd_sync_work_lock); + return false; + } + + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); + + mutex_unlock(&hdev->cmd_sync_work_lock); + + return true; +} +EXPORT_SYMBOL(hci_cmd_sync_dequeue_once); + +/* Dequeue HCI command entry: + * + * - Lookup and cancel any entry that matches by function callback or data or + * destroy callback. + */ +bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry; + bool ret = false; + + mutex_lock(&hdev->cmd_sync_work_lock); + while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data, + destroy))) { + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); + ret = true; + } + mutex_unlock(&hdev->cmd_sync_work_lock); + + return ret; +} +EXPORT_SYMBOL(hci_cmd_sync_dequeue); + int hci_update_eir_sync(struct hci_dev *hdev) { struct hci_cp_write_eir cp; @@ -535,9 +1037,9 @@ static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa) { - /* If we're advertising or initiating an LE connection we can't - * go ahead and change the random address at this time. This is - * because the eventual initiator address used for the + /* If a random_addr has been set we're advertising or initiating an LE + * connection we can't go ahead and change the random address at this + * time. This is because the eventual initiator address used for the * subsequently created connection will be undefined (some * controllers use the new address and others the one we had * when the operation started). @@ -545,8 +1047,9 @@ static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa) * In this kind of scenario skip the update and let the random * address be updated at the next cycle. 
*/ - if (hci_dev_test_flag(hdev, HCI_LE_ADV) || - hci_lookup_le_connect(hdev)) { + if (bacmp(&hdev->random_addr, BDADDR_ANY) && + (hci_dev_test_flag(hdev, HCI_LE_ADV) || + hci_lookup_le_connect(hdev))) { bt_dev_dbg(hdev, "Deferring random address update"); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); return 0; @@ -569,7 +1072,7 @@ int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy, /* If Controller supports LL Privacy use own address type is * 0x03 */ - if (use_ll_privacy(hdev)) + if (ll_privacy_capable(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; @@ -652,11 +1155,10 @@ static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) struct hci_cp_ext_adv_set *set; u8 data[sizeof(*cp) + sizeof(*set) * 1]; u8 size; + struct adv_info *adv = NULL; /* If request specifies an instance that doesn't exist, fail */ if (instance > 0) { - struct adv_info *adv; - adv = hci_find_adv_instance(hdev, instance); if (!adv) return -EINVAL; @@ -675,7 +1177,7 @@ static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) cp->num_of_sets = !!instance; cp->enable = 0x00; - set->handle = instance; + set->handle = adv ? adv->handle : instance; size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets; @@ -709,10 +1211,127 @@ static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } +static int +hci_set_ext_adv_params_sync(struct hci_dev *hdev, struct adv_info *adv, + const struct hci_cp_le_set_ext_adv_params *cp, + struct hci_rp_le_set_ext_adv_params *rp) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(*cp), + cp, HCI_CMD_TIMEOUT); + + /* If command return a status event, skb will be set to -ENODATA */ + if (skb == ERR_PTR(-ENODATA)) + return 0; + + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", + HCI_OP_LE_SET_EXT_ADV_PARAMS, PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*rp)) { + bt_dev_err(hdev, "Invalid response length for 0x%4.4x: %u", + HCI_OP_LE_SET_EXT_ADV_PARAMS, skb->len); + kfree_skb(skb); + return -EIO; + } + + memcpy(rp, skb->data, sizeof(*rp)); + kfree_skb(skb); + + if (!rp->status) { + hdev->adv_addr_type = cp->own_addr_type; + if (!cp->handle) { + /* Store in hdev for instance 0 */ + hdev->adv_tx_power = rp->tx_power; + } else if (adv) { + adv->tx_power = rp->tx_power; + } + } + + return rp->status; +} + +static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length, + HCI_MAX_EXT_AD_LENGTH); + u8 len; + struct adv_info *adv = NULL; + int err; + + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv || !adv->adv_data_changed) + return 0; + } + + len = eir_create_adv_data(hdev, instance, pdu->data, + HCI_MAX_EXT_AD_LENGTH); + + pdu->length = len; + pdu->handle = adv ? 
adv->handle : instance; + pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; + pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG; + + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, + struct_size(pdu, data, len), pdu, + HCI_CMD_TIMEOUT); + if (err) + return err; + + /* Update data if the command succeed */ + if (adv) { + adv->adv_data_changed = false; + } else { + memcpy(hdev->adv_data, pdu->data, len); + hdev->adv_data_len = len; + } + + return 0; +} + +static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_adv_data cp; + u8 len; + + memset(&cp, 0, sizeof(cp)); + + len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data)); + + /* There's nothing to do if the data hasn't changed */ + if (hdev->adv_data_len == len && + memcmp(cp.data, hdev->adv_data, len) == 0) + return 0; + + memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); + hdev->adv_data_len = len; + + cp.length = len; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return 0; + + if (ext_adv_capable(hdev)) + return hci_set_ext_adv_data_sync(hdev, instance); + + return hci_set_adv_data_sync(hdev, instance); +} + int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_ext_adv_params cp; - bool connectable; + struct hci_rp_le_set_ext_adv_params rp; + bool connectable, require_privacy; u32 flags; bdaddr_t random_addr; u8 own_addr_type; @@ -732,7 +1351,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) * Command Disallowed error, so we must first disable the * instance if it is active. */ - if (adv && !adv->pending) { + if (adv) { err = hci_disable_ext_adv_instance_sync(hdev, instance); if (err) return err; @@ -750,10 +1369,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) return -EPERM; /* Set require_privacy to true only when non-connectable - * advertising is used. In that case it is fine to use a - * non-resolvable private address. + * advertising is used and it is not periodic. + * In that case it is fine to use a non-resolvable private address. */ - err = hci_get_random_address(hdev, !connectable, + require_privacy = !connectable && !(adv && adv->periodic); + + err = hci_get_random_address(hdev, require_privacy, adv_use_rpa(hdev, flags), adv, &own_addr_type, &random_addr); if (err < 0) @@ -765,10 +1386,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) hci_cpu_to_le24(adv->min_interval, cp.min_interval); hci_cpu_to_le24(adv->max_interval, cp.max_interval); cp.tx_power = adv->tx_power; + cp.sid = adv->sid; } else { hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; + cp.sid = 0x00; } secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); @@ -804,7 +1427,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) cp.own_addr_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; - cp.handle = instance; + cp.handle = adv ? 
adv->handle : instance; if (flags & MGMT_ADV_FLAG_SEC_2M) { cp.primary_phy = HCI_ADV_PHY_1M; @@ -818,8 +1441,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) cp.secondary_phy = HCI_ADV_PHY_1M; } - err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, - sizeof(cp), &cp, HCI_CMD_TIMEOUT); + err = hci_set_ext_adv_params_sync(hdev, adv, &cp, &rp); + if (err) + return err; + + /* Update adv data as tx power is known now */ + err = hci_set_ext_adv_data_sync(hdev, cp.handle); if (err) return err; @@ -844,31 +1471,39 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) { - struct { - struct hci_cp_le_set_ext_scan_rsp_data cp; - u8 data[HCI_MAX_EXT_AD_LENGTH]; - } pdu; + DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length, + HCI_MAX_EXT_AD_LENGTH); u8 len; + struct adv_info *adv = NULL; + int err; - memset(&pdu, 0, sizeof(pdu)); + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv || !adv->scan_rsp_changed) + return 0; + } - len = eir_create_scan_rsp(hdev, instance, pdu.data); + len = eir_create_scan_rsp(hdev, instance, pdu->data); - if (hdev->scan_rsp_data_len == len && - !memcmp(pdu.data, hdev->scan_rsp_data, len)) - return 0; + pdu->handle = adv ? adv->handle : instance; + pdu->length = len; + pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; + pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG; - memcpy(hdev->scan_rsp_data, pdu.data, len); - hdev->scan_rsp_data_len = len; + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, + struct_size(pdu, data, len), pdu, + HCI_CMD_TIMEOUT); + if (err) + return err; - pdu.cp.handle = instance; - pdu.cp.length = len; - pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; - pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; + if (adv) { + adv->scan_rsp_changed = false; + } else { + memcpy(hdev->scan_rsp_data, pdu->data, len); + hdev->scan_rsp_data_len = len; + } - return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, - sizeof(pdu.cp) + len, &pdu.cp, - HCI_CMD_TIMEOUT); + return 0; } static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) @@ -932,7 +1567,7 @@ int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance) memset(set, 0, sizeof(*set)); - set->handle = instance; + set->handle = adv ? adv->handle : instance; /* Set duration per instance since controller is responsible for * scheduling it. @@ -965,6 +1600,206 @@ int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance) return hci_enable_ext_advertising_sync(hdev, instance); } +int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_per_adv_enable cp; + struct adv_info *adv = NULL; + + /* If periodic advertising already disabled there is nothing to do. 
*/ + adv = hci_find_adv_instance(hdev, instance); + if (!adv || !adv->periodic_enabled) + return 0; + + memset(&cp, 0, sizeof(cp)); + + cp.enable = 0x00; + cp.handle = instance; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance, + u16 min_interval, u16 max_interval) +{ + struct hci_cp_le_set_per_adv_params cp; + + memset(&cp, 0, sizeof(cp)); + + if (!min_interval) + min_interval = DISCOV_LE_PER_ADV_INT_MIN; + + if (!max_interval) + max_interval = DISCOV_LE_PER_ADV_INT_MAX; + + cp.handle = instance; + cp.min_interval = cpu_to_le16(min_interval); + cp.max_interval = cpu_to_le16(max_interval); + cp.periodic_properties = 0x0000; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length, + HCI_MAX_PER_AD_LENGTH); + u8 len; + struct adv_info *adv = NULL; + + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv || !adv->periodic) + return 0; + } + + len = eir_create_per_adv_data(hdev, instance, pdu->data); + + pdu->length = len; + pdu->handle = adv ? adv->handle : instance; + pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA, + struct_size(pdu, data, len), pdu, + HCI_CMD_TIMEOUT); +} + +static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_per_adv_enable cp; + struct adv_info *adv = NULL; + + /* If periodic advertising already enabled there is nothing to do. */ + adv = hci_find_adv_instance(hdev, instance); + if (adv && adv->periodic_enabled) + return 0; + + memset(&cp, 0, sizeof(cp)); + + cp.enable = 0x01; + cp.handle = instance; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +/* Checks if periodic advertising data contains a Basic Announcement and if it + * does generates a Broadcast ID and add Broadcast Announcement. + */ +static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv) +{ + u8 bid[3]; + u8 ad[HCI_MAX_EXT_AD_LENGTH]; + u8 len; + + /* Skip if NULL adv as instance 0x00 is used for general purpose + * advertising so it cannot used for the likes of Broadcast Announcement + * as it can be overwritten at any point. + */ + if (!adv) + return 0; + + /* Check if PA data doesn't contains a Basic Audio Announcement then + * there is nothing to do. + */ + if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len, + 0x1851, NULL)) + return 0; + + /* Check if advertising data already has a Broadcast Announcement since + * the process may want to control the Broadcast ID directly and in that + * case the kernel shall no interfere. 
+ */ + if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852, + NULL)) + return 0; + + /* Generate Broadcast ID */ + get_random_bytes(bid, sizeof(bid)); + len = eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid)); + memcpy(ad + len, adv->adv_data, adv->adv_data_len); + hci_set_adv_instance_data(hdev, adv->instance, len + adv->adv_data_len, + ad, 0, NULL); + + return hci_update_adv_data_sync(hdev, adv->instance); +} + +int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 sid, + u8 data_len, u8 *data, u32 flags, u16 min_interval, + u16 max_interval, u16 sync_interval) +{ + struct adv_info *adv = NULL; + int err; + bool added = false; + + hci_disable_per_advertising_sync(hdev, instance); + + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (adv) { + if (sid != HCI_SID_INVALID && adv->sid != sid) { + /* If the SID don't match attempt to find by + * SID. + */ + adv = hci_find_adv_sid(hdev, sid); + if (!adv) { + bt_dev_err(hdev, + "Unable to find adv_info"); + return -EINVAL; + } + } + + /* Turn it into periodic advertising */ + adv->periodic = true; + adv->per_adv_data_len = data_len; + if (data) + memcpy(adv->per_adv_data, data, data_len); + adv->flags = flags; + } else if (!adv) { + /* Create an instance if that could not be found */ + adv = hci_add_per_instance(hdev, instance, sid, flags, + data_len, data, + sync_interval, + sync_interval); + if (IS_ERR(adv)) + return PTR_ERR(adv); + adv->pending = false; + added = true; + } + } + + /* Start advertising */ + err = hci_start_ext_adv_sync(hdev, instance); + if (err < 0) + goto fail; + + err = hci_adv_bcast_annoucement(hdev, adv); + if (err < 0) + goto fail; + + err = hci_set_per_adv_params_sync(hdev, instance, min_interval, + max_interval); + if (err < 0) + goto fail; + + err = hci_set_per_adv_data_sync(hdev, instance); + if (err < 0) + goto fail; + + err = hci_enable_per_advertising_sync(hdev, instance); + if (err < 0) + goto fail; + + return 0; + +fail: + if (added) + hci_remove_adv_instance(hdev, instance); + + return err; +} + static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance) { int err; @@ -1104,78 +1939,18 @@ int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, HCI_CMD_TIMEOUT, sk); } -static void cancel_adv_timeout(struct hci_dev *hdev) -{ - if (hdev->adv_instance_timeout) { - hdev->adv_instance_timeout = 0; - cancel_delayed_work(&hdev->adv_instance_expire); - } -} - -static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) +int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason) { - struct { - struct hci_cp_le_set_ext_adv_data cp; - u8 data[HCI_MAX_EXT_AD_LENGTH]; - } pdu; - u8 len; - - memset(&pdu, 0, sizeof(pdu)); - - len = eir_create_adv_data(hdev, instance, pdu.data); - - /* There's nothing to do if the data hasn't changed */ - if (hdev->adv_data_len == len && - memcmp(pdu.data, hdev->adv_data, len) == 0) - return 0; - - memcpy(hdev->adv_data, pdu.data, len); - hdev->adv_data_len = len; - - pdu.cp.length = len; - pdu.cp.handle = instance; - pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; - pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; - - return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, - sizeof(pdu.cp) + len, &pdu.cp, - HCI_CMD_TIMEOUT); -} - -static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) -{ - struct hci_cp_le_set_adv_data cp; - u8 len; + struct hci_cp_le_term_big cp; memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + cp.reason = reason; - len = eir_create_adv_data(hdev, instance, 
cp.data); - - /* There's nothing to do if the data hasn't changed */ - if (hdev->adv_data_len == len && - memcmp(cp.data, hdev->adv_data, len) == 0) - return 0; - - memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); - hdev->adv_data_len = len; - - cp.length = len; - - return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, + return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } -int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) -{ - if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) - return 0; - - if (ext_adv_capable(hdev)) - return hci_set_ext_adv_data_sync(hdev, instance); - - return hci_set_adv_data_sync(hdev, instance); -} - int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, bool force) { @@ -1216,7 +1991,7 @@ int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, hdev->adv_instance_timeout = timeout; queue_delayed_work(hdev->req_workqueue, &hdev->adv_instance_expire, - msecs_to_jiffies(timeout * 1000)); + secs_to_jiffies(timeout)); } /* If we're just re-scheduling the same instance again then do not @@ -1404,7 +2179,11 @@ static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val, memset(&cp, 0, sizeof(cp)); cp.enable = val; - cp.filter_dup = filter_dup; + + if (hci_dev_test_flag(hdev, HCI_MESH)) + cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; + else + cp.filter_dup = filter_dup; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); @@ -1420,7 +2199,11 @@ static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, memset(&cp, 0, sizeof(cp)); cp.enable = val; - cp.filter_dup = filter_dup; + + if (val && hci_dev_test_flag(hdev, HCI_MESH)) + cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; + else + cp.filter_dup = filter_dup; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); @@ -1428,7 +2211,7 @@ static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val) { - if (!use_ll_privacy(hdev)) + if (!ll_privacy_capable(hdev)) return 0; /* If controller is not/already resolving we are done. */ @@ -1473,11 +2256,6 @@ static void hci_start_interleave_scan(struct hci_dev *hdev) &hdev->interleave_scan, 0); } -static bool is_interleave_scanning(struct hci_dev *hdev) -{ - return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; -} - static void cancel_interleave_scan(struct hci_dev *hdev) { bt_dev_dbg(hdev, "cancelling interleave scan"); @@ -1525,7 +2303,7 @@ static int hci_le_del_resolve_list_sync(struct hci_dev *hdev, struct hci_cp_le_del_from_resolv_list cp; struct bdaddr_list_with_irk *entry; - if (!use_ll_privacy(hdev)) + if (!ll_privacy_capable(hdev)) return 0; /* Check if the IRK has been programmed */ @@ -1572,17 +2350,25 @@ static int hci_le_del_accept_list_sync(struct hci_dev *hdev, return 0; } +struct conn_params { + bdaddr_t addr; + u8 addr_type; + hci_conn_flags_t flags; + u8 privacy_mode; +}; + /* Adds connection to resolve list if needed. 
* Setting params to NULL programs local hdev->irk */ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, - struct hci_conn_params *params) + struct conn_params *params) { struct hci_cp_le_add_to_resolv_list cp; struct smp_irk *irk; struct bdaddr_list_with_irk *entry; + struct hci_conn_params *p; - if (!use_ll_privacy(hdev)) + if (!ll_privacy_capable(hdev)) return 0; /* Attempt to program local identity address, type and irk if params is @@ -1595,7 +2381,8 @@ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type); memcpy(cp.peer_irk, hdev->irk, 16); goto done; - } + } else if (!(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION)) + return 0; irk = hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type); if (!irk) @@ -1612,6 +2399,19 @@ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, bacpy(&cp.bdaddr, ¶ms->addr); memcpy(cp.peer_irk, irk->val, 16); + /* Default privacy mode is always Network */ + params->privacy_mode = HCI_NETWORK_PRIVACY; + + rcu_read_lock(); + p = hci_pend_le_action_lookup(&hdev->pend_le_conns, + ¶ms->addr, params->addr_type); + if (!p) + p = hci_pend_le_action_lookup(&hdev->pend_le_reports, + ¶ms->addr, params->addr_type); + if (p) + WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY); + rcu_read_unlock(); + done: if (hci_dev_test_flag(hdev, HCI_PRIVACY)) memcpy(cp.local_irk, hdev->irk, 16); @@ -1624,11 +2424,15 @@ done: /* Set Device Privacy Mode. */ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, - struct hci_conn_params *params) + struct conn_params *params) { struct hci_cp_le_set_privacy_mode cp; struct smp_irk *irk; + if (!ll_privacy_capable(hdev) || + !(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION)) + return 0; + /* If device privacy mode has already been set there is nothing to do */ if (params->privacy_mode == HCI_DEVICE_PRIVACY) return 0; @@ -1637,7 +2441,7 @@ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, * indicates that LL Privacy has been enabled and * HCI_OP_LE_SET_PRIVACY_MODE is supported. */ - if (!test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, params->flags)) + if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)) return 0; irk = hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type); @@ -1649,6 +2453,8 @@ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, bacpy(&cp.bdaddr, &irk->bdaddr); cp.mode = HCI_DEVICE_PRIVACY; + /* Note: params->privacy_mode is not updated since it is a copy */ + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } @@ -1658,26 +2464,23 @@ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, * properly set the privacy mode. 
*/ static int hci_le_add_accept_list_sync(struct hci_dev *hdev, - struct hci_conn_params *params, + struct conn_params *params, u8 *num_entries) { struct hci_cp_le_add_to_accept_list cp; int err; - /* Select filter policy to accept all advertising */ - if (*num_entries >= hdev->le_accept_list_size) - return -ENOSPC; - - /* Accept list can not be used with RPAs */ - if (!use_ll_privacy(hdev) && - hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) { - return -EINVAL; - } - /* During suspend, only wakeable devices can be in acceptlist */ if (hdev->suspended && - !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags)) + !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) { + hci_le_del_accept_list_sync(hdev, ¶ms->addr, + params->addr_type); return 0; + } + + /* Select filter policy to accept all advertising */ + if (*num_entries >= hdev->le_accept_list_size) + return -ENOSPC; /* Attempt to program the device in the resolving list first to avoid * having to rollback in case it fails since the resolving list is @@ -1726,6 +2529,10 @@ static int hci_pause_advertising_sync(struct hci_dev *hdev) int err; int old_state; + /* If controller is not advertising we are done. */ + if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) + return 0; + /* If already been paused there is nothing to do. */ if (hdev->advertising_paused) return 0; @@ -1795,6 +2602,12 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev) hci_remove_ext_adv_instance_sync(hdev, adv->instance, NULL); } + + /* If current advertising instance is set to instance 0x00 + * then we need to re-enable it. + */ + if (hci_dev_test_and_clear_flag(hdev, HCI_LE_ADV_0)) + err = hci_enable_ext_advertising_sync(hdev, 0x00); } else { /* Schedule for most recent instance to be restarted and begin * the software rotation loop @@ -1809,6 +2622,45 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev) return err; } +static int hci_pause_addr_resolution(struct hci_dev *hdev) +{ + int err; + + if (!ll_privacy_capable(hdev)) + return 0; + + if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) + return 0; + + /* Cannot disable addr resolution if scanning is enabled or + * when initiating an LE connection. + */ + if (hci_dev_test_flag(hdev, HCI_LE_SCAN) || + hci_lookup_le_connect(hdev)) { + bt_dev_err(hdev, "Command not allowed when scan/LE connect"); + return -EPERM; + } + + /* Cannot disable addr resolution if advertising is enabled. */ + err = hci_pause_advertising_sync(hdev); + if (err) { + bt_dev_err(hdev, "Pause advertising failed: %d", err); + return err; + } + + err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); + if (err) + bt_dev_err(hdev, "Unable to disable Address Resolution: %d", + err); + + /* Return if address resolution is disabled and RPA is not used. 
*/ + if (!err && scan_use_rpa(hdev)) + return 0; + + hci_resume_advertising_sync(hdev); + return err; +} + struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool extended, struct sock *sk) { @@ -1818,16 +2670,72 @@ struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk); } +static struct conn_params *conn_params_copy(struct list_head *list, size_t *n) +{ + struct hci_conn_params *params; + struct conn_params *p; + size_t i; + + rcu_read_lock(); + + i = 0; + list_for_each_entry_rcu(params, list, action) + ++i; + *n = i; + + rcu_read_unlock(); + + p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL); + if (!p) + return NULL; + + rcu_read_lock(); + + i = 0; + list_for_each_entry_rcu(params, list, action) { + /* Racing adds are handled in next scan update */ + if (i >= *n) + break; + + /* No hdev->lock, but: addr, addr_type are immutable. + * privacy_mode is only written by us or in + * hci_cc_le_set_privacy_mode that we wait for. + * We should be idempotent so MGMT updating flags + * while we are processing is OK. + */ + bacpy(&p[i].addr, ¶ms->addr); + p[i].addr_type = params->addr_type; + p[i].flags = READ_ONCE(params->flags); + p[i].privacy_mode = READ_ONCE(params->privacy_mode); + ++i; + } + + rcu_read_unlock(); + + *n = i; + return p; +} + +/* Clear LE Accept List */ +static int hci_le_clear_accept_list_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[26] & 0x80)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL, + HCI_CMD_TIMEOUT); +} + /* Device must not be scanning when updating the accept list. * * Update is done using the following sequence: * - * use_ll_privacy((Disable Advertising) -> Disable Resolving List) -> + * ll_privacy_capable((Disable Advertising) -> Disable Resolving List) -> * Remove Devices From Accept List -> - * (has IRK && use_ll_privacy(Remove Devices From Resolving List))-> + * (has IRK && ll_privacy_capable(Remove Devices From Resolving List))-> * Add Devices to Accept List -> - * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) -> - * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) -> + * (has IRK && ll_privacy_capable(Remove Devices From Resolving List)) -> + * ll_privacy_capable(Enable Resolving List -> (Enable Advertising)) -> * Enable Scanning * * In case of failure advertising shall be restored to its original state and @@ -1837,16 +2745,18 @@ struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, */ static u8 hci_update_accept_list_sync(struct hci_dev *hdev) { - struct hci_conn_params *params; + struct conn_params *params; struct bdaddr_list *b, *t; u8 num_entries = 0; bool pend_conn, pend_report; + u8 filter_policy; + size_t i, n; int err; - /* Pause advertising if resolving list can be used as controllers are + /* Pause advertising if resolving list can be used as controllers * cannot accept resolving list modifications while advertising. 
*/ - if (use_ll_privacy(hdev)) { + if (ll_privacy_capable(hdev)) { err = hci_pause_advertising_sync(hdev); if (err) { bt_dev_err(hdev, "pause advertising failed: %d", err); @@ -1864,13 +2774,42 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev) goto done; } + /* Force address filtering if PA Sync is in progress */ + if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_create_pa_sync(hdev); + if (conn) { + struct conn_params pa; + + memset(&pa, 0, sizeof(pa)); + + bacpy(&pa.addr, &conn->dst); + pa.addr_type = conn->dst_type; + + /* Clear first since there could be addresses left + * behind. + */ + hci_le_clear_accept_list_sync(hdev); + + num_entries = 1; + err = hci_le_add_accept_list_sync(hdev, &pa, + &num_entries); + goto done; + } + } + /* Go through the current accept list programmed into the - * controller one by one and check if that address is still - * in the list of pending connections or list of devices to + * controller one by one and check if that address is connected or is + * still in the list of pending connections or list of devices to * report. If not present in either list, then remove it from * the controller. */ list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) { + if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type)) + continue; + + /* Pointers not dereferenced, no locks needed */ pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, &b->bdaddr, b->bdaddr_type); @@ -1899,23 +2838,50 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev) * available accept list entries in the controller, then * just abort and return filer policy value to not use the * accept list. + * + * The list and params may be mutated while we wait for events, + * so make a copy and iterate it. */ - list_for_each_entry(params, &hdev->pend_le_conns, action) { - err = hci_le_add_accept_list_sync(hdev, params, &num_entries); - if (err) + + params = conn_params_copy(&hdev->pend_le_conns, &n); + if (!params) { + err = -ENOMEM; + goto done; + } + + for (i = 0; i < n; ++i) { + err = hci_le_add_accept_list_sync(hdev, ¶ms[i], + &num_entries); + if (err) { + kvfree(params); goto done; + } } + kvfree(params); + /* After adding all new pending connections, walk through * the list of pending reports and also add these to the * accept list if there is still space. Abort if space runs out. */ - list_for_each_entry(params, &hdev->pend_le_reports, action) { - err = hci_le_add_accept_list_sync(hdev, params, &num_entries); - if (err) + + params = conn_params_copy(&hdev->pend_le_reports, &n); + if (!params) { + err = -ENOMEM; + goto done; + } + + for (i = 0; i < n; ++i) { + err = hci_le_add_accept_list_sync(hdev, ¶ms[i], + &num_entries); + if (err) { + kvfree(params); goto done; + } } + kvfree(params); + /* Use the allowlist unless the following conditions are all true: * - We are not currently suspending * - There are 1 or more ADV monitors registered and it's not offloaded @@ -1927,38 +2893,27 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev) err = -EINVAL; done: + filter_policy = err ? 0x00 : 0x01; + /* Enable address resolution when LL Privacy is enabled. */ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01); if (err) bt_dev_err(hdev, "Unable to enable LL privacy: %d", err); /* Resume advertising if it was paused */ - if (use_ll_privacy(hdev)) + if (ll_privacy_capable(hdev)) hci_resume_advertising_sync(hdev); /* Select filter policy to use accept list */ - return err ? 
0x00 : 0x01; + return filter_policy; } -/* Returns true if an le connection is in the scanning state */ -static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) +static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp, + u8 type, u16 interval, u16 window) { - struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *c; - - rcu_read_lock(); - - list_for_each_entry_rcu(c, &h->list, list) { - if (c->type == LE_LINK && c->state == BT_CONNECT && - test_bit(HCI_CONN_SCANNING, &c->flags)) { - rcu_read_unlock(); - return true; - } - } - - rcu_read_unlock(); - - return false; + cp->type = type; + cp->interval = cpu_to_le16(interval); + cp->window = cpu_to_le16(window); } static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, @@ -1968,7 +2923,7 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, struct hci_cp_le_set_ext_scan_params *cp; struct hci_cp_le_scan_phy_params *phy; u8 data[sizeof(*cp) + sizeof(*phy) * 2]; - u8 num_phy = 0; + u8 num_phy = 0x00; cp = (void *)data; phy = (void *)cp->data; @@ -1978,28 +2933,64 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, cp->own_addr_type = own_addr_type; cp->filter_policy = filter_policy; + /* Check if PA Sync is in progress then select the PHY based on the + * hci_conn.iso_qos. + */ + if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { + struct hci_cp_le_add_to_accept_list *sent; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); + if (sent) { + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_ba(hdev, PA_LINK, + &sent->bdaddr); + if (conn) { + struct bt_iso_qos *qos = &conn->iso_qos; + + if (qos->bcast.in.phy & BT_ISO_PHY_1M || + qos->bcast.in.phy & BT_ISO_PHY_2M) { + cp->scanning_phys |= LE_SCAN_PHY_1M; + hci_le_scan_phy_params(phy, type, + interval, + window); + num_phy++; + phy++; + } + + if (qos->bcast.in.phy & BT_ISO_PHY_CODED) { + cp->scanning_phys |= LE_SCAN_PHY_CODED; + hci_le_scan_phy_params(phy, type, + interval * 3, + window * 3); + num_phy++; + phy++; + } + + if (num_phy) + goto done; + } + } + } + if (scan_1m(hdev) || scan_2m(hdev)) { cp->scanning_phys |= LE_SCAN_PHY_1M; - - phy->type = type; - phy->interval = cpu_to_le16(interval); - phy->window = cpu_to_le16(window); - + hci_le_scan_phy_params(phy, type, interval, window); num_phy++; phy++; } if (scan_coded(hdev)) { cp->scanning_phys |= LE_SCAN_PHY_CODED; - - phy->type = type; - phy->interval = cpu_to_le16(interval); - phy->window = cpu_to_le16(window); - + hci_le_scan_phy_params(phy, type, interval * 3, window * 3); num_phy++; phy++; } +done: + if (!num_phy) + return -EINVAL; + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS, sizeof(*cp) + sizeof(*phy) * num_phy, data, HCI_CMD_TIMEOUT); @@ -2051,6 +3042,7 @@ static int hci_passive_scan_sync(struct hci_dev *hdev) u8 own_addr_type; u8 filter_policy; u16 window, interval; + u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE; int err; if (hdev->scanning_paused) { @@ -2086,6 +3078,27 @@ static int hci_passive_scan_sync(struct hci_dev *hdev) */ filter_policy = hci_update_accept_list_sync(hdev); + /* If suspended and filter_policy set to 0x00 (no acceptlist) then + * passive scanning cannot be started since that would require the host + * to be woken up to process the reports. + */ + if (hdev->suspended && !filter_policy) { + /* Check if accept list is empty then there is no need to scan + * while suspended. 
+ */ + if (list_empty(&hdev->le_accept_list)) + return 0; + + /* If there are devices is the accept_list that means some + * devices could not be programmed which in non-suspended case + * means filter_policy needs to be set to 0x00 so the host needs + * to filter, but since this is treating suspended case we + * can ignore device needing host to filter to allow devices in + * the acceptlist to be able to wakeup the system. + */ + filter_policy = 0x01; + } + /* When the controller is using random resolvable addresses and * with that having LE privacy enabled, then controllers with * Extended Scanner Filter Policies support can now enable support @@ -2108,16 +3121,35 @@ static int hci_passive_scan_sync(struct hci_dev *hdev) } else if (hci_is_adv_monitoring(hdev)) { window = hdev->le_scan_window_adv_monitor; interval = hdev->le_scan_int_adv_monitor; + + /* Disable duplicates filter when scanning for advertisement + * monitor for the following reasons. + * + * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm + * controllers ignore RSSI_Sampling_Period when the duplicates + * filter is enabled. + * + * For SW pattern filtering, when we're not doing interleaved + * scanning, it is necessary to disable duplicates filter, + * otherwise hosts can only receive one advertisement and it's + * impossible to know if a peer is still in range. + */ + filter_dups = LE_SCAN_FILTER_DUP_DISABLE; } else { window = hdev->le_scan_window; interval = hdev->le_scan_interval; } + /* Disable all filtering for Mesh */ + if (hci_dev_test_flag(hdev, HCI_MESH)) { + filter_policy = 0; + filter_dups = LE_SCAN_FILTER_DUP_DISABLE; + } + bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy); return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window, - own_addr_type, filter_policy, - LE_SCAN_FILTER_DUP_ENABLE); + own_addr_type, filter_policy, filter_dups); } /* This function controls the passive scanning based on hdev->pend_le_conns @@ -2127,7 +3159,7 @@ static int hci_passive_scan_sync(struct hci_dev *hdev) * If there are devices to scan: * * Disable Scanning -> Update Accept List -> - * use_ll_privacy((Disable Advertising) -> Disable Resolving List -> + * ll_privacy_capable((Disable Advertising) -> Disable Resolving List -> * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) -> * Enable Scanning * @@ -2167,9 +3199,11 @@ int hci_update_passive_scan_sync(struct hci_dev *hdev) bt_dev_dbg(hdev, "ADV monitoring is %s", hci_is_adv_monitoring(hdev) ? "on" : "off"); - if (list_empty(&hdev->pend_le_conns) && + if (!hci_dev_test_flag(hdev, HCI_MESH) && + list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports) && - !hci_is_adv_monitoring(hdev)) { + !hci_is_adv_monitoring(hdev) && + !hci_dev_test_flag(hdev, HCI_PA_SYNC)) { /* If there is no pending LE connections or devices * to be scanned for or no ADV monitors, we should stop the * background scanning. 
@@ -2204,6 +3238,16 @@ int hci_update_passive_scan_sync(struct hci_dev *hdev) return err; } +static int update_scan_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_scan_sync(hdev); +} + +int hci_update_scan(struct hci_dev *hdev) +{ + return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL); +} + static int update_passive_scan_sync(struct hci_dev *hdev, void *data) { return hci_update_passive_scan_sync(hdev); @@ -2220,7 +3264,8 @@ int hci_update_passive_scan(struct hci_dev *hdev) hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; - return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL); + return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL, + NULL); } int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val) @@ -2313,7 +3358,7 @@ static int hci_powered_update_adv_sync(struct hci_dev *hdev) * advertising data. This also applies to the case * where BR/EDR was toggled during the AUTO_OFF phase. */ - if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && list_empty(&hdev->adv_instances)) { if (ext_adv_capable(hdev)) { err = hci_setup_ext_adv_instance_sync(hdev, 0x00); @@ -2450,13 +3495,13 @@ int hci_update_scan_sync(struct hci_dev *hdev) return hci_write_scan_enable_sync(hdev, scan); } -int hci_update_name_sync(struct hci_dev *hdev) +int hci_update_name_sync(struct hci_dev *hdev, const u8 *name) { struct hci_cp_write_local_name cp; memset(&cp, 0, sizeof(cp)); - memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); + memcpy(cp.name, name, sizeof(cp.name)); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp, @@ -2468,11 +3513,12 @@ int hci_update_name_sync(struct hci_dev *hdev) * * HCI_SSP_ENABLED(Enable SSP) * HCI_LE_ENABLED(Enable LE) - * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) -> + * HCI_LE_ENABLED(ll_privacy_capable(Add local IRK to Resolving List) -> * Update adv data) * Enable Authentication * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class -> * Set Name -> Set EIR) + * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address) */ int hci_powered_update_sync(struct hci_dev *hdev) { @@ -2508,10 +3554,27 @@ int hci_powered_update_sync(struct hci_dev *hdev) hci_write_fast_connectable_sync(hdev, false); hci_update_scan_sync(hdev); hci_update_class_sync(hdev); - hci_update_name_sync(hdev); + hci_update_name_sync(hdev, hdev->dev_name); hci_update_eir_sync(hdev); } + /* If forcing static address is in use or there is no public + * address use the static address as random address (but skip + * the HCI command if the current random address is already the + * static one. + * + * In case BR/EDR has been disabled on a dual-mode controller + * and a static address has been configured, then use that + * address instead of the public BR/EDR address. 
+ */ + if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || + (!bacmp(&hdev->bdaddr, BDADDR_ANY) && + !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) { + if (bacmp(&hdev->static_addr, BDADDR_ANY)) + return hci_set_random_addr_sync(hdev, + &hdev->static_addr); + } + return 0; } @@ -2538,7 +3601,10 @@ static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) return; - bacpy(&hdev->public_addr, &ba); + if (hci_test_quirk(hdev, HCI_QUIRK_BDADDR_PROPERTY_BROKEN)) + baswap(&hdev->public_addr, &ba); + else + bacpy(&hdev->public_addr, &ba); } struct hci_init_stage { @@ -2610,7 +3676,7 @@ static int hci_init0_sync(struct hci_dev *hdev) bt_dev_dbg(hdev, ""); /* Reset */ - if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + if (!hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE)) { err = hci_reset_sync(hdev); if (err) return err; @@ -2623,7 +3689,7 @@ static int hci_unconf_init_sync(struct hci_dev *hdev) { int err; - if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE)) return 0; err = hci_init0_sync(hdev); @@ -2639,10 +3705,6 @@ static int hci_unconf_init_sync(struct hci_dev *hdev) /* Read Local Supported Features. */ static int hci_read_local_features_sync(struct hci_dev *hdev) { - /* Not all AMP controllers support this command */ - if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20)) - return 0; - return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL, HCI_CMD_TIMEOUT); } @@ -2670,57 +3732,13 @@ static int hci_read_local_cmds_sync(struct hci_dev *hdev) * supported commands. */ if (hdev->hci_ver > BLUETOOTH_VER_1_1 && - !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) + !hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS)) return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL, HCI_CMD_TIMEOUT); return 0; } -/* Read Local AMP Info */ -static int hci_read_local_amp_info_sync(struct hci_dev *hdev) -{ - return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO, - 0, NULL, HCI_CMD_TIMEOUT); -} - -/* Read Data Blk size */ -static int hci_read_data_block_size_sync(struct hci_dev *hdev) -{ - return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, - 0, NULL, HCI_CMD_TIMEOUT); -} - -/* Read Flow Control Mode */ -static int hci_read_flow_control_mode_sync(struct hci_dev *hdev) -{ - return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, - 0, NULL, HCI_CMD_TIMEOUT); -} - -/* Read Location Data */ -static int hci_read_location_data_sync(struct hci_dev *hdev) -{ - return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA, - 0, NULL, HCI_CMD_TIMEOUT); -} - -/* AMP Controller init stage 1 command sequence */ -static const struct hci_init_stage amp_init1[] = { - /* HCI_OP_READ_LOCAL_VERSION */ - HCI_INIT(hci_read_local_version_sync), - /* HCI_OP_READ_LOCAL_COMMANDS */ - HCI_INIT(hci_read_local_cmds_sync), - /* HCI_OP_READ_LOCAL_AMP_INFO */ - HCI_INIT(hci_read_local_amp_info_sync), - /* HCI_OP_READ_DATA_BLOCK_SIZE */ - HCI_INIT(hci_read_data_block_size_sync), - /* HCI_OP_READ_FLOW_CONTROL_MODE */ - HCI_INIT(hci_read_flow_control_mode_sync), - /* HCI_OP_READ_LOCATION_DATA */ - HCI_INIT(hci_read_location_data_sync), -}; - static int hci_init1_sync(struct hci_dev *hdev) { int err; @@ -2728,33 +3746,15 @@ static int hci_init1_sync(struct hci_dev *hdev) bt_dev_dbg(hdev, ""); /* Reset */ - if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + if (!hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE)) { err = hci_reset_sync(hdev); if (err) 
return err; } - switch (hdev->dev_type) { - case HCI_PRIMARY: - hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; - return hci_init_stage_sync(hdev, br_init1); - case HCI_AMP: - hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; - return hci_init_stage_sync(hdev, amp_init1); - default: - bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type); - break; - } - - return 0; + return hci_init_stage_sync(hdev, br_init1); } -/* AMP Controller init stage 2 command sequence */ -static const struct hci_init_stage amp_init2[] = { - /* HCI_OP_READ_LOCAL_FEATURES */ - HCI_INIT(hci_read_local_features_sync), -}; - /* Read Buffer Size (ACL mtu, max pkt, etc.) */ static int hci_read_buffer_size_sync(struct hci_dev *hdev) { @@ -2779,6 +3779,9 @@ static int hci_read_local_name_sync(struct hci_dev *hdev) /* Read Voice Setting */ static int hci_read_voice_setting_sync(struct hci_dev *hdev) { + if (!read_voice_setting_capable(hdev)) + return 0; + return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL, HCI_CMD_TIMEOUT); } @@ -2806,6 +3809,9 @@ static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type, if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; + if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL)) + return 0; + memset(&cp, 0, sizeof(cp)); cp.flt_type = flt_type; @@ -2826,6 +3832,13 @@ static int hci_clear_event_filter_sync(struct hci_dev *hdev) if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED)) return 0; + /* In theory the state machine should not reach here unless + * a hci_set_event_filter_sync() call succeeds, but we do + * the check both for parity and as a future reminder. + */ + if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL)) + return 0; + return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00, BDADDR_ANY, 0x00); } @@ -2839,6 +3852,28 @@ static int hci_write_ca_timeout_sync(struct hci_dev *hdev) sizeof(param), ¶m, HCI_CMD_TIMEOUT); } +/* Enable SCO flow control if supported */ +static int hci_write_sync_flowctl_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_sync_flowctl cp; + int err; + + /* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */ + if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)) || + !hci_test_quirk(hdev, HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.enable = 0x01; + + err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SYNC_FLOWCTL, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (!err) + hci_dev_set_flag(hdev, HCI_SCO_FLOWCTL); + + return err; +} + /* BR Controller init stage 2 command sequence */ static const struct hci_init_stage br_init2[] = { /* HCI_OP_READ_BUFFER_SIZE */ @@ -2857,6 +3892,8 @@ static const struct hci_init_stage br_init2[] = { HCI_INIT(hci_clear_event_filter_sync), /* HCI_OP_WRITE_CA_TIMEOUT */ HCI_INIT(hci_write_ca_timeout_sync), + /* HCI_OP_WRITE_SYNC_FLOWCTL */ + HCI_INIT(hci_write_sync_flowctl_sync), {} }; @@ -2898,7 +3935,7 @@ static int hci_write_inquiry_mode_sync(struct hci_dev *hdev) u8 mode; if (!lmp_inq_rssi_capable(hdev) && - !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) + !hci_test_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE)) return 0; /* If Extended Inquiry Result events are supported, then @@ -2961,6 +3998,12 @@ static const struct hci_init_stage hci_init2[] = { /* Read LE Buffer Size */ static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) { + /* Use Read LE Buffer Size V2 if supported */ + if (iso_capable(hdev) && hdev->commands[41] & 0x20) + return __hci_cmd_sync_status(hdev, + 
HCI_OP_LE_READ_BUFFER_SIZE_V2, + 0, NULL, HCI_CMD_TIMEOUT); + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } @@ -2968,8 +4011,19 @@ static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) /* Read LE Local Supported Features */ static int hci_le_read_local_features_sync(struct hci_dev *hdev) { - return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, - 0, NULL, HCI_CMD_TIMEOUT); + int err; + + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, + 0, NULL, HCI_CMD_TIMEOUT); + if (err) + return err; + + if (ll_ext_feature_capable(hdev) && hdev->commands[47] & BIT(2)) + return __hci_cmd_sync_status(hdev, + HCI_OP_LE_READ_ALL_LOCAL_FEATURES, + 0, NULL, HCI_CMD_TIMEOUT); + + return err; } /* Read LE Supported States */ @@ -2981,10 +4035,10 @@ static int hci_le_read_supported_states_sync(struct hci_dev *hdev) /* LE Controller init stage 2 command sequence */ static const struct hci_init_stage le_init2[] = { - /* HCI_OP_LE_READ_BUFFER_SIZE */ - HCI_INIT(hci_le_read_buffer_size_sync), /* HCI_OP_LE_READ_LOCAL_FEATURES */ HCI_INIT(hci_le_read_local_features_sync), + /* HCI_OP_LE_READ_BUFFER_SIZE */ + HCI_INIT(hci_le_read_buffer_size_sync), /* HCI_OP_LE_READ_SUPPORTED_STATES */ HCI_INIT(hci_le_read_supported_states_sync), {} @@ -2996,8 +4050,9 @@ static int hci_init2_sync(struct hci_dev *hdev) bt_dev_dbg(hdev, ""); - if (hdev->dev_type == HCI_AMP) - return hci_init_stage_sync(hdev, amp_init2); + err = hci_init_stage_sync(hdev, hci_init2); + if (err) + return err; if (lmp_bredr_capable(hdev)) { err = hci_init_stage_sync(hdev, br_init2); @@ -3016,7 +4071,7 @@ static int hci_init2_sync(struct hci_dev *hdev) hci_dev_set_flag(hdev, HCI_LE_ENABLED); } - return hci_init_stage_sync(hdev, hci_init2); + return 0; } static int hci_set_event_mask_sync(struct hci_dev *hdev) @@ -3036,12 +4091,14 @@ static int hci_set_event_mask_sync(struct hci_dev *hdev) if (lmp_bredr_capable(hdev)) { events[4] |= 0x01; /* Flow Specification Complete */ - /* Don't set Disconnect Complete when suspended as that - * would wakeup the host when disconnecting due to - * suspend. + /* Don't set Disconnect Complete and mode change when + * suspended as that would wakeup the host when disconnecting + * due to suspend. 
*/ - if (hdev->suspended) + if (hdev->suspended) { events[0] &= 0xef; + events[2] &= 0xf7; + } } else { /* Use a different default for LE-only devices */ memset(events, 0, sizeof(events)); @@ -3079,7 +4136,7 @@ static int hci_set_event_mask_sync(struct hci_dev *hdev) } if (lmp_inq_rssi_capable(hdev) || - test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) + hci_test_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE)) events[4] |= 0x02; /* Inquiry Result with RSSI */ if (lmp_ext_feat_capable(hdev)) @@ -3131,7 +4188,7 @@ static int hci_read_stored_link_key_sync(struct hci_dev *hdev) struct hci_cp_read_stored_link_key cp; if (!(hdev->commands[6] & 0x20) || - test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) + hci_test_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY)) return 0; memset(&cp, 0, sizeof(cp)); @@ -3179,7 +4236,8 @@ static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) { if (!(hdev->commands[18] & 0x04) || - test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) + !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || + hci_test_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING, @@ -3192,7 +4250,8 @@ static int hci_read_page_scan_type_sync(struct hci_dev *hdev) * support the Read Page Scan Type command. Check support for * this command in the bit mask of supported commands. */ - if (!(hdev->commands[13] & 0x01)) + if (!(hdev->commands[13] & 0x01) || + hci_test_quirk(hdev, HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, @@ -3262,12 +4321,24 @@ static int hci_le_set_event_mask_sync(struct hci_dev *hdev) if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) events[0] |= 0x40; /* LE Data Length Change */ - /* If the controller supports LL Privacy feature, enable - * the corresponding event. + /* If the controller supports LL Privacy feature or LE Extended Adv, + * enable the corresponding event. */ - if (hdev->le_features[0] & HCI_LE_LL_PRIVACY) + if (use_enhanced_conn_complete(hdev)) events[1] |= 0x02; /* LE Enhanced Connection Complete */ + /* Mark Device Privacy if Privacy Mode is supported */ + if (privacy_mode_capable(hdev)) + hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY; + + /* Mark Address Resolution if LL Privacy is supported */ + if (ll_privacy_capable(hdev)) + hdev->conn_flags |= HCI_CONN_FLAG_ADDRESS_RESOLUTION; + + /* Mark PAST if supported */ + if (past_capable(hdev)) + hdev->conn_flags |= HCI_CONN_FLAG_PAST; + /* If the controller supports Extended Scanner Filter * Policies, enable the corresponding event. 
*/ @@ -3337,6 +4408,25 @@ static int hci_le_set_event_mask_sync(struct hci_dev *hdev) if (ext_adv_capable(hdev)) events[2] |= 0x02; /* LE Advertising Set Terminated */ + if (past_receiver_capable(hdev)) + events[2] |= 0x80; /* LE PAST Received */ + + if (cis_capable(hdev)) { + events[3] |= 0x01; /* LE CIS Established */ + if (cis_peripheral_capable(hdev)) + events[3] |= 0x02; /* LE CIS Request */ + } + + if (bis_capable(hdev)) { + events[1] |= 0x20; /* LE PA Report */ + events[1] |= 0x40; /* LE PA Sync Established */ + events[3] |= 0x04; /* LE Create BIG Complete */ + events[3] |= 0x08; /* LE Terminate BIG Complete */ + events[3] |= 0x10; /* LE BIG Sync Established */ + events[3] |= 0x20; /* LE BIG Sync Loss */ + events[4] |= 0x02; /* LE BIG Info Advertising Report */ + } + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), events, HCI_CMD_TIMEOUT); } @@ -3363,7 +4453,7 @@ static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) static int hci_le_read_tx_power_sync(struct hci_dev *hdev) { if (!(hdev->commands[38] & 0x80) || - test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) + hci_test_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, @@ -3380,16 +4470,6 @@ static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev) 0, NULL, HCI_CMD_TIMEOUT); } -/* Clear LE Accept List */ -static int hci_le_clear_accept_list_sync(struct hci_dev *hdev) -{ - if (!(hdev->commands[26] & 0x80)) - return 0; - - return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL, - HCI_CMD_TIMEOUT); -} - /* Read LE Resolving List Size */ static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev) { @@ -3415,7 +4495,8 @@ static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev) { __le16 timeout = cpu_to_le16(hdev->rpa_timeout); - if (!(hdev->commands[35] & 0x04)) + if (!(hdev->commands[35] & 0x04) || + hci_test_quirk(hdev, HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT, @@ -3477,6 +4558,24 @@ static int hci_set_le_support_sync(struct hci_dev *hdev) sizeof(cp), &cp, HCI_CMD_TIMEOUT); } +/* LE Set Host Feature */ +static int hci_le_set_host_feature_sync(struct hci_dev *hdev) +{ + struct hci_cp_le_set_host_feature cp; + + if (!iso_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + /* Connected Isochronous Channels (Host Support) */ + cp.bit_number = 32; + cp.bit_value = iso_enabled(hdev) ? 0x01 : 0x00; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + /* LE Controller init stage 3 command sequence */ static const struct hci_init_stage le_init3[] = { /* HCI_OP_LE_SET_EVENT_MASK */ @@ -3503,6 +4602,8 @@ static const struct hci_init_stage le_init3[] = { HCI_INIT(hci_le_read_num_support_adv_sets_sync), /* HCI_OP_WRITE_LE_HOST_SUPPORTED */ HCI_INIT(hci_set_le_support_sync), + /* HCI_OP_LE_SET_HOST_FEATURE */ + HCI_INIT(hci_le_set_host_feature_sync), {} }; @@ -3540,7 +4641,7 @@ static int hci_delete_stored_link_key_sync(struct hci_dev *hdev) * just disable this command. 
*/ if (!(hdev->commands[6] & 0x80) || - test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) + hci_test_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY)) return 0; memset(&cp, 0, sizeof(cp)); @@ -3566,7 +4667,7 @@ static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) if (lmp_cpb_central_capable(hdev)) { events[1] |= 0x40; /* Triggered Clock Capture */ events[1] |= 0x80; /* Synchronization Train Complete */ - events[2] |= 0x10; /* Peripheral Page Response Timeout */ + events[2] |= 0x08; /* Truncated Page Complete */ events[2] |= 0x20; /* CPB Channel Map Change */ changed = true; } @@ -3578,7 +4679,7 @@ static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) events[2] |= 0x01; /* Synchronization Train Received */ events[2] |= 0x02; /* CPB Receive */ events[2] |= 0x04; /* CPB Timeout */ - events[2] |= 0x08; /* Truncated Page Complete */ + events[2] |= 0x10; /* Peripheral Page Response Timeout */ changed = true; } @@ -3604,11 +4705,12 @@ static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) /* Read local codec list if the HCI command is supported */ static int hci_read_local_codecs_sync(struct hci_dev *hdev) { - if (!(hdev->commands[29] & 0x20)) - return 0; + if (hdev->commands[45] & 0x04) + hci_read_supported_codecs_v2(hdev); + else if (hdev->commands[29] & 0x20) + hci_read_supported_codecs(hdev); - return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL, - HCI_CMD_TIMEOUT); + return 0; } /* Read local pairing options if the HCI command is supported */ @@ -3624,7 +4726,7 @@ static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev) /* Get MWS transport configuration if the HCI command is supported */ static int hci_get_mws_transport_config_sync(struct hci_dev *hdev) { - if (!(hdev->commands[30] & 0x08)) + if (!mws_transport_config_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG, @@ -3664,7 +4766,8 @@ static int hci_set_err_data_report_sync(struct hci_dev *hdev) bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); if (!(hdev->commands[18] & 0x08) || - test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) + !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || + hci_test_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING)) return 0; if (enabled == hdev->err_data_reporting) @@ -3714,18 +4817,38 @@ static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev) sizeof(cp), &cp, HCI_CMD_TIMEOUT); } -/* Set Default PHY parameters if command is supported */ +/* Set Default PHY parameters if command is supported, enables all supported + * PHYs according to the LE Features bits. + */ static int hci_le_set_default_phy_sync(struct hci_dev *hdev) { struct hci_cp_le_set_default_phy cp; - if (!(hdev->commands[35] & 0x20)) + if (!(hdev->commands[35] & 0x20)) { + /* If the command is not supported it means only 1M PHY is + * supported. 
+ */ + hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; + hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; return 0; + } memset(&cp, 0, sizeof(cp)); cp.all_phys = 0x00; - cp.tx_phys = hdev->le_tx_def_phys; - cp.rx_phys = hdev->le_rx_def_phys; + cp.tx_phys = HCI_LE_SET_PHY_1M; + cp.rx_phys = HCI_LE_SET_PHY_1M; + + /* Enables 2M PHY if supported */ + if (le_2m_capable(hdev)) { + cp.tx_phys |= HCI_LE_SET_PHY_2M; + cp.rx_phys |= HCI_LE_SET_PHY_2M; + } + + /* Enables Coded PHY if supported */ + if (le_coded_capable(hdev)) { + cp.tx_phys |= HCI_LE_SET_PHY_CODED; + cp.rx_phys |= HCI_LE_SET_PHY_CODED; + } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); @@ -3770,13 +4893,6 @@ static int hci_init_sync(struct hci_dev *hdev) if (err < 0) return err; - /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode - * BR/EDR/LE type controllers. AMP controllers only need the - * first two stages of init. - */ - if (hdev->dev_type != HCI_PRIMARY) - return 0; - err = hci_init3_sync(hdev); if (err < 0) return err; @@ -3801,6 +4917,9 @@ static int hci_init_sync(struct hci_dev *hdev) !hci_dev_test_flag(hdev, HCI_CONFIG)) return 0; + if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED)) + return 0; + hci_debugfs_create_common(hdev); if (lmp_bredr_capable(hdev)) @@ -3812,128 +4931,132 @@ static int hci_init_sync(struct hci_dev *hdev) return 0; } -int hci_dev_open_sync(struct hci_dev *hdev) +#define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc } + +static const struct { + unsigned long quirk; + const char *desc; +} hci_broken_table[] = { + HCI_QUIRK_BROKEN(LOCAL_COMMANDS, + "HCI Read Local Supported Commands not supported"), + HCI_QUIRK_BROKEN(STORED_LINK_KEY, + "HCI Delete Stored Link Key command is advertised, " + "but not supported."), + HCI_QUIRK_BROKEN(ERR_DATA_REPORTING, + "HCI Read Default Erroneous Data Reporting command is " + "advertised, but not supported."), + HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER, + "HCI Read Transmit Power Level command is advertised, " + "but not supported."), + HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL, + "HCI Set Event Filter command not supported."), + HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN, + "HCI Enhanced Setup Synchronous Connection command is " + "advertised, but not supported."), + HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT, + "HCI LE Set Random Private Address Timeout command is " + "advertised, but not supported."), + HCI_QUIRK_BROKEN(EXT_CREATE_CONN, + "HCI LE Extended Create Connection command is " + "advertised, but not supported."), + HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT, + "HCI WRITE AUTH PAYLOAD TIMEOUT command leads " + "to unexpected SMP errors when pairing " + "and will not be used."), + HCI_QUIRK_BROKEN(LE_CODED, + "HCI LE Coded PHY feature bit is set, " + "but its usage is not supported.") +}; + +/* This function handles hdev setup stage: + * + * Calls hdev->setup + * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set. + */ +static int hci_dev_setup_sync(struct hci_dev *hdev) { int ret = 0; + bool invalid_bdaddr; + size_t i; + + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP)) + return 0; bt_dev_dbg(hdev, ""); - if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { - ret = -ENODEV; - goto done; - } + hci_sock_dev_event(hdev, HCI_DEV_SETUP); - if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG)) { - /* Check for rfkill but allow the HCI setup stage to - * proceed (which in itself doesn't cause any RF activity). 
- */ - if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { - ret = -ERFKILL; - goto done; - } + if (hdev->setup) + ret = hdev->setup(hdev); - /* Check for valid public address or a configured static - * random address, but let the HCI setup proceed to - * be able to determine if there is a public address - * or not. - * - * In case of user channel usage, it is not important - * if a public address or static random address is - * available. - * - * This check is only valid for BR/EDR controllers - * since AMP controllers do not have an address. - */ - if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hdev->dev_type == HCI_PRIMARY && - !bacmp(&hdev->bdaddr, BDADDR_ANY) && - !bacmp(&hdev->static_addr, BDADDR_ANY)) { - ret = -EADDRNOTAVAIL; - goto done; - } + for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) { + if (hci_test_quirk(hdev, hci_broken_table[i].quirk)) + bt_dev_warn(hdev, "%s", hci_broken_table[i].desc); } - if (test_bit(HCI_UP, &hdev->flags)) { - ret = -EALREADY; - goto done; - } + /* The transport driver can set the quirk to mark the + * BD_ADDR invalid before creating the HCI device or in + * its setup callback. + */ + invalid_bdaddr = hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) || + hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY); + if (!ret) { + if (hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY) && + !bacmp(&hdev->public_addr, BDADDR_ANY)) + hci_dev_get_bd_addr_from_property(hdev); - if (hdev->open(hdev)) { - ret = -EIO; - goto done; + if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) && + hdev->set_bdaddr) { + ret = hdev->set_bdaddr(hdev, &hdev->public_addr); + if (!ret) + invalid_bdaddr = false; + } } - set_bit(HCI_RUNNING, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_OPEN); - - atomic_set(&hdev->cmd_cnt, 1); - set_bit(HCI_INIT, &hdev->flags); - - if (hci_dev_test_flag(hdev, HCI_SETUP) || - test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { - bool invalid_bdaddr; - - hci_sock_dev_event(hdev, HCI_DEV_SETUP); - - if (hdev->setup) - ret = hdev->setup(hdev); - - /* The transport driver can set the quirk to mark the - * BD_ADDR invalid before creating the HCI device or in - * its setup callback. - */ - invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, - &hdev->quirks); + /* The transport driver can set these quirks before + * creating the HCI device or in its setup callback. + * + * For the invalid BD_ADDR quirk it is possible that + * it becomes a valid address if the bootloader does + * provide it (see above). + * + * In case any of them is set, the controller has to + * start up as unconfigured. + */ + if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || + invalid_bdaddr) + hci_dev_set_flag(hdev, HCI_UNCONFIGURED); - if (ret) - goto setup_failed; + /* For an unconfigured controller it is required to + * read at least the version information provided by + * the Read Local Version Information command. + * + * If the set_bdaddr driver callback is provided, then + * also the original Bluetooth public device address + * will be read using the Read BD Address command. 
+ */ + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + return hci_unconf_init_sync(hdev); - if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) { - if (!bacmp(&hdev->public_addr, BDADDR_ANY)) - hci_dev_get_bd_addr_from_property(hdev); + return ret; +} - if (bacmp(&hdev->public_addr, BDADDR_ANY) && - hdev->set_bdaddr) { - ret = hdev->set_bdaddr(hdev, - &hdev->public_addr); +/* This function handles hdev init stage: + * + * Calls hci_dev_setup_sync to perform setup stage + * Calls hci_init_sync to perform HCI command init sequence + */ +static int hci_dev_init_sync(struct hci_dev *hdev) +{ + int ret; - /* If setting of the BD_ADDR from the device - * property succeeds, then treat the address - * as valid even if the invalid BD_ADDR - * quirk indicates otherwise. - */ - if (!ret) - invalid_bdaddr = false; - } - } + bt_dev_dbg(hdev, ""); -setup_failed: - /* The transport driver can set these quirks before - * creating the HCI device or in its setup callback. - * - * For the invalid BD_ADDR quirk it is possible that - * it becomes a valid address if the bootloader does - * provide it (see above). - * - * In case any of them is set, the controller has to - * start up as unconfigured. - */ - if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || - invalid_bdaddr) - hci_dev_set_flag(hdev, HCI_UNCONFIGURED); + atomic_set(&hdev->cmd_cnt, 1); + set_bit(HCI_INIT, &hdev->flags); - /* For an unconfigured controller it is required to - * read at least the version information provided by - * the Read Local Version Information command. - * - * If the set_bdaddr driver callback is provided, then - * also the original Bluetooth public device address - * will be read using the Read BD Address command. - */ - if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) - ret = hci_unconf_init_sync(hdev); - } + ret = hci_dev_setup_sync(hdev); if (hci_dev_test_flag(hdev, HCI_CONFIG)) { /* If public address change is configured, ensure that @@ -3961,7 +5084,7 @@ setup_failed: * then they need to be reprogrammed after the init procedure * completed. */ - if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && + if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) ret = hdev->set_diag(hdev, true); @@ -3973,6 +5096,63 @@ setup_failed: clear_bit(HCI_INIT, &hdev->flags); + return ret; +} + +int hci_dev_open_sync(struct hci_dev *hdev) +{ + int ret; + + bt_dev_dbg(hdev, ""); + + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + ret = -ENODEV; + goto done; + } + + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) { + /* Check for rfkill but allow the HCI setup stage to + * proceed (which in itself doesn't cause any RF activity). + */ + if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { + ret = -ERFKILL; + goto done; + } + + /* Check for valid public address or a configured static + * random address, but let the HCI setup proceed to + * be able to determine if there is a public address + * or not. + * + * In case of user channel usage, it is not important + * if a public address or static random address is + * available. 
+ */ + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + !bacmp(&hdev->bdaddr, BDADDR_ANY) && + !bacmp(&hdev->static_addr, BDADDR_ANY)) { + ret = -EADDRNOTAVAIL; + goto done; + } + } + + if (test_bit(HCI_UP, &hdev->flags)) { + ret = -EALREADY; + goto done; + } + + if (hdev->open(hdev)) { + ret = -EIO; + goto done; + } + + hci_devcd_reset(hdev); + + set_bit(HCI_RUNNING, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_OPEN); + + ret = hci_dev_init_sync(hdev); if (!ret) { hci_dev_hold(hdev); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); @@ -3984,9 +5164,9 @@ setup_failed: !hci_dev_test_flag(hdev, HCI_CONFIG) && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hci_dev_test_flag(hdev, HCI_MGMT) && - hdev->dev_type == HCI_PRIMARY) { + hci_dev_test_flag(hdev, HCI_MGMT)) { ret = hci_powered_update_sync(hdev); + mgmt_power_on(hdev, ret); } } else { /* Init failed, cleanup */ @@ -4006,10 +5186,16 @@ setup_failed: hdev->flush(hdev); if (hdev->sent_cmd) { + cancel_delayed_work_sync(&hdev->cmd_timer); kfree_skb(hdev->sent_cmd); hdev->sent_cmd = NULL; } + if (hdev->req_skb) { + kfree_skb(hdev->req_skb); + hdev->req_skb = NULL; + } + clear_bit(HCI_RUNNING, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_CLOSE); @@ -4027,17 +5213,42 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev) struct hci_conn_params *p; list_for_each_entry(p, &hdev->le_conn_params, list) { + hci_pend_le_list_del_init(p); if (p->conn) { hci_conn_drop(p->conn); hci_conn_put(p->conn); p->conn = NULL; } - list_del_init(&p->action); } BT_DBG("All LE pending actions cleared"); } +static int hci_dev_shutdown(struct hci_dev *hdev) +{ + int err = 0; + /* Similar to how we first do setup and then set the exclusive access + * bit for userspace, we must first unset userchannel and then clean up. + * Otherwise, the kernel can't properly use the hci channel to clean up + * the controller (some shutdown routines require sending additional + * commands to the controller for example). 
+ */ + bool was_userchannel = + hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL); + + if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && + test_bit(HCI_UP, &hdev->flags)) { + /* Execute vendor specific shutdown routine */ + if (hdev->shutdown) + err = hdev->shutdown(hdev); + } + + if (was_userchannel) + hci_dev_set_flag(hdev, HCI_USER_CHANNEL); + + return err; +} + int hci_dev_close_sync(struct hci_dev *hdev) { bool auto_off; @@ -4045,19 +5256,27 @@ int hci_dev_close_sync(struct hci_dev *hdev) bt_dev_dbg(hdev, ""); - cancel_delayed_work(&hdev->power_off); - cancel_delayed_work(&hdev->ncmd_timer); + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + disable_delayed_work(&hdev->power_off); + disable_delayed_work(&hdev->ncmd_timer); + disable_delayed_work(&hdev->le_scan_disable); + } else { + cancel_delayed_work(&hdev->power_off); + cancel_delayed_work(&hdev->ncmd_timer); + cancel_delayed_work(&hdev->le_scan_disable); + } - hci_request_cancel_all(hdev); + hci_cmd_sync_cancel_sync(hdev, ENODEV); - if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - test_bit(HCI_UP, &hdev->flags)) { - /* Execute vendor specific shutdown routine */ - if (hdev->shutdown) - err = hdev->shutdown(hdev); + cancel_interleave_scan(hdev); + + if (hdev->adv_instance_timeout) { + cancel_delayed_work_sync(&hdev->adv_instance_expire); + hdev->adv_instance_timeout = 0; } + err = hci_dev_shutdown(hdev); + if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { cancel_delayed_work_sync(&hdev->cmd_timer); return err; @@ -4098,17 +5317,16 @@ int hci_dev_close_sync(struct hci_dev *hdev) auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); - if (!auto_off && hdev->dev_type == HCI_PRIMARY && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hci_dev_test_flag(hdev, HCI_MGMT)) __mgmt_power_off(hdev); hci_inquiry_cache_flush(hdev); hci_pend_le_actions_clear(hdev); hci_conn_hash_flush(hdev); - hci_dev_unlock(hdev); - + /* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */ smp_unregister(hdev); + hci_dev_unlock(hdev); hci_sock_dev_event(hdev, HCI_DEV_DOWN); @@ -4123,7 +5341,7 @@ int hci_dev_close_sync(struct hci_dev *hdev) /* Reset device */ skb_queue_purge(&hdev->cmd_q); atomic_set(&hdev->cmd_cnt, 1); - if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && + if (hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE) && !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { set_bit(HCI_INIT, &hdev->flags); hci_reset_sync(hdev); @@ -4145,6 +5363,12 @@ int hci_dev_close_sync(struct hci_dev *hdev) hdev->sent_cmd = NULL; } + /* Drop last request */ + if (hdev->req_skb) { + kfree_skb(hdev->req_skb); + hdev->req_skb = NULL; + } + clear_bit(HCI_RUNNING, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_CLOSE); @@ -4155,12 +5379,10 @@ int hci_dev_close_sync(struct hci_dev *hdev) hdev->flags &= BIT(HCI_RAW); hci_dev_clear_volatile_flags(hdev); - /* Controller radio is available but is currently powered down */ - hdev->amp_status = AMP_STATUS_POWERED_DOWN; - memset(hdev->eir, 0, sizeof(hdev->eir)); memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); bacpy(&hdev->random_addr, BDADDR_ANY); + hci_codec_list_clear(&hdev->local_codecs); hci_dev_put(hdev); return err; @@ -4193,8 +5415,7 @@ static int hci_power_on_sync(struct hci_dev *hdev) */ if (hci_dev_test_flag(hdev, HCI_RFKILLED) || hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || - (hdev->dev_type == HCI_PRIMARY && - !bacmp(&hdev->bdaddr, BDADDR_ANY) && + (!bacmp(&hdev->bdaddr, 
BDADDR_ANY) && !bacmp(&hdev->static_addr, BDADDR_ANY))) { hci_dev_clear_flag(hdev, HCI_AUTO_OFF); hci_dev_close_sync(hdev); @@ -4264,7 +5485,6 @@ int hci_stop_discovery_sync(struct hci_dev *hdev) if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { cancel_delayed_work(&hdev->le_scan_disable); - cancel_delayed_work(&hdev->le_scan_restart); err = hci_scan_disable_sync(hdev); if (err) @@ -4278,7 +5498,7 @@ int hci_stop_discovery_sync(struct hci_dev *hdev) } /* Resume advertising if it was paused */ - if (use_ll_privacy(hdev)) + if (ll_privacy_capable(hdev)) hci_resume_advertising_sync(hdev); /* No further actions needed for LE-only discovery */ @@ -4291,41 +5511,41 @@ int hci_stop_discovery_sync(struct hci_dev *hdev) if (!e) return 0; - return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); + /* Ignore cancel errors since it should interfere with stopping + * of the discovery. + */ + hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); } return 0; } -static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle, - u8 reason) -{ - struct hci_cp_disconn_phy_link cp; - - memset(&cp, 0, sizeof(cp)); - cp.phy_handle = HCI_PHY_HANDLE(handle); - cp.reason = reason; - - return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK, - sizeof(cp), &cp, HCI_CMD_TIMEOUT); -} - static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { struct hci_cp_disconnect cp; - if (conn->type == AMP_LINK) - return hci_disconnect_phy_link_sync(hdev, conn->handle, reason); + if (conn->type == BIS_LINK || conn->type == PA_LINK) { + /* This is a BIS connection, hci_conn_del will + * do the necessary cleanup. + */ + hci_dev_lock(hdev); + hci_conn_failed(conn, reason); + hci_dev_unlock(hdev); + + return 0; + } memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(conn->handle); cp.reason = reason; - /* Wait for HCI_EV_DISCONN_COMPLETE not HCI_EV_CMD_STATUS when not - * suspending. + /* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the + * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is + * used when suspending or powering off, where we don't want to wait + * for the peer's response. */ - if (!hdev->suspended) + if (reason != HCI_ERROR_REMOTE_POWER_OFF) return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp, HCI_EV_DISCONN_COMPLETE, @@ -4336,23 +5556,65 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, } static int hci_le_connect_cancel_sync(struct hci_dev *hdev, - struct hci_conn *conn) + struct hci_conn *conn, u8 reason) { + /* Return reason if scanning since the connection shall probably be + * cleanup directly. 
+ */ if (test_bit(HCI_CONN_SCANNING, &conn->flags)) + return reason; + + if (conn->role == HCI_ROLE_SLAVE || + test_and_set_bit(HCI_CONN_CANCEL, &conn->flags)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, - 6, &conn->dst, HCI_CMD_TIMEOUT); + 0, NULL, HCI_CMD_TIMEOUT); } -static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn) +static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 reason) { if (conn->type == LE_LINK) - return hci_le_connect_cancel_sync(hdev, conn); + return hci_le_connect_cancel_sync(hdev, conn, reason); + + if (conn->type == CIS_LINK) { + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 1857: + * + * If this command is issued for a CIS on the Central and the + * CIS is successfully terminated before being established, + * then an HCI_LE_CIS_Established event shall also be sent for + * this CIS with the Status Operation Cancelled by Host (0x44). + */ + if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) + return hci_disconnect_sync(hdev, conn, reason); + + /* CIS with no Create CIS sent have nothing to cancel */ + return HCI_ERROR_LOCAL_HOST_TERM; + } + + if (conn->type == BIS_LINK || conn->type == PA_LINK) { + /* There is no way to cancel a BIS without terminating the BIG + * which is done later on connection cleanup. + */ + return 0; + } if (hdev->hci_ver < BLUETOOTH_VER_1_2) return 0; + /* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the + * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is + * used when suspending or powering off, where we don't want to wait + * for the peer's response. + */ + if (reason != HCI_ERROR_REMOTE_POWER_OFF) + return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL, + 6, &conn->dst, + HCI_EV_CONN_COMPLETE, + HCI_CMD_TIMEOUT, NULL); + return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL, 6, &conn->dst, HCI_CMD_TIMEOUT); } @@ -4376,11 +5638,30 @@ static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } +static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 reason) +{ + struct hci_cp_le_reject_cis cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + cp.reason = reason; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { struct hci_cp_reject_conn_req cp; + if (conn->type == CIS_LINK) + return hci_le_reject_cis_sync(hdev, conn, reason); + + if (conn->type == BIS_LINK || conn->type == PA_LINK) + return -EINVAL; + if (conn->type == SCO_LINK || conn->type == ESCO_LINK) return hci_reject_sco_sync(hdev, conn, reason); @@ -4392,37 +5673,81 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } -static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, - u8 reason) +int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { + int err = 0; + u16 handle = conn->handle; + bool disconnect = false; + struct hci_conn *c; + switch (conn->state) { case BT_CONNECTED: case BT_CONFIG: - return hci_disconnect_sync(hdev, conn, reason); + err = hci_disconnect_sync(hdev, conn, reason); + break; case BT_CONNECT: - return hci_connect_cancel_sync(hdev, conn); + err = hci_connect_cancel_sync(hdev, conn, reason); + break; case BT_CONNECT2: - return hci_reject_conn_sync(hdev, conn, 
reason); + err = hci_reject_conn_sync(hdev, conn, reason); + break; + case BT_OPEN: + case BT_BOUND: + break; default: - conn->state = BT_CLOSED; + disconnect = true; break; } - return 0; + hci_dev_lock(hdev); + + /* Check if the connection has been cleaned up concurrently */ + c = hci_conn_hash_lookup_handle(hdev, handle); + if (!c || c != conn) { + err = 0; + goto unlock; + } + + /* Cleanup hci_conn object if it cannot be cancelled as it + * likely means the controller and host stack are out of sync + * or in case of LE it was still scanning so it can be cleanup + * safely. + */ + if (disconnect) { + conn->state = BT_CLOSED; + hci_disconn_cfm(conn, reason); + hci_conn_del(conn); + } else { + hci_conn_failed(conn, reason); + } + +unlock: + hci_dev_unlock(hdev); + return err; } static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason) { - struct hci_conn *conn, *tmp; - int err; + struct list_head *head = &hdev->conn_hash.list; + struct hci_conn *conn; - list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) { - err = hci_abort_conn_sync(hdev, conn, reason); - if (err) - return err; + rcu_read_lock(); + while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) { + /* Make sure the connection is not freed while unlocking */ + conn = hci_conn_get(conn); + rcu_read_unlock(); + /* Disregard possible errors since hci_conn_del shall have been + * called even in case of errors had occurred since it would + * then cause hci_conn_failed to be called which calls + * hci_conn_del internally. + */ + hci_abort_conn_sync(hdev, conn, reason); + hci_conn_put(conn); + rcu_read_lock(); } + rcu_read_unlock(); - return err; + return 0; } /* This function perform power off HCI command sequence as follows: @@ -4440,27 +5765,33 @@ static int hci_power_off_sync(struct hci_dev *hdev) if (!test_bit(HCI_UP, &hdev->flags)) return 0; + hci_dev_set_flag(hdev, HCI_POWERING_DOWN); + if (test_bit(HCI_ISCAN, &hdev->flags) || test_bit(HCI_PSCAN, &hdev->flags)) { err = hci_write_scan_enable_sync(hdev, 0x00); if (err) - return err; + goto out; } err = hci_clear_adv_sync(hdev, NULL, false); if (err) - return err; + goto out; err = hci_stop_discovery_sync(hdev); if (err) - return err; + goto out; /* Terminated due to Power Off */ err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); if (err) - return err; + goto out; - return hci_dev_close_sync(hdev); + err = hci_dev_close_sync(hdev); + +out: + hci_dev_clear_flag(hdev, HCI_POWERING_DOWN); + return err; } int hci_set_powered_sync(struct hci_dev *hdev, u8 val) @@ -4591,7 +5922,7 @@ int hci_update_connectable_sync(struct hci_dev *hdev) return hci_update_passive_scan_sync(hdev); } -static int hci_inquiry_sync(struct hci_dev *hdev, u8 length) +int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp) { const u8 giac[3] = { 0x33, 0x8b, 0x9e }; const u8 liac[3] = { 0x00, 0x8b, 0x9e }; @@ -4599,7 +5930,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length) bt_dev_dbg(hdev, ""); - if (hci_dev_test_flag(hdev, HCI_INQUIRY)) + if (test_bit(HCI_INQUIRY, &hdev->flags)) return 0; hci_dev_lock(hdev); @@ -4614,6 +5945,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length) memcpy(&cp.lap, giac, sizeof(cp.lap)); cp.length = length; + cp.num_rsp = num_rsp; return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); @@ -4642,27 +5974,12 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval) cancel_interleave_scan(hdev); - /* Pause advertising since active scanning disables address 
resolution - * which advertising depend on in order to generate its RPAs. - */ - if (use_ll_privacy(hdev)) { - err = hci_pause_advertising_sync(hdev); - if (err) { - bt_dev_err(hdev, "pause advertising failed: %d", err); - goto failed; - } - } - - /* Disable address resolution while doing active scanning since the - * accept list shall not be used and all reports shall reach the host - * anyway. + /* Pause address resolution for active scan and stop advertising if + * privacy is enabled. */ - err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); - if (err) { - bt_dev_err(hdev, "Unable to disable Address Resolution: %d", - err); + err = hci_pause_addr_resolution(hdev); + if (err) goto failed; - } /* All active scans will be done with either a resolvable private * address (when privacy feature has been enabled) or non-resolvable @@ -4673,19 +5990,18 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval) if (err < 0) own_addr_type = ADDR_LE_DEV_PUBLIC; - if (hci_is_adv_monitoring(hdev)) { + if (hci_is_adv_monitoring(hdev) || + (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER) && + hdev->discovery.result_filtering)) { /* Duplicate filter should be disabled when some advertisement * monitor is activated, otherwise AdvMon can only receive one * advertisement for one peer(*) during active scanning, and * might report loss to these peers. * - * Note that different controllers have different meanings of - * |duplicate|. Some of them consider packets with the same - * address as duplicate, and others consider packets with the - * same address and the same RSSI as duplicate. Although in the - * latter case we don't need to disable duplicate filter, but - * it is common to have active scanning for a short period of - * time, the power impact should be neglectable. + * If controller does strict duplicate filtering and the + * discovery requires result filtering disables controller based + * filtering since that can cause reports that would match the + * host filter to not be reported. */ filter_dup = LE_SCAN_FILTER_DUP_DISABLE; } @@ -4698,7 +6014,7 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval) failed: /* Resume advertising if it was paused */ - if (use_ll_privacy(hdev)) + if (ll_privacy_capable(hdev)) hci_resume_advertising_sync(hdev); /* Resume passive scanning */ @@ -4716,7 +6032,7 @@ static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev) if (err) return err; - return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); + return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0); } int hci_start_discovery_sync(struct hci_dev *hdev) @@ -4728,7 +6044,7 @@ int hci_start_discovery_sync(struct hci_dev *hdev) switch (hdev->discovery.type) { case DISCOV_TYPE_BREDR: - return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); + return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0); case DISCOV_TYPE_INTERLEAVED: /* When running simultaneous discovery, the LE scanning time * should occupy the whole discovery time sine BR/EDR inquiry @@ -4738,8 +6054,7 @@ int hci_start_discovery_sync(struct hci_dev *hdev) * and LE scanning are done sequentially with separate * timeouts. */ - if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, - &hdev->quirks)) { + if (hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) { timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); /* During simultaneous discovery, we double LE scan * interval. 
We must leave some time for the controller @@ -4765,17 +6080,6 @@ int hci_start_discovery_sync(struct hci_dev *hdev) bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); - /* When service discovery is used and the controller has a - * strict duplicate filter, it is important to remember the - * start and duration of the scan. This is required for - * restarting scanning during the discovery phase. - */ - if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && - hdev->discovery.result_filtering) { - hdev->discovery.scan_start = jiffies; - hdev->discovery.scan_duration = timeout; - } - queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, timeout); return 0; @@ -4809,7 +6113,6 @@ static int hci_pause_discovery_sync(struct hci_dev *hdev) return err; hdev->discovery_paused = true; - hdev->discovery_old_state = old_state; hci_discovery_set_state(hdev, DISCOVERY_STOPPED); return 0; @@ -4825,11 +6128,17 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev) if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; + /* Some fake CSR controllers lock up after setting this type of + * filter, so avoid sending the request altogether. + */ + if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL)) + return 0; + /* Always clear event filter when starting */ hci_clear_event_filter_sync(hdev); list_for_each_entry(b, &hdev->accept_list, list) { - if (!test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, b->flags)) + if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) continue; bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); @@ -4839,7 +6148,7 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev) &b->bdaddr, HCI_CONN_SETUP_AUTO_ON); if (err) - bt_dev_dbg(hdev, "Failed to set event filter for %pMR", + bt_dev_err(hdev, "Failed to set event filter for %pMR", &b->bdaddr); else scan = SCAN_PAGE; @@ -4853,10 +6162,28 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev) return 0; } +/* This function disables scan (BR and LE) and mark it as paused */ +static int hci_pause_scan_sync(struct hci_dev *hdev) +{ + if (hdev->scanning_paused) + return 0; + + /* Disable page scan if enabled */ + if (test_bit(HCI_PSCAN, &hdev->flags)) + hci_write_scan_enable_sync(hdev, SCAN_DISABLED); + + hci_scan_disable_sync(hdev); + + hdev->scanning_paused = true; + + return 0; +} + /* This function performs the HCI suspend procedures in the follow order: * * Pause discovery (active scanning/inquiry) * Pause Directed Advertising/Advertising + * Pause Scanning (passive scanning in case discovery was not active) * Disconnect all connections * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup * otherwise: @@ -4882,23 +6209,26 @@ int hci_suspend_sync(struct hci_dev *hdev) /* Pause other advertisements */ hci_pause_advertising_sync(hdev); - /* Disable page scan if enabled */ - if (test_bit(HCI_PSCAN, &hdev->flags)) - hci_write_scan_enable_sync(hdev, SCAN_DISABLED); - /* Suspend monitor filters */ hci_suspend_monitor_sync(hdev); /* Prevent disconnects from causing scanning to be re-enabled */ - hdev->scanning_paused = true; + hci_pause_scan_sync(hdev); - /* Soft disconnect everything (power off) */ - err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); - if (err) { - /* Set state to BT_RUNNING so resume doesn't notify */ - hdev->suspend_state = BT_RUNNING; - hci_resume_sync(hdev); - return err; + if (hci_conn_count(hdev)) { + /* Soft disconnect everything (power off) */ + err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); + if (err) { + /* 
Set state to BT_RUNNING so resume doesn't notify */ + hdev->suspend_state = BT_RUNNING; + hci_resume_sync(hdev); + return err; + } + + /* Update event mask so only the allowed event can wakeup the + * host. + */ + hci_set_event_mask_sync(hdev); } /* Only configure accept list if disconnect succeeded and wake @@ -4912,9 +6242,6 @@ int hci_suspend_sync(struct hci_dev *hdev) /* Unpause to take care of updating scanning params */ hdev->scanning_paused = false; - /* Update event mask so only the allowed event can wakeup the host */ - hci_set_event_mask_sync(hdev); - /* Enable event filter for paired devices */ hci_update_event_filter_sync(hdev); @@ -4961,6 +6288,22 @@ static void hci_resume_monitor_sync(struct hci_dev *hdev) } } +/* This function resume scan and reset paused flag */ +static int hci_resume_scan_sync(struct hci_dev *hdev) +{ + if (!hdev->scanning_paused) + return 0; + + hdev->scanning_paused = false; + + hci_update_scan_sync(hdev); + + /* Reset passive scanning to normal */ + hci_update_passive_scan_sync(hdev); + + return 0; +} + /* This function performs the HCI suspend procedures in the follow order: * * Restore event mask @@ -4976,17 +6319,15 @@ int hci_resume_sync(struct hci_dev *hdev) return 0; hdev->suspended = false; - hdev->scanning_paused = false; /* Restore event mask */ hci_set_event_mask_sync(hdev); /* Clear any event filters and restore scan state */ hci_clear_event_filter_sync(hdev); - hci_update_scan_sync(hdev); - /* Reset passive scanning to normal */ - hci_update_passive_scan_sync(hdev); + /* Resume scanning */ + hci_resume_scan_sync(hdev); /* Resume monitor filters */ hci_resume_monitor_sync(hdev); @@ -5011,6 +6352,7 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, struct hci_conn *conn) { struct hci_cp_le_set_ext_adv_params cp; + struct hci_rp_le_set_ext_adv_params rp; int err; bdaddr_t random_addr; u8 own_addr_type; @@ -5031,7 +6373,6 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, memset(&cp, 0, sizeof(cp)); cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND); - cp.own_addr_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; cp.tx_power = HCI_TX_POWER_INVALID; cp.primary_phy = HCI_ADV_PHY_1M; @@ -5053,8 +6394,12 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, if (err) return err; - err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, - sizeof(cp), &cp, HCI_CMD_TIMEOUT); + err = hci_set_ext_adv_params_sync(hdev, NULL, &cp, &rp); + if (err) + return err; + + /* Update adv data as tx power is known now */ + err = hci_set_ext_adv_data_sync(hdev, cp.handle); if (err) return err; @@ -5140,8 +6485,8 @@ static void set_ext_conn_params(struct hci_conn *conn, p->max_ce_len = cpu_to_le16(0x0000); } -int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, - u8 own_addr_type) +static int hci_le_ext_create_conn_sync(struct hci_dev *hdev, + struct hci_conn *conn, u8 own_addr_type) { struct hci_cp_le_ext_create_conn *cp; struct hci_cp_le_ext_conn_param *p; @@ -5159,7 +6504,8 @@ int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, plen = sizeof(*cp); - if (scan_1m(hdev)) { + if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M || + conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) { cp->phys |= LE_SCAN_PHY_1M; set_ext_conn_params(conn, p); @@ -5167,7 +6513,8 @@ int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, plen += sizeof(*p); } - if (scan_2m(hdev)) { + if (scan_2m(hdev) && (conn->le_adv_phy == 
HCI_ADV_PHY_2M || + conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) { cp->phys |= LE_SCAN_PHY_2M; set_ext_conn_params(conn, p); @@ -5175,7 +6522,8 @@ int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, plen += sizeof(*p); } - if (scan_coded(hdev)) { + if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED || + conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) { cp->phys |= LE_SCAN_PHY_CODED; set_ext_conn_params(conn, p); @@ -5185,15 +6533,24 @@ int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN, plen, data, HCI_EV_LE_ENHANCED_CONN_COMPLETE, - HCI_CMD_TIMEOUT, NULL); + conn->conn_timeout, NULL); } -int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn) +static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data) { struct hci_cp_le_create_conn cp; struct hci_conn_params *params; u8 own_addr_type; int err; + struct hci_conn *conn = data; + + if (!hci_conn_valid(hdev, conn)) + return -ECANCELED; + + bt_dev_dbg(hdev, "conn %p", conn); + + clear_bit(HCI_CONN_SCANNING, &conn->flags); + conn->state = BT_CONNECT; /* If requested to connect as peripheral use directed advertising */ if (conn->role == HCI_ROLE_SLAVE) { @@ -5249,7 +6606,7 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn) &own_addr_type); if (err) goto done; - + /* Send command LE Extended Create Connection if supported */ if (use_ext_conn(hdev)) { err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type); goto done; @@ -5270,12 +6627,794 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn) cp.min_ce_len = cpu_to_le16(0x0000); cp.max_ce_len = cpu_to_le16(0x0000); + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261: + * + * If this event is unmasked and the HCI_LE_Connection_Complete event + * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is + * sent when a new connection has been created. + */ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN, - sizeof(cp), &cp, HCI_EV_LE_CONN_COMPLETE, - HCI_CMD_TIMEOUT, NULL); + sizeof(cp), &cp, + use_enhanced_conn_complete(hdev) ? + HCI_EV_LE_ENHANCED_CONN_COMPLETE : + HCI_EV_LE_CONN_COMPLETE, + conn->conn_timeout, NULL); done: + if (err == -ETIMEDOUT) + hci_le_connect_cancel_sync(hdev, conn, 0x00); + /* Re-enable advertising after the connection attempt is finished. */ hci_resume_advertising_sync(hdev); return err; } + +int hci_le_create_cis_sync(struct hci_dev *hdev) +{ + DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f); + size_t aux_num_cis = 0; + struct hci_conn *conn; + u8 cig = BT_ISO_QOS_CIG_UNSET; + + /* The spec allows only one pending LE Create CIS command at a time. If + * the command is pending now, don't do anything. We check for pending + * connections after each CIS Established event. + * + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 2566: + * + * If the Host issues this command before all the + * HCI_LE_CIS_Established events from the previous use of the + * command have been generated, the Controller shall return the + * error code Command Disallowed (0x0C). + * + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 2567: + * + * When the Controller receives the HCI_LE_Create_CIS command, the + * Controller sends the HCI_Command_Status event to the Host. 
An + * HCI_LE_CIS_Established event will be generated for each CIS when it + * is established or if it is disconnected or considered lost before + * being established; until all the events are generated, the command + * remains pending. + */ + + hci_dev_lock(hdev); + + rcu_read_lock(); + + /* Wait until previous Create CIS has completed */ + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { + if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) + goto done; + } + + /* Find CIG with all CIS ready */ + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { + struct hci_conn *link; + + if (hci_conn_check_create_cis(conn)) + continue; + + cig = conn->iso_qos.ucast.cig; + + list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) { + if (hci_conn_check_create_cis(link) > 0 && + link->iso_qos.ucast.cig == cig && + link->state != BT_CONNECTED) { + cig = BT_ISO_QOS_CIG_UNSET; + break; + } + } + + if (cig != BT_ISO_QOS_CIG_UNSET) + break; + } + + if (cig == BT_ISO_QOS_CIG_UNSET) + goto done; + + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { + struct hci_cis *cis = &cmd->cis[aux_num_cis]; + + if (hci_conn_check_create_cis(conn) || + conn->iso_qos.ucast.cig != cig) + continue; + + set_bit(HCI_CONN_CREATE_CIS, &conn->flags); + cis->acl_handle = cpu_to_le16(conn->parent->handle); + cis->cis_handle = cpu_to_le16(conn->handle); + aux_num_cis++; + + if (aux_num_cis >= cmd->num_cis) + break; + } + cmd->num_cis = aux_num_cis; + +done: + rcu_read_unlock(); + + hci_dev_unlock(hdev); + + if (!aux_num_cis) + return 0; + + /* Wait for HCI_LE_CIS_Established */ + return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS, + struct_size(cmd, cis, cmd->num_cis), + cmd, HCI_EVT_LE_CIS_ESTABLISHED, + conn->conn_timeout, NULL); +} + +int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle) +{ + struct hci_cp_le_remove_cig cp; + + memset(&cp, 0, sizeof(cp)); + cp.cig_id = handle; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp), + &cp, HCI_CMD_TIMEOUT); +} + +int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle) +{ + struct hci_cp_le_big_term_sync cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle) +{ + struct hci_cp_le_pa_term_sync cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(handle); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, + bool use_rpa, struct adv_info *adv_instance, + u8 *own_addr_type, bdaddr_t *rand_addr) +{ + int err; + + bacpy(rand_addr, BDADDR_ANY); + + /* If privacy is enabled use a resolvable private address. If + * current RPA has expired then generate a new one. + */ + if (use_rpa) { + /* If Controller supports LL Privacy use own address type is + * 0x03 + */ + if (ll_privacy_capable(hdev)) + *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; + else + *own_addr_type = ADDR_LE_DEV_RANDOM; + + if (adv_instance) { + if (adv_rpa_valid(adv_instance)) + return 0; + } else { + if (rpa_valid(hdev)) + return 0; + } + + err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); + if (err < 0) { + bt_dev_err(hdev, "failed to generate new RPA"); + return err; + } + + bacpy(rand_addr, &hdev->rpa); + + return 0; + } + + /* In case of required privacy without resolvable private address, + * use an non-resolvable private address. 
This is useful for + * non-connectable advertising. + */ + if (require_privacy) { + bdaddr_t nrpa; + + while (true) { + /* The non-resolvable private address is generated + * from random six bytes with the two most significant + * bits cleared. + */ + get_random_bytes(&nrpa, 6); + nrpa.b[5] &= 0x3f; + + /* The non-resolvable private address shall not be + * equal to the public address. + */ + if (bacmp(&hdev->bdaddr, &nrpa)) + break; + } + + *own_addr_type = ADDR_LE_DEV_RANDOM; + bacpy(rand_addr, &nrpa); + + return 0; + } + + /* No privacy, use the current address */ + hci_copy_identity_address(hdev, rand_addr, own_addr_type); + + return 0; +} + +static int _update_adv_data_sync(struct hci_dev *hdev, void *data) +{ + u8 instance = PTR_UINT(data); + + return hci_update_adv_data_sync(hdev, instance); +} + +int hci_update_adv_data(struct hci_dev *hdev, u8 instance) +{ + return hci_cmd_sync_queue(hdev, _update_adv_data_sync, + UINT_PTR(instance), NULL); +} + +static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data) +{ + struct hci_conn *conn = data; + struct inquiry_entry *ie; + struct hci_cp_create_conn cp; + int err; + + if (!hci_conn_valid(hdev, conn)) + return -ECANCELED; + + /* Many controllers disallow HCI Create Connection while it is doing + * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create + * Connection. This may cause the MGMT discovering state to become false + * without user space's request but it is okay since the MGMT Discovery + * APIs do not promise that discovery should be done forever. Instead, + * the user space monitors the status of MGMT discovering and it may + * request for discovery again when this flag becomes false. + */ + if (test_bit(HCI_INQUIRY, &hdev->flags)) { + err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0, + NULL, HCI_CMD_TIMEOUT); + if (err) + bt_dev_warn(hdev, "Failed to cancel inquiry %d", err); + } + + conn->state = BT_CONNECT; + conn->out = true; + conn->role = HCI_ROLE_MASTER; + + conn->attempt++; + + conn->link_policy = hdev->link_policy; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, &conn->dst); + cp.pscan_rep_mode = 0x02; + + ie = hci_inquiry_cache_lookup(hdev, &conn->dst); + if (ie) { + if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { + cp.pscan_rep_mode = ie->data.pscan_rep_mode; + cp.pscan_mode = ie->data.pscan_mode; + cp.clock_offset = ie->data.clock_offset | + cpu_to_le16(0x8000); + } + + memcpy(conn->dev_class, ie->data.dev_class, 3); + } + + cp.pkt_type = cpu_to_le16(conn->pkt_type); + if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) + cp.role_switch = 0x01; + else + cp.role_switch = 0x00; + + return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN, + sizeof(cp), &cp, + HCI_EV_CONN_COMPLETE, + conn->conn_timeout, NULL); +} + +int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn, + NULL); +} + +static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_conn *conn = data; + + bt_dev_dbg(hdev, "err %d", err); + + if (err == -ECANCELED) + return; + + hci_dev_lock(hdev); + + if (!hci_conn_valid(hdev, conn)) + goto done; + + if (!err) { + hci_connect_le_scan_cleanup(conn, 0x00); + goto done; + } + + /* Check if connection is still pending */ + if (conn != hci_lookup_le_connect(hdev)) + goto done; + + /* Flush to make sure we send create conn cancel command if needed */ + flush_delayed_work(&conn->le_conn_timeout); + hci_conn_failed(conn, 
bt_status(err)); + +done: + hci_dev_unlock(hdev); +} + +int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn, + create_le_conn_complete); +} + +int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + if (conn->state != BT_OPEN) + return -EINVAL; + + switch (conn->type) { + case ACL_LINK: + return !hci_cmd_sync_dequeue_once(hdev, + hci_acl_create_conn_sync, + conn, NULL); + case LE_LINK: + return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync, + conn, create_le_conn_complete); + } + + return -ENOENT; +} + +int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, + struct hci_conn_params *params) +{ + struct hci_cp_le_conn_update cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + cp.conn_interval_min = cpu_to_le16(params->conn_min_interval); + cp.conn_interval_max = cpu_to_le16(params->conn_max_interval); + cp.conn_latency = cpu_to_le16(params->conn_latency); + cp.supervision_timeout = cpu_to_le16(params->supervision_timeout); + cp.min_ce_len = cpu_to_le16(0x0000); + cp.max_ce_len = cpu_to_le16(0x0000); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static void create_pa_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_conn *conn = data; + struct hci_conn *pa_sync; + + bt_dev_dbg(hdev, "err %d", err); + + if (err == -ECANCELED) + return; + + hci_dev_lock(hdev); + + if (hci_conn_valid(hdev, conn)) + clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); + + if (!err) + goto unlock; + + /* Add connection to indicate PA sync error */ + pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY, 0, + HCI_ROLE_SLAVE); + + if (IS_ERR(pa_sync)) + goto unlock; + + set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags); + + /* Notify iso layer */ + hci_connect_cfm(pa_sync, bt_status(err)); + +unlock: + hci_dev_unlock(hdev); +} + +static int hci_le_past_params_sync(struct hci_dev *hdev, struct hci_conn *conn, + struct hci_conn *acl, struct bt_iso_qos *qos) +{ + struct hci_cp_le_past_params cp; + int err; + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(acl->handle); + /* An HCI_LE_Periodic_Advertising_Sync_Transfer_Received event is sent + * to the Host. HCI_LE_Periodic_Advertising_Report events will be + * enabled with duplicate filtering enabled. + */ + cp.mode = 0x03; + cp.skip = cpu_to_le16(qos->bcast.skip); + cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout); + cp.cte_type = qos->bcast.sync_cte_type; + + /* HCI_LE_PAST_PARAMS command returns a command complete event so it + * cannot wait for HCI_EV_LE_PAST_RECEIVED. 
+ */ + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PAST_PARAMS, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (err) + return err; + + /* Wait for HCI_EV_LE_PAST_RECEIVED event */ + return __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL, + HCI_EV_LE_PAST_RECEIVED, + conn->conn_timeout, NULL); +} + +static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data) +{ + struct hci_cp_le_pa_create_sync cp; + struct hci_conn *conn = data, *le; + struct bt_iso_qos *qos = &conn->iso_qos; + int err; + + if (!hci_conn_valid(hdev, conn)) + return -ECANCELED; + + if (conn->sync_handle != HCI_SYNC_HANDLE_INVALID) + return -EINVAL; + + if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC)) + return -EBUSY; + + /* Stop scanning if SID has not been set and active scanning is enabled + * so we use passive scanning which will be scanning using the allow + * list programmed to contain only the connection address. + */ + if (conn->sid == HCI_SID_INVALID && + hci_dev_test_flag(hdev, HCI_LE_SCAN)) { + hci_scan_disable_sync(hdev); + hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + } + + /* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can + * program the address in the allow list so PA advertisements can be + * received. + */ + set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); + + hci_update_passive_scan_sync(hdev); + + /* Check if PAST is possible: + * + * 1. Check if an ACL connection with the destination address exists + * 2. Check if that HCI_CONN_FLAG_PAST has been set which indicates that + * user really intended to use PAST. + */ + le = hci_conn_hash_lookup_le(hdev, &conn->dst, conn->dst_type); + if (le) { + struct hci_conn_params *params; + + params = hci_conn_params_lookup(hdev, &le->dst, le->dst_type); + if (params && params->flags & HCI_CONN_FLAG_PAST) { + err = hci_le_past_params_sync(hdev, conn, le, qos); + if (!err) + goto done; + } + } + + /* SID has not been set listen for HCI_EV_LE_EXT_ADV_REPORT to update + * it. + */ + if (conn->sid == HCI_SID_INVALID) { + err = __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL, + HCI_EV_LE_EXT_ADV_REPORT, + conn->conn_timeout, NULL); + if (err == -ETIMEDOUT) + goto done; + } + + memset(&cp, 0, sizeof(cp)); + cp.options = qos->bcast.options; + cp.sid = conn->sid; + cp.addr_type = conn->dst_type; + bacpy(&cp.addr, &conn->dst); + cp.skip = cpu_to_le16(qos->bcast.skip); + cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout); + cp.sync_cte_type = qos->bcast.sync_cte_type; + + /* The spec allows only one pending LE Periodic Advertising Create + * Sync command at a time so we forcefully wait for PA Sync Established + * event since cmd_work can only schedule one command at a time. + * + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 2493: + * + * If the Host issues this command when another HCI_LE_Periodic_ + * Advertising_Create_Sync command is pending, the Controller shall + * return the error code Command Disallowed (0x0C). 
+ */ + err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_PA_CREATE_SYNC, + sizeof(cp), &cp, + HCI_EV_LE_PA_SYNC_ESTABLISHED, + conn->conn_timeout, NULL); + if (err == -ETIMEDOUT) + __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL, + 0, NULL, HCI_CMD_TIMEOUT); + +done: + hci_dev_clear_flag(hdev, HCI_PA_SYNC); + + /* Update passive scan since HCI_PA_SYNC flag has been cleared */ + hci_update_passive_scan_sync(hdev); + + return err; +} + +int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + return hci_cmd_sync_queue_once(hdev, hci_le_pa_create_sync, conn, + create_pa_complete); +} + +static void create_big_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_conn *conn = data; + + bt_dev_dbg(hdev, "err %d", err); + + if (err == -ECANCELED) + return; + + if (hci_conn_valid(hdev, conn)) + clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); +} + +static int hci_le_big_create_sync(struct hci_dev *hdev, void *data) +{ + DEFINE_FLEX(struct hci_cp_le_big_create_sync, cp, bis, num_bis, 0x11); + struct hci_conn *conn = data; + struct bt_iso_qos *qos = &conn->iso_qos; + int err; + + if (!hci_conn_valid(hdev, conn)) + return -ECANCELED; + + set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); + + memset(cp, 0, sizeof(*cp)); + cp->handle = qos->bcast.big; + cp->sync_handle = cpu_to_le16(conn->sync_handle); + cp->encryption = qos->bcast.encryption; + memcpy(cp->bcode, qos->bcast.bcode, sizeof(cp->bcode)); + cp->mse = qos->bcast.mse; + cp->timeout = cpu_to_le16(qos->bcast.timeout); + cp->num_bis = conn->num_bis; + memcpy(cp->bis, conn->bis, conn->num_bis); + + /* The spec allows only one pending LE BIG Create Sync command at + * a time, so we forcefully wait for BIG Sync Established event since + * cmd_work can only schedule one command at a time. + * + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 2586: + * + * If the Host sends this command when the Controller is in the + * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_ + * Established event has not been generated, the Controller shall + * return the error code Command Disallowed (0x0C). 
+ */ + err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_BIG_CREATE_SYNC, + struct_size(cp, bis, cp->num_bis), cp, + HCI_EVT_LE_BIG_SYNC_ESTABLISHED, + conn->conn_timeout, NULL); + if (err == -ETIMEDOUT) + hci_le_big_terminate_sync(hdev, cp->handle); + + return err; +} + +int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + return hci_cmd_sync_queue_once(hdev, hci_le_big_create_sync, conn, + create_big_complete); +} + +struct past_data { + struct hci_conn *conn; + struct hci_conn *le; +}; + +static void past_complete(struct hci_dev *hdev, void *data, int err) +{ + struct past_data *past = data; + + bt_dev_dbg(hdev, "err %d", err); + + kfree(past); +} + +static int hci_le_past_set_info_sync(struct hci_dev *hdev, void *data) +{ + struct past_data *past = data; + struct hci_cp_le_past_set_info cp; + + hci_dev_lock(hdev); + + if (!hci_conn_valid(hdev, past->conn) || + !hci_conn_valid(hdev, past->le)) { + hci_dev_unlock(hdev); + return -ECANCELED; + } + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(past->le->handle); + cp.adv_handle = past->conn->iso_qos.bcast.bis; + + hci_dev_unlock(hdev); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_PAST_SET_INFO, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_le_past_sync(struct hci_dev *hdev, void *data) +{ + struct past_data *past = data; + struct hci_cp_le_past cp; + + hci_dev_lock(hdev); + + if (!hci_conn_valid(hdev, past->conn) || + !hci_conn_valid(hdev, past->le)) { + hci_dev_unlock(hdev); + return -ECANCELED; + } + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(past->le->handle); + cp.sync_handle = cpu_to_le16(past->conn->sync_handle); + + hci_dev_unlock(hdev); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_PAST, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_past_sync(struct hci_conn *conn, struct hci_conn *le) +{ + struct past_data *data; + int err; + + if (conn->type != BIS_LINK && conn->type != PA_LINK) + return -EINVAL; + + if (!past_sender_capable(conn->hdev)) + return -EOPNOTSUPP; + + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->conn = conn; + data->le = le; + + if (conn->role == HCI_ROLE_MASTER) + err = hci_cmd_sync_queue_once(conn->hdev, + hci_le_past_set_info_sync, data, + past_complete); + else + err = hci_cmd_sync_queue_once(conn->hdev, hci_le_past_sync, + data, past_complete); + + if (err) + kfree(data); + + return err; +} + +static void le_read_features_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_conn *conn = data; + + bt_dev_dbg(hdev, "err %d", err); + + if (err == -ECANCELED) + return; + + hci_conn_drop(conn); +} + +static int hci_le_read_all_remote_features_sync(struct hci_dev *hdev, + void *data) +{ + struct hci_conn *conn = data; + struct hci_cp_le_read_all_remote_features cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + cp.pages = 10; /* Attempt to read all pages */ + + /* Wait for HCI_EVT_LE_ALL_REMOTE_FEATURES_COMPLETE event otherwise + * hci_conn_drop may run prematurely causing a disconnection. 
+ */ + return __hci_cmd_sync_status_sk(hdev, + HCI_OP_LE_READ_ALL_REMOTE_FEATURES, + sizeof(cp), &cp, + HCI_EVT_LE_ALL_REMOTE_FEATURES_COMPLETE, + HCI_CMD_TIMEOUT, NULL); +} + +static int hci_le_read_remote_features_sync(struct hci_dev *hdev, void *data) +{ + struct hci_conn *conn = data; + struct hci_cp_le_read_remote_features cp; + + if (!hci_conn_valid(hdev, conn)) + return -ECANCELED; + + /* Check if LL Extended Feature Set is supported and + * HCI_OP_LE_READ_ALL_REMOTE_FEATURES is supported; if so, use that to + * read all features. + */ + if (ll_ext_feature_capable(hdev) && hdev->commands[47] & BIT(3)) + return hci_le_read_all_remote_features_sync(hdev, data); + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + + /* Wait for HCI_EV_LE_REMOTE_FEAT_COMPLETE event otherwise + * hci_conn_drop may run prematurely causing a disconnection. + */ + return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, + sizeof(cp), &cp, + HCI_EV_LE_REMOTE_FEAT_COMPLETE, + HCI_CMD_TIMEOUT, NULL); +} + +int hci_le_read_remote_features(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + int err; + + /* The remote features procedure is defined for central + * role only. So only in case of an initiated connection + * request the remote features. + * + * If the local controller supports peripheral-initiated features + * exchange, then requesting the remote features in peripheral + * role is possible. Otherwise just transition into the + * connected state without requesting the remote features. + */ + if (conn->out || (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) + err = hci_cmd_sync_queue_once(hdev, + hci_le_read_remote_features_sync, + hci_conn_hold(conn), + le_read_features_complete); + else + err = -EOPNOTSUPP; + + return err; +}
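The connection helpers in this diff all follow the same cmd_sync pattern: a *_sync() work function is queued with hci_cmd_sync_queue_once() together with an optional completion callback that receives the work function's return value (or -ECANCELED if the entry was dequeued before running). The sketch below is an illustrative example of that pattern under those assumptions; example_foo_sync(), example_foo_complete() and example_connect_foo() are hypothetical placeholder names, while hci_cmd_sync_queue_once(), hci_conn_valid() and the callback signatures are the ones used in this file.

static int example_foo_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;

	/* Bail out if the connection went away while the entry was queued */
	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	/* ... issue HCI commands with __hci_cmd_sync_status() here ... */
	return 0;
}

static void example_foo_complete(struct hci_dev *hdev, void *data, int err)
{
	/* Runs after example_foo_sync() returns, or with err == -ECANCELED
	 * if the queued entry was cancelled before it had a chance to run.
	 */
	bt_dev_dbg(hdev, "err %d", err);
}

static int example_connect_foo(struct hci_dev *hdev, struct hci_conn *conn)
{
	/* Queue at most one instance of this work item for the connection */
	return hci_cmd_sync_queue_once(hdev, example_foo_sync, conn,
				       example_foo_complete);
}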

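As a rough sketch of the two command helpers used throughout the diff: __hci_cmd_sync_status() sends a command and waits only for its own status/complete event, while __hci_cmd_sync_status_sk() additionally blocks until a specific follow-up HCI event arrives (as the CIS, PA sync and remote-features helpers above do). The opcode, parameter struct and event code below (EXAMPLE_OP_VENDOR_FOO, example_cp_vendor_foo, EXAMPLE_EV_VENDOR_FOO_DONE) are hypothetical placeholders, not real HCI definitions.

#define EXAMPLE_OP_VENDOR_FOO		0xfc01	/* hypothetical vendor opcode */
#define EXAMPLE_EV_VENDOR_FOO_DONE	0xff	/* hypothetical vendor event */

struct example_cp_vendor_foo {
	__le16 handle;
} __packed;

static int example_vendor_foo_sync(struct hci_dev *hdev, u16 handle)
{
	struct example_cp_vendor_foo cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(handle);

	/* Wait only for the command status/complete event */
	return __hci_cmd_sync_status(hdev, EXAMPLE_OP_VENDOR_FOO,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int example_vendor_foo_wait_sync(struct hci_dev *hdev, u16 handle)
{
	struct example_cp_vendor_foo cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(handle);

	/* Block until the (hypothetical) follow-up event arrives, mirroring
	 * how the helpers above wait for connection/sync established events.
	 */
	return __hci_cmd_sync_status_sk(hdev, EXAMPLE_OP_VENDOR_FOO,
					sizeof(cp), &cp,
					EXAMPLE_EV_VENDOR_FOO_DONE,
					HCI_CMD_TIMEOUT, NULL);
}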