Diffstat (limited to 'net/bluetooth/hci_core.c')
| -rw-r--r-- | net/bluetooth/hci_core.c | 4210 |
1 file changed, 2427 insertions(+), 1783 deletions(-)
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index e3a349977595..8ccec73dce45 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -26,12 +26,26 @@
 /* Bluetooth HCI core. */
 
 #include <linux/export.h>
-#include <linux/idr.h>
-
 #include <linux/rfkill.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+#include <linux/kcov.h>
+#include <linux/property.h>
+#include <linux/suspend.h>
+#include <linux/wait.h>
+#include <linux/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/mgmt.h>
+
+#include "hci_debugfs.h"
+#include "smp.h"
+#include "leds.h"
+#include "msft.h"
+#include "aosp.h"
+#include "hci_codec.h"
 
 static void hci_rx_work(struct work_struct *work);
 static void hci_cmd_work(struct work_struct *work);
@@ -43,661 +57,14 @@ DEFINE_RWLOCK(hci_dev_list_lock);
 
 /* HCI callback list */
 LIST_HEAD(hci_cb_list);
-DEFINE_RWLOCK(hci_cb_list_lock);
+DEFINE_MUTEX(hci_cb_list_lock);
 
 /* HCI ID Numbering */
 static DEFINE_IDA(hci_index_ida);
 
-/* ---- HCI notifications ---- */
-
-static void hci_notify(struct hci_dev *hdev, int event)
-{
-	hci_sock_dev_event(hdev, event);
-}
-
-/* ---- HCI requests ---- */
-
-static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
-{
-	BT_DBG("%s result 0x%2.2x", hdev->name, result);
-
-	if (hdev->req_status == HCI_REQ_PEND) {
-		hdev->req_result = result;
-		hdev->req_status = HCI_REQ_DONE;
-		wake_up_interruptible(&hdev->req_wait_q);
-	}
-}
-
-static void hci_req_cancel(struct hci_dev *hdev, int err)
-{
-	BT_DBG("%s err 0x%2.2x", hdev->name, err);
-
-	if (hdev->req_status == HCI_REQ_PEND) {
-		hdev->req_result = err;
-		hdev->req_status = HCI_REQ_CANCELED;
-		wake_up_interruptible(&hdev->req_wait_q);
-	}
-}
-
-static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
-					    u8 event)
-{
-	struct hci_ev_cmd_complete *ev;
-	struct hci_event_hdr *hdr;
-	struct sk_buff *skb;
-
-	hci_dev_lock(hdev);
-
-	skb = hdev->recv_evt;
-	hdev->recv_evt = NULL;
-
-	hci_dev_unlock(hdev);
-
-	if (!skb)
-		return ERR_PTR(-ENODATA);
-
-	if (skb->len < sizeof(*hdr)) {
-		BT_ERR("Too short HCI event");
-		goto failed;
-	}
-
-	hdr = (void *) skb->data;
-	skb_pull(skb, HCI_EVENT_HDR_SIZE);
-
-	if (event) {
-		if (hdr->evt != event)
-			goto failed;
-		return skb;
-	}
-
-	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
-		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
-		goto failed;
-	}
-
-	if (skb->len < sizeof(*ev)) {
-		BT_ERR("Too short cmd_complete event");
-		goto failed;
-	}
-
-	ev = (void *) skb->data;
-	skb_pull(skb, sizeof(*ev));
-
-	if (opcode == __le16_to_cpu(ev->opcode))
-		return skb;
-
-	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
-	       __le16_to_cpu(ev->opcode));
-
-failed:
-	kfree_skb(skb);
-	return ERR_PTR(-ENODATA);
-}
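Note: the wait-queue machinery removed above survives for callers through __hci_cmd_sync(), which still returns the Command Complete skb. A minimal caller sketch, assuming a powered-up hdev; this is illustrative only and not part of the patch, and example_read_bd_addr is a hypothetical helper:

	/* Illustrative sketch: issue a synchronous Read BD_ADDR command and
	 * consume the returned Command Complete parameters. The returned
	 * skb is owned by the caller and must be freed.
	 */
	static int example_read_bd_addr(struct hci_dev *hdev, bdaddr_t *out)
	{
		struct sk_buff *skb;

		skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
				     HCI_CMD_TIMEOUT);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		if (skb->len < sizeof(struct hci_rp_read_bd_addr)) {
			kfree_skb(skb);
			return -EIO;
		}

		bacpy(out, &((struct hci_rp_read_bd_addr *)skb->data)->bdaddr);
		kfree_skb(skb);
		return 0;
	}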
-
-struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
-				  const void *param, u8 event, u32 timeout)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	struct hci_request req;
-	int err = 0;
-
-	BT_DBG("%s", hdev->name);
-
-	hci_req_init(&req, hdev);
-
-	hci_req_add_ev(&req, opcode, plen, param, event);
-
-	hdev->req_status = HCI_REQ_PEND;
-
-	err = hci_req_run(&req, hci_req_sync_complete);
-	if (err < 0)
-		return ERR_PTR(err);
-
-	add_wait_queue(&hdev->req_wait_q, &wait);
-	set_current_state(TASK_INTERRUPTIBLE);
-
-	schedule_timeout(timeout);
-
-	remove_wait_queue(&hdev->req_wait_q, &wait);
-
-	if (signal_pending(current))
-		return ERR_PTR(-EINTR);
-
-	switch (hdev->req_status) {
-	case HCI_REQ_DONE:
-		err = -bt_to_errno(hdev->req_result);
-		break;
-
-	case HCI_REQ_CANCELED:
-		err = -hdev->req_result;
-		break;
-
-	default:
-		err = -ETIMEDOUT;
-		break;
-	}
-
-	hdev->req_status = hdev->req_result = 0;
-
-	BT_DBG("%s end: err %d", hdev->name, err);
-
-	if (err < 0)
-		return ERR_PTR(err);
-
-	return hci_get_cmd_complete(hdev, opcode, event);
-}
-EXPORT_SYMBOL(__hci_cmd_sync_ev);
-
-struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
-			       const void *param, u32 timeout)
-{
-	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
-}
-EXPORT_SYMBOL(__hci_cmd_sync);
-
-/* Execute request and wait for completion. */
-static int __hci_req_sync(struct hci_dev *hdev,
-			  void (*func)(struct hci_request *req,
-				       unsigned long opt),
-			  unsigned long opt, __u32 timeout)
-{
-	struct hci_request req;
-	DECLARE_WAITQUEUE(wait, current);
-	int err = 0;
-
-	BT_DBG("%s start", hdev->name);
-
-	hci_req_init(&req, hdev);
-
-	hdev->req_status = HCI_REQ_PEND;
-
-	func(&req, opt);
-
-	err = hci_req_run(&req, hci_req_sync_complete);
-	if (err < 0) {
-		hdev->req_status = 0;
-
-		/* ENODATA means the HCI request command queue is empty.
-		 * This can happen when a request with conditionals doesn't
-		 * trigger any commands to be sent. This is normal behavior
-		 * and should not trigger an error return.
-		 */
-		if (err == -ENODATA)
-			return 0;
-
-		return err;
-	}
-
-	add_wait_queue(&hdev->req_wait_q, &wait);
-	set_current_state(TASK_INTERRUPTIBLE);
-
-	schedule_timeout(timeout);
-
-	remove_wait_queue(&hdev->req_wait_q, &wait);
-
-	if (signal_pending(current))
-		return -EINTR;
-
-	switch (hdev->req_status) {
-	case HCI_REQ_DONE:
-		err = -bt_to_errno(hdev->req_result);
-		break;
-
-	case HCI_REQ_CANCELED:
-		err = -hdev->req_result;
-		break;
-
-	default:
-		err = -ETIMEDOUT;
-		break;
-	}
-
-	hdev->req_status = hdev->req_result = 0;
-
-	BT_DBG("%s end: err %d", hdev->name, err);
-
-	return err;
-}
-
-static int hci_req_sync(struct hci_dev *hdev,
-			void (*req)(struct hci_request *req,
-				    unsigned long opt),
-			unsigned long opt, __u32 timeout)
-{
-	int ret;
-
-	if (!test_bit(HCI_UP, &hdev->flags))
-		return -ENETDOWN;
-
-	/* Serialize all requests */
-	hci_req_lock(hdev);
-	ret = __hci_req_sync(hdev, req, opt, timeout);
-	hci_req_unlock(hdev);
-
-	return ret;
-}
-
-static void hci_reset_req(struct hci_request *req, unsigned long opt)
-{
-	BT_DBG("%s %ld", req->hdev->name, opt);
-
-	/* Reset device */
-	set_bit(HCI_RESET, &req->hdev->flags);
-	hci_req_add(req, HCI_OP_RESET, 0, NULL);
-}
-
-static void bredr_init(struct hci_request *req)
-{
-	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
-
-	/* Read Local Supported Features */
-	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
-
-	/* Read Local Version */
-	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-
-	/* Read BD Address */
-	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
-}
-
-static void amp_init(struct hci_request *req)
-{
-	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
-
-	/* Read Local Version */
-	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-
-	/* Read Local AMP Info */
-	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
-
-	/* Read Data Blk size */
-	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
-}
-
-static void hci_init1_req(struct hci_request *req, unsigned long opt)
-{
-	struct hci_dev *hdev = req->hdev;
-
-	BT_DBG("%s %ld", hdev->name, opt);
-
-	/* Reset */
-	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
-		hci_reset_req(req, 0);
-
-	switch
(hdev->dev_type) { - case HCI_BREDR: - bredr_init(req); - break; - - case HCI_AMP: - amp_init(req); - break; - - default: - BT_ERR("Unknown device type %d", hdev->dev_type); - break; - } -} - -static void bredr_setup(struct hci_request *req) -{ - __le16 param; - __u8 flt_type; - - /* Read Buffer Size (ACL mtu, max pkt, etc.) */ - hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL); - - /* Read Class of Device */ - hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); - - /* Read Local Name */ - hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL); - - /* Read Voice Setting */ - hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL); - - /* Clear Event Filters */ - flt_type = HCI_FLT_CLEAR_ALL; - hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type); - - /* Connection accept timeout ~20 secs */ - param = __constant_cpu_to_le16(0x7d00); - hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); - - /* Read page scan parameters */ - if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) { - hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); - hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL); - } -} - -static void le_setup(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - - /* Read LE Buffer Size */ - hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); - - /* Read LE Local Supported Features */ - hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL); - - /* Read LE Advertising Channel TX Power */ - hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); - - /* Read LE White List Size */ - hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL); - - /* Read LE Supported States */ - hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); - - /* LE-only controllers have LE implicitly enabled */ - if (!lmp_bredr_capable(hdev)) - set_bit(HCI_LE_ENABLED, &hdev->dev_flags); -} - -static u8 hci_get_inquiry_mode(struct hci_dev *hdev) -{ - if (lmp_ext_inq_capable(hdev)) - return 0x02; - - if (lmp_inq_rssi_capable(hdev)) - return 0x01; - - if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && - hdev->lmp_subver == 0x0757) - return 0x01; - - if (hdev->manufacturer == 15) { - if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963) - return 0x01; - if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963) - return 0x01; - if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965) - return 0x01; - } - - if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && - hdev->lmp_subver == 0x1805) - return 0x01; - - return 0x00; -} - -static void hci_setup_inquiry_mode(struct hci_request *req) -{ - u8 mode; - - mode = hci_get_inquiry_mode(req->hdev); - - hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode); -} - -static void hci_setup_event_mask(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - - /* The second byte is 0xff instead of 0x9f (two reserved bits - * disabled) since a Broadcom 1.2 dongle doesn't respond to the - * command otherwise. - */ - u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; - - /* CSR 1.1 dongles does not accept any bitfield so don't try to set - * any event mask for pre 1.2 devices. 
- */ - if (hdev->hci_ver < BLUETOOTH_VER_1_2) - return; - - if (lmp_bredr_capable(hdev)) { - events[4] |= 0x01; /* Flow Specification Complete */ - events[4] |= 0x02; /* Inquiry Result with RSSI */ - events[4] |= 0x04; /* Read Remote Extended Features Complete */ - events[5] |= 0x08; /* Synchronous Connection Complete */ - events[5] |= 0x10; /* Synchronous Connection Changed */ - } - - if (lmp_inq_rssi_capable(hdev)) - events[4] |= 0x02; /* Inquiry Result with RSSI */ - - if (lmp_sniffsubr_capable(hdev)) - events[5] |= 0x20; /* Sniff Subrating */ - - if (lmp_pause_enc_capable(hdev)) - events[5] |= 0x80; /* Encryption Key Refresh Complete */ - - if (lmp_ext_inq_capable(hdev)) - events[5] |= 0x40; /* Extended Inquiry Result */ - - if (lmp_no_flush_capable(hdev)) - events[7] |= 0x01; /* Enhanced Flush Complete */ - - if (lmp_lsto_capable(hdev)) - events[6] |= 0x80; /* Link Supervision Timeout Changed */ - - if (lmp_ssp_capable(hdev)) { - events[6] |= 0x01; /* IO Capability Request */ - events[6] |= 0x02; /* IO Capability Response */ - events[6] |= 0x04; /* User Confirmation Request */ - events[6] |= 0x08; /* User Passkey Request */ - events[6] |= 0x10; /* Remote OOB Data Request */ - events[6] |= 0x20; /* Simple Pairing Complete */ - events[7] |= 0x04; /* User Passkey Notification */ - events[7] |= 0x08; /* Keypress Notification */ - events[7] |= 0x10; /* Remote Host Supported - * Features Notification - */ - } - - if (lmp_le_capable(hdev)) - events[7] |= 0x20; /* LE Meta-Event */ - - hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events); - - if (lmp_le_capable(hdev)) { - memset(events, 0, sizeof(events)); - events[0] = 0x1f; - hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, - sizeof(events), events); - } -} - -static void hci_init2_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - if (lmp_bredr_capable(hdev)) - bredr_setup(req); - - if (lmp_le_capable(hdev)) - le_setup(req); - - hci_setup_event_mask(req); - - if (hdev->hci_ver > BLUETOOTH_VER_1_1) - hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); - - if (lmp_ssp_capable(hdev)) { - if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { - u8 mode = 0x01; - hci_req_add(req, HCI_OP_WRITE_SSP_MODE, - sizeof(mode), &mode); - } else { - struct hci_cp_write_eir cp; - - memset(hdev->eir, 0, sizeof(hdev->eir)); - memset(&cp, 0, sizeof(cp)); - - hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); - } - } - - if (lmp_inq_rssi_capable(hdev)) - hci_setup_inquiry_mode(req); - - if (lmp_inq_tx_pwr_capable(hdev)) - hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); - - if (lmp_ext_feat_capable(hdev)) { - struct hci_cp_read_local_ext_features cp; - - cp.page = 0x01; - hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, - sizeof(cp), &cp); - } - - if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) { - u8 enable = 1; - hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable), - &enable); - } -} - -static void hci_setup_link_policy(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_def_link_policy cp; - u16 link_policy = 0; - - if (lmp_rswitch_capable(hdev)) - link_policy |= HCI_LP_RSWITCH; - if (lmp_hold_capable(hdev)) - link_policy |= HCI_LP_HOLD; - if (lmp_sniff_capable(hdev)) - link_policy |= HCI_LP_SNIFF; - if (lmp_park_capable(hdev)) - link_policy |= HCI_LP_PARK; - - cp.policy = cpu_to_le16(link_policy); - hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); -} - -static void hci_set_le_support(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; 
- struct hci_cp_write_le_host_supported cp; - - /* LE-only devices do not support explicit enablement */ - if (!lmp_bredr_capable(hdev)) - return; - - memset(&cp, 0, sizeof(cp)); - - if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { - cp.le = 0x01; - cp.simul = lmp_le_br_capable(hdev); - } - - if (cp.le != lmp_host_le_capable(hdev)) - hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), - &cp); -} - -static void hci_init3_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - u8 p; - - /* Some Broadcom based Bluetooth controllers do not support the - * Delete Stored Link Key command. They are clearly indicating its - * absence in the bit mask of supported commands. - * - * Check the supported commands and only if the the command is marked - * as supported send it. If not supported assume that the controller - * does not have actual support for stored link keys which makes this - * command redundant anyway. - */ - if (hdev->commands[6] & 0x80) { - struct hci_cp_delete_stored_link_key cp; - - bacpy(&cp.bdaddr, BDADDR_ANY); - cp.delete_all = 0x01; - hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, - sizeof(cp), &cp); - } - - if (hdev->commands[5] & 0x10) - hci_setup_link_policy(req); - - if (lmp_le_capable(hdev)) { - hci_set_le_support(req); - hci_update_ad(req); - } - - /* Read features beyond page 1 if available */ - for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { - struct hci_cp_read_local_ext_features cp; - - cp.page = p; - hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, - sizeof(cp), &cp); - } -} - -static int __hci_init(struct hci_dev *hdev) -{ - int err; - - err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT); - if (err < 0) - return err; - - /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode - * BR/EDR/LE type controllers. AMP controllers only need the - * first stage init. - */ - if (hdev->dev_type != HCI_BREDR) - return 0; - - err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT); - if (err < 0) - return err; - - return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT); -} - -static void hci_scan_req(struct hci_request *req, unsigned long opt) -{ - __u8 scan = opt; - - BT_DBG("%s %x", req->hdev->name, scan); - - /* Inquiry and Page scans */ - hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); -} - -static void hci_auth_req(struct hci_request *req, unsigned long opt) -{ - __u8 auth = opt; - - BT_DBG("%s %x", req->hdev->name, auth); - - /* Authentication */ - hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); -} - -static void hci_encrypt_req(struct hci_request *req, unsigned long opt) -{ - __u8 encrypt = opt; - - BT_DBG("%s %x", req->hdev->name, encrypt); - - /* Encryption */ - hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); -} - -static void hci_linkpol_req(struct hci_request *req, unsigned long opt) -{ - __le16 policy = cpu_to_le16(opt); - - BT_DBG("%s %x", req->hdev->name, policy); - - /* Default link policy */ - hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); -} - /* Get HCI device by index. * Device is held on return. 
 */
-struct hci_dev *hci_dev_get(int index)
+static struct hci_dev *__hci_dev_get(int index, int *srcu_index)
 {
 	struct hci_dev *hdev = NULL, *d;
 
@@ -710,6 +77,8 @@ struct hci_dev *hci_dev_get(int index)
 	list_for_each_entry(d, &hci_dev_list, list) {
 		if (d->id == index) {
 			hdev = hci_dev_hold(d);
+			if (srcu_index)
+				*srcu_index = srcu_read_lock(&d->srcu);
 			break;
 		}
 	}
@@ -717,6 +86,22 @@ struct hci_dev *hci_dev_get(int index)
 	return hdev;
 }
 
+struct hci_dev *hci_dev_get(int index)
+{
+	return __hci_dev_get(index, NULL);
+}
+
+static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index)
+{
+	return __hci_dev_get(index, srcu_index);
+}
+
+static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index)
+{
+	srcu_read_unlock(&hdev->srcu, srcu_index);
+	hci_dev_put(hdev);
+}
+
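Note: hci_dev_get_srcu()/hci_dev_put_srcu() added above let an ioctl path hold the device inside an SRCU read-side section, so a concurrent unregister waits for it before tearing the device down. A minimal sketch of the pattern, mirroring its use in hci_dev_reset() further down (illustrative only; dev_id is a hypothetical variable):

	int srcu_index;
	struct hci_dev *hdev;

	hdev = hci_dev_get_srcu(dev_id, &srcu_index); /* ref + SRCU read lock */
	if (!hdev)
		return -ENODEV;

	/* ... use hdev; SRCU readers may sleep, unlike plain RCU ... */

	hci_dev_put_srcu(hdev, srcu_index); /* SRCU unlock + drop ref */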
 /* ---- Inquiry support ---- */
 
 bool hci_discovery_active(struct hci_dev *hdev)
@@ -735,14 +120,18 @@ bool hci_discovery_active(struct hci_dev *hdev)
 
 void hci_discovery_set_state(struct hci_dev *hdev, int state)
 {
-	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
+	int old_state = hdev->discovery.state;
 
-	if (hdev->discovery.state == state)
+	if (old_state == state)
 		return;
 
+	hdev->discovery.state = state;
+
 	switch (state) {
 	case DISCOVERY_STOPPED:
-		if (hdev->discovery.state != DISCOVERY_STARTING)
+		hci_update_passive_scan(hdev);
+
+		if (old_state != DISCOVERY_STARTING)
 			mgmt_discovering(hdev, 0);
 		break;
 	case DISCOVERY_STARTING:
@@ -756,7 +145,7 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
 		break;
 	}
 
-	hdev->discovery.state = state;
+	bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
 }
 
 void hci_inquiry_cache_flush(struct hci_dev *hdev)
@@ -843,23 +232,24 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
 	list_add(&ie->list, pos);
 }
 
-bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
-			      bool name_known, bool *ssp)
+u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
+			     bool name_known)
 {
 	struct discovery_state *cache = &hdev->discovery;
 	struct inquiry_entry *ie;
+	u32 flags = 0;
 
 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
 
-	hci_remove_remote_oob_data(hdev, &data->bdaddr);
+	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
 
-	if (ssp)
-		*ssp = data->ssp_mode;
+	if (!data->ssp_mode)
+		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
 
 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
 	if (ie) {
-		if (ie->data.ssp_mode && ssp)
-			*ssp = true;
+		if (!ie->data.ssp_mode)
+			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
 
 		if (ie->name_state == NAME_NEEDED &&
 		    data->rssi != ie->data.rssi) {
@@ -871,9 +261,11 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
 	}
 
 	/* Entry not in the cache. Add new one. */
-	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
-	if (!ie)
-		return false;
+	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
+	if (!ie) {
+		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
+		goto done;
+	}
 
 	list_add(&ie->all, &cache->all);
 
@@ -896,9 +288,10 @@ update:
 	cache->timestamp = jiffies;
 
 	if (ie->name_state == NAME_NOT_KNOWN)
-		return false;
+		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
 
-	return true;
+done:
+	return flags;
 }
 
 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
@@ -929,37 +322,12 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
 	return copied;
 }
 
-static void hci_inq_req(struct hci_request *req, unsigned long opt)
-{
-	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
-	struct hci_dev *hdev = req->hdev;
-	struct hci_cp_inquiry cp;
-
-	BT_DBG("%s", hdev->name);
-
-	if (test_bit(HCI_INQUIRY, &hdev->flags))
-		return;
-
-	/* Start Inquiry */
-	memcpy(&cp.lap, &ir->lap, 3);
-	cp.length = ir->length;
-	cp.num_rsp = ir->num_rsp;
-	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
-}
-
-static int wait_inquiry(void *word)
-{
-	schedule();
-	return signal_pending(current);
-}
-
 int hci_inquiry(void __user *arg)
 {
 	__u8 __user *ptr = arg;
 	struct hci_inquiry_req ir;
 	struct hci_dev *hdev;
 	int err = 0, do_inquiry = 0, max_rsp;
-	long timeo;
 	__u8 *buf;
 
 	if (copy_from_user(&ir, ptr, sizeof(ir)))
@@ -969,6 +337,27 @@ int hci_inquiry(void __user *arg)
 	if (!hdev)
 		return -ENODEV;
 
+	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+		err = -EBUSY;
+		goto done;
+	}
+
+	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+		err = -EOPNOTSUPP;
+		goto done;
+	}
+
+	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+		err = -EOPNOTSUPP;
+		goto done;
+	}
+
+	/* Restrict maximum inquiry length to 60 seconds */
+	if (ir.length > 60) {
+		err = -EINVAL;
+		goto done;
+	}
+
 	hci_dev_lock(hdev);
 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
@@ -977,20 +366,22 @@ int hci_inquiry(void __user *arg)
 	}
 	hci_dev_unlock(hdev);
 
-	timeo = ir.length * msecs_to_jiffies(2000);
-
 	if (do_inquiry) {
-		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
-				   timeo);
+		hci_req_sync_lock(hdev);
+		err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
+		hci_req_sync_unlock(hdev);
+
 		if (err < 0)
 			goto done;
 
 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
 		 * cleared). If it is interrupted by a signal, return -EINTR.
 		 */
-		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
-				TASK_INTERRUPTIBLE))
-			return -EINTR;
+		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
+				TASK_INTERRUPTIBLE)) {
+			err = -EINTR;
+			goto done;
+		}
 	}
 
 	/* for unlimited number of responses we will use buffer with
@@ -1001,7 +392,7 @@ int hci_inquiry(void __user *arg)
 
 	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
 	 * copy it to the user space.
*/ - buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL); + buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL); if (!buf) { err = -ENOMEM; goto done; @@ -1028,297 +419,202 @@ done: return err; } -static u8 create_ad(struct hci_dev *hdev, u8 *ptr) +static int hci_dev_do_open(struct hci_dev *hdev) { - u8 ad_len = 0, flags = 0; - size_t name_len; - - if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) - flags |= LE_AD_GENERAL; + int ret = 0; - if (!lmp_bredr_capable(hdev)) - flags |= LE_AD_NO_BREDR; + BT_DBG("%s %p", hdev->name, hdev); - if (lmp_le_br_capable(hdev)) - flags |= LE_AD_SIM_LE_BREDR_CTRL; + hci_req_sync_lock(hdev); - if (lmp_host_le_br_capable(hdev)) - flags |= LE_AD_SIM_LE_BREDR_HOST; + ret = hci_dev_open_sync(hdev); - if (flags) { - BT_DBG("adv flags 0x%02x", flags); + hci_req_sync_unlock(hdev); + return ret; +} - ptr[0] = 2; - ptr[1] = EIR_FLAGS; - ptr[2] = flags; +/* ---- HCI ioctl helpers ---- */ - ad_len += 3; - ptr += 3; - } +int hci_dev_open(__u16 dev) +{ + struct hci_dev *hdev; + int err; - if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) { - ptr[0] = 2; - ptr[1] = EIR_TX_POWER; - ptr[2] = (u8) hdev->adv_tx_power; + hdev = hci_dev_get(dev); + if (!hdev) + return -ENODEV; - ad_len += 3; - ptr += 3; + /* Devices that are marked as unconfigured can only be powered + * up as user channel. Trying to bring them up as normal devices + * will result into a failure. Only user channel operation is + * possible. + * + * When this function is called for a user channel, the flag + * HCI_USER_CHANNEL will be set first before attempting to + * open the device. + */ + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + err = -EOPNOTSUPP; + goto done; } - name_len = strlen(hdev->dev_name); - if (name_len > 0) { - size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2; - - if (name_len > max_len) { - name_len = max_len; - ptr[1] = EIR_NAME_SHORT; - } else - ptr[1] = EIR_NAME_COMPLETE; + /* We need to ensure that no other power on/off work is pending + * before proceeding to call hci_dev_do_open. This is + * particularly important if the setup procedure has not yet + * completed. + */ + if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) + cancel_delayed_work(&hdev->power_off); - ptr[0] = name_len + 1; + /* After this call it is guaranteed that the setup procedure + * has finished. This means that error conditions like RFKILL + * or no valid public or static random address apply. + */ + flush_workqueue(hdev->req_workqueue); - memcpy(ptr + 2, hdev->dev_name, name_len); + /* For controllers not using the management interface and that + * are brought up using legacy ioctl, set the HCI_BONDABLE bit + * so that pairing works for them. Once the management interface + * is in use this bit will be cleared again and userspace has + * to explicitly enable it. 
+ */ + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + !hci_dev_test_flag(hdev, HCI_MGMT)) + hci_dev_set_flag(hdev, HCI_BONDABLE); - ad_len += (name_len + 2); - ptr += (name_len + 2); - } + err = hci_dev_do_open(hdev); - return ad_len; +done: + hci_dev_put(hdev); + return err; } -void hci_update_ad(struct hci_request *req) +int hci_dev_do_close(struct hci_dev *hdev) { - struct hci_dev *hdev = req->hdev; - struct hci_cp_le_set_adv_data cp; - u8 len; - - if (!lmp_le_capable(hdev)) - return; - - memset(&cp, 0, sizeof(cp)); + int err; - len = create_ad(hdev, cp.data); + BT_DBG("%s %p", hdev->name, hdev); - if (hdev->adv_data_len == len && - memcmp(cp.data, hdev->adv_data, len) == 0) - return; + hci_req_sync_lock(hdev); - memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); - hdev->adv_data_len = len; + err = hci_dev_close_sync(hdev); - cp.length = len; + hci_req_sync_unlock(hdev); - hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); + return err; } -/* ---- HCI ioctl helpers ---- */ - -int hci_dev_open(__u16 dev) +int hci_dev_close(__u16 dev) { struct hci_dev *hdev; - int ret = 0; + int err; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; - BT_DBG("%s %p", hdev->name, hdev); - - hci_req_lock(hdev); - - if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) { - ret = -ENODEV; + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + err = -EBUSY; goto done; } - if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { - ret = -ERFKILL; - goto done; - } - - if (test_bit(HCI_UP, &hdev->flags)) { - ret = -EALREADY; - goto done; - } - - if (hdev->open(hdev)) { - ret = -EIO; - goto done; - } - - atomic_set(&hdev->cmd_cnt, 1); - set_bit(HCI_INIT, &hdev->flags); - - if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags)) - ret = hdev->setup(hdev); - - if (!ret) { - /* Treat all non BR/EDR controllers as raw devices if - * enable_hs is not set. 
-	 */
-	if (hdev->dev_type != HCI_BREDR && !enable_hs)
-		set_bit(HCI_RAW, &hdev->flags);
-
-	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
-		set_bit(HCI_RAW, &hdev->flags);
-
-	if (!test_bit(HCI_RAW, &hdev->flags))
-		ret = __hci_init(hdev);
-	}
-
-	clear_bit(HCI_INIT, &hdev->flags);
-
-	if (!ret) {
-		hci_dev_hold(hdev);
-		set_bit(HCI_UP, &hdev->flags);
-		hci_notify(hdev, HCI_DEV_UP);
-		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
-		    mgmt_valid_hdev(hdev)) {
-			hci_dev_lock(hdev);
-			mgmt_powered(hdev, 1);
-			hci_dev_unlock(hdev);
-		}
-	} else {
-		/* Init failed, cleanup */
-		flush_work(&hdev->tx_work);
-		flush_work(&hdev->cmd_work);
-		flush_work(&hdev->rx_work);
-
-		skb_queue_purge(&hdev->cmd_q);
-		skb_queue_purge(&hdev->rx_q);
-
-		if (hdev->flush)
-			hdev->flush(hdev);
-
-		if (hdev->sent_cmd) {
-			kfree_skb(hdev->sent_cmd);
-			hdev->sent_cmd = NULL;
-		}
+	cancel_work_sync(&hdev->power_on);
+	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
+		cancel_delayed_work(&hdev->power_off);
 
-		hdev->close(hdev);
-		hdev->flags = 0;
-	}
+	err = hci_dev_do_close(hdev);
 
 done:
-	hci_req_unlock(hdev);
 	hci_dev_put(hdev);
-	return ret;
+	return err;
 }
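Note: hci_dev_close() above is reached via the HCIDEVDOWN ioctl on a raw HCI control socket. A minimal userspace sketch, assuming the BlueZ <bluetooth/hci.h> headers (illustrative only, error handling trimmed):

	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <bluetooth/bluetooth.h>
	#include <bluetooth/hci.h>

	int main(void)
	{
		/* Control socket for HCI device ioctls (no bind() needed) */
		int ctl = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC,
				 BTPROTO_HCI);
		if (ctl < 0)
			return 1;

		/* Power down hci0; kernel side runs hci_dev_close(0) */
		ioctl(ctl, HCIDEVDOWN, 0);
		close(ctl);
		return 0;
	}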
 
-static int hci_dev_do_close(struct hci_dev *hdev)
+static int hci_dev_do_reset(struct hci_dev *hdev)
 {
-	BT_DBG("%s %p", hdev->name, hdev);
-
-	cancel_delayed_work(&hdev->power_off);
-
-	hci_req_cancel(hdev, ENODEV);
-	hci_req_lock(hdev);
+	int ret;
 
-	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
-		del_timer_sync(&hdev->cmd_timer);
-		hci_req_unlock(hdev);
-		return 0;
-	}
+	BT_DBG("%s %p", hdev->name, hdev);
 
-	/* Flush RX and TX works */
-	flush_work(&hdev->tx_work);
-	flush_work(&hdev->rx_work);
+	hci_req_sync_lock(hdev);
 
-	if (hdev->discov_timeout > 0) {
-		cancel_delayed_work(&hdev->discov_off);
-		hdev->discov_timeout = 0;
-		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
-	}
+	/* Drop queues */
+	skb_queue_purge(&hdev->rx_q);
+	skb_queue_purge(&hdev->cmd_q);
 
-	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
-		cancel_delayed_work(&hdev->service_cache);
+	/* Cancel these to avoid queueing non-chained pending work */
+	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
+	/* Wait for
+	 *
+	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
+	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
+	 *
+	 * inside RCU section to see the flag or complete scheduling.
+	 */
+	synchronize_rcu();
+	/* Explicitly cancel works in case scheduled after setting the flag. */
+	cancel_delayed_work(&hdev->cmd_timer);
+	cancel_delayed_work(&hdev->ncmd_timer);
 
-	cancel_delayed_work_sync(&hdev->le_scan_disable);
+	/* Avoid potential lockdep warnings from the *_flush() calls by
+	 * ensuring the workqueue is empty up front.
+	 */
+	drain_workqueue(hdev->workqueue);
 
 	hci_dev_lock(hdev);
 	hci_inquiry_cache_flush(hdev);
 	hci_conn_hash_flush(hdev);
 	hci_dev_unlock(hdev);
 
-	hci_notify(hdev, HCI_DEV_DOWN);
-
 	if (hdev->flush)
 		hdev->flush(hdev);
 
-	/* Reset device */
-	skb_queue_purge(&hdev->cmd_q);
-	atomic_set(&hdev->cmd_cnt, 1);
-	if (!test_bit(HCI_RAW, &hdev->flags) &&
-	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
-		set_bit(HCI_INIT, &hdev->flags);
-		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
-		clear_bit(HCI_INIT, &hdev->flags);
-	}
-
-	/* flush cmd work */
-	flush_work(&hdev->cmd_work);
-
-	/* Drop queues */
-	skb_queue_purge(&hdev->rx_q);
-	skb_queue_purge(&hdev->cmd_q);
-	skb_queue_purge(&hdev->raw_q);
-
-	/* Drop last sent command */
-	if (hdev->sent_cmd) {
-		del_timer_sync(&hdev->cmd_timer);
-		kfree_skb(hdev->sent_cmd);
-		hdev->sent_cmd = NULL;
-	}
+	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
 
-	kfree_skb(hdev->recv_evt);
-	hdev->recv_evt = NULL;
-
-	/* After this point our queues are empty
-	 * and no tasks are scheduled. */
-	hdev->close(hdev);
-
-	/* Clear flags */
-	hdev->flags = 0;
-	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
-
-	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
-	    mgmt_valid_hdev(hdev)) {
-		hci_dev_lock(hdev);
-		mgmt_powered(hdev, 0);
-		hci_dev_unlock(hdev);
-	}
-
-	/* Controller radio is available but is currently powered down */
-	hdev->amp_status = 0;
-
-	memset(hdev->eir, 0, sizeof(hdev->eir));
-	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+	atomic_set(&hdev->cmd_cnt, 1);
+	hdev->acl_cnt = 0;
+	hdev->sco_cnt = 0;
+	hdev->le_cnt = 0;
+	hdev->iso_cnt = 0;
 
-	hci_req_unlock(hdev);
+	ret = hci_reset_sync(hdev);
 
-	hci_dev_put(hdev);
-	return 0;
+	hci_req_sync_unlock(hdev);
+	return ret;
 }
 
-int hci_dev_close(__u16 dev)
+int hci_dev_reset(__u16 dev)
 {
 	struct hci_dev *hdev;
-	int err;
+	int err, srcu_index;
 
-	hdev = hci_dev_get(dev);
+	hdev = hci_dev_get_srcu(dev, &srcu_index);
 	if (!hdev)
 		return -ENODEV;
 
-	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-		cancel_delayed_work(&hdev->power_off);
+	if (!test_bit(HCI_UP, &hdev->flags)) {
+		err = -ENETDOWN;
+		goto done;
+	}
 
-	err = hci_dev_do_close(hdev);
+	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+		err = -EBUSY;
+		goto done;
+	}
 
-	hci_dev_put(hdev);
+	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+		err = -EOPNOTSUPP;
+		goto done;
+	}
+
+	err = hci_dev_do_reset(hdev);
+
+done:
+	hci_dev_put_srcu(hdev, srcu_index);
 	return err;
 }
 
-int hci_dev_reset(__u16 dev)
+int hci_dev_reset_stat(__u16 dev)
 {
 	struct hci_dev *hdev;
 	int ret = 0;
@@ -1327,55 +623,64 @@ int hci_dev_reset(__u16 dev)
 	if (!hdev)
 		return -ENODEV;
 
-	hci_req_lock(hdev);
-
-	if (!test_bit(HCI_UP, &hdev->flags))
+	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+		ret = -EBUSY;
 		goto done;
+	}
 
-	/* Drop queues */
-	skb_queue_purge(&hdev->rx_q);
-	skb_queue_purge(&hdev->cmd_q);
-
-	hci_dev_lock(hdev);
-	hci_inquiry_cache_flush(hdev);
-	hci_conn_hash_flush(hdev);
-	hci_dev_unlock(hdev);
-
-	if (hdev->flush)
-		hdev->flush(hdev);
-
-	atomic_set(&hdev->cmd_cnt, 1);
-	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
+	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+		ret = -EOPNOTSUPP;
+		goto done;
+	}
 
-	if (!test_bit(HCI_RAW, &hdev->flags))
-		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
+	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
 done:
-	hci_req_unlock(hdev);
 	hci_dev_put(hdev);
 	return ret;
 }
 
-int hci_dev_reset_stat(__u16 dev)
+static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
 {
-
struct hci_dev *hdev; - int ret = 0; + bool conn_changed, discov_changed; - hdev = hci_dev_get(dev); - if (!hdev) - return -ENODEV; + BT_DBG("%s scan 0x%02x", hdev->name, scan); - memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); + if ((scan & SCAN_PAGE)) + conn_changed = !hci_dev_test_and_set_flag(hdev, + HCI_CONNECTABLE); + else + conn_changed = hci_dev_test_and_clear_flag(hdev, + HCI_CONNECTABLE); - hci_dev_put(hdev); + if ((scan & SCAN_INQUIRY)) { + discov_changed = !hci_dev_test_and_set_flag(hdev, + HCI_DISCOVERABLE); + } else { + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + discov_changed = hci_dev_test_and_clear_flag(hdev, + HCI_DISCOVERABLE); + } - return ret; + if (!hci_dev_test_flag(hdev, HCI_MGMT)) + return; + + if (conn_changed || discov_changed) { + /* In case this was disabled through mgmt */ + hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); + + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + hci_update_adv_data(hdev, hdev->cur_adv_instance); + + mgmt_new_settings(hdev); + } } int hci_dev_cmd(unsigned int cmd, void __user *arg) { struct hci_dev *hdev; struct hci_dev_req dr; + __le16 policy; int err = 0; if (copy_from_user(&dr, arg, sizeof(dr))) @@ -1385,10 +690,25 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) if (!hdev) return -ENODEV; + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + err = -EBUSY; + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + err = -EOPNOTSUPP; + goto done; + } + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { + err = -EOPNOTSUPP; + goto done; + } + switch (cmd) { case HCISETAUTH: - err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, - HCI_INIT_TIMEOUT); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); break; case HCISETENCRYPT: @@ -1399,24 +719,34 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) if (!test_bit(HCI_AUTH, &hdev->flags)) { /* Auth must be enabled first */ - err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, - HCI_INIT_TIMEOUT); + err = hci_cmd_sync_status(hdev, + HCI_OP_WRITE_AUTH_ENABLE, + 1, &dr.dev_opt, + HCI_CMD_TIMEOUT); if (err) break; } - err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt, - HCI_INIT_TIMEOUT); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); break; case HCISETSCAN: - err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, - HCI_INIT_TIMEOUT); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); + + /* Ensure that the connectable and discoverable states + * get correctly modified as this was a non-mgmt change. 
+ */ + if (!err) + hci_update_passive_scan_state(hdev, dr.dev_opt); break; case HCISETLINKPOL: - err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt, - HCI_INIT_TIMEOUT); + policy = cpu_to_le16(dr.dev_opt); + + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, + 2, &policy, HCI_CMD_TIMEOUT); break; case HCISETLINKMODE: @@ -1425,7 +755,11 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) break; case HCISETPTYPE: + if (hdev->pkt_type == (__u16) dr.dev_opt) + break; + hdev->pkt_type = (__u16) dr.dev_opt; + mgmt_phy_configuration_changed(hdev, NULL); break; case HCISETACLMTU: @@ -1443,6 +777,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) break; } +done: hci_dev_put(hdev); return err; } @@ -1452,7 +787,7 @@ int hci_get_dev_list(void __user *arg) struct hci_dev *hdev; struct hci_dev_list_req *dl; struct hci_dev_req *dr; - int n = 0, size, err; + int n = 0, err; __u16 dev_num; if (get_user(dev_num, (__u16 __user *) arg)) @@ -1461,24 +796,26 @@ int hci_get_dev_list(void __user *arg) if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) return -EINVAL; - size = sizeof(*dl) + dev_num * sizeof(*dr); - - dl = kzalloc(size, GFP_KERNEL); + dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL); if (!dl) return -ENOMEM; + dl->dev_num = dev_num; dr = dl->dev_req; read_lock(&hci_dev_list_lock); list_for_each_entry(hdev, &hci_dev_list, list) { - if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) - cancel_delayed_work(&hdev->power_off); + unsigned long flags = hdev->flags; - if (!test_bit(HCI_MGMT, &hdev->dev_flags)) - set_bit(HCI_PAIRABLE, &hdev->dev_flags); + /* When the auto-off is configured it means the transport + * is running, but in that case still indicate that the + * device is actually down. + */ + if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) + flags &= ~BIT(HCI_UP); - (dr + n)->dev_id = hdev->id; - (dr + n)->dev_opt = hdev->flags; + dr[n].dev_id = hdev->id; + dr[n].dev_opt = flags; if (++n >= dev_num) break; @@ -1486,9 +823,7 @@ int hci_get_dev_list(void __user *arg) read_unlock(&hci_dev_list_lock); dl->dev_num = n; - size = sizeof(*dl) + n * sizeof(*dr); - - err = copy_to_user(arg, dl, size); + err = copy_to_user(arg, dl, struct_size(dl, dev_req, n)); kfree(dl); return err ? -EFAULT : 0; @@ -1498,6 +833,7 @@ int hci_get_dev_info(void __user *arg) { struct hci_dev *hdev; struct hci_dev_info di; + unsigned long flags; int err = 0; if (copy_from_user(&di, arg, sizeof(di))) @@ -1507,16 +843,19 @@ int hci_get_dev_info(void __user *arg) if (!hdev) return -ENODEV; - if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) - cancel_delayed_work_sync(&hdev->power_off); - - if (!test_bit(HCI_MGMT, &hdev->dev_flags)) - set_bit(HCI_PAIRABLE, &hdev->dev_flags); + /* When the auto-off is configured it means the transport + * is running, but in that case still indicate that the + * device is actually down. 
+ */ + if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) + flags = hdev->flags & ~BIT(HCI_UP); + else + flags = hdev->flags; - strcpy(di.name, hdev->name); + strscpy(di.name, hdev->name, sizeof(di.name)); di.bdaddr = hdev->bdaddr; - di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4); - di.flags = hdev->flags; + di.type = (hdev->bus & 0x0f); + di.flags = flags; di.pkt_type = hdev->pkt_type; if (lmp_bredr_capable(hdev)) { di.acl_mtu = hdev->acl_mtu; @@ -1545,16 +884,54 @@ int hci_get_dev_info(void __user *arg) /* ---- Interface to HCI drivers ---- */ +static int hci_dev_do_poweroff(struct hci_dev *hdev) +{ + int err; + + BT_DBG("%s %p", hdev->name, hdev); + + hci_req_sync_lock(hdev); + + err = hci_set_powered_sync(hdev, false); + + hci_req_sync_unlock(hdev); + + return err; +} + static int hci_rfkill_set_block(void *data, bool blocked) { struct hci_dev *hdev = data; + int err; BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); - if (!blocked) + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) + return -EBUSY; + + if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED)) return 0; - hci_dev_do_close(hdev); + if (blocked) { + hci_dev_set_flag(hdev, HCI_RFKILLED); + + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) { + err = hci_dev_do_poweroff(hdev); + if (err) { + bt_dev_err(hdev, "Error when powering off device on rfkill (%d)", + err); + + /* Make sure the device is still closed even if + * anything during power off sequence (eg. + * disconnecting devices) failed. + */ + hci_dev_do_close(hdev); + } + } + } else { + hci_dev_clear_flag(hdev, HCI_RFKILLED); + } return 0; } @@ -1570,18 +947,66 @@ static void hci_power_on(struct work_struct *work) BT_DBG("%s", hdev->name); - err = hci_dev_open(hdev->id); + if (test_bit(HCI_UP, &hdev->flags) && + hci_dev_test_flag(hdev, HCI_MGMT) && + hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { + cancel_delayed_work(&hdev->power_off); + err = hci_powered_update_sync(hdev); + mgmt_power_on(hdev, err); + return; + } + + err = hci_dev_do_open(hdev); if (err < 0) { + hci_dev_lock(hdev); mgmt_set_powered_failed(hdev, err); + hci_dev_unlock(hdev); return; } - if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) + /* During the HCI setup phase, a few error conditions are + * ignored and they need to be checked now. If they are still + * valid, it is important to turn the device back off. + */ + if (hci_dev_test_flag(hdev, HCI_RFKILLED) || + hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || + (!bacmp(&hdev->bdaddr, BDADDR_ANY) && + !bacmp(&hdev->static_addr, BDADDR_ANY))) { + hci_dev_clear_flag(hdev, HCI_AUTO_OFF); + hci_dev_do_close(hdev); + } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { queue_delayed_work(hdev->req_workqueue, &hdev->power_off, HCI_AUTO_OFF_TIMEOUT); + } - if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) + if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { + /* For unconfigured devices, set the HCI_RAW flag + * so that userspace can easily identify them. + */ + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + set_bit(HCI_RAW, &hdev->flags); + + /* For fully configured devices, this will send + * the Index Added event. For unconfigured devices, + * it will send Unconfigued Index Added event. + * + * Devices with HCI_QUIRK_RAW_DEVICE are ignored + * and no event will be send. + */ mgmt_index_added(hdev); + } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { + /* When the controller is now configured, then it + * is important to clear the HCI_RAW flag. 
+ */ + if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + clear_bit(HCI_RAW, &hdev->flags); + + /* Powering on the controller with HCI_CONFIG set only + * happens with the transition from unconfigured to + * configured. This will send the Index Added event. + */ + mgmt_index_added(hdev); + } } static void hci_power_off(struct work_struct *work) @@ -1594,25 +1019,25 @@ static void hci_power_off(struct work_struct *work) hci_dev_do_close(hdev); } -static void hci_discov_off(struct work_struct *work) +static void hci_error_reset(struct work_struct *work) { - struct hci_dev *hdev; - u8 scan = SCAN_PAGE; - - hdev = container_of(work, struct hci_dev, discov_off.work); + struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); + hci_dev_hold(hdev); BT_DBG("%s", hdev->name); - hci_dev_lock(hdev); - - hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan); + if (hdev->hw_error) + hdev->hw_error(hdev, hdev->hw_error_code); + else + bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code); - hdev->discov_timeout = 0; + if (!hci_dev_do_close(hdev)) + hci_dev_do_open(hdev); - hci_dev_unlock(hdev); + hci_dev_put(hdev); } -int hci_uuids_clear(struct hci_dev *hdev) +void hci_uuids_clear(struct hci_dev *hdev) { struct bt_uuid *uuid, *tmp; @@ -1620,45 +1045,87 @@ int hci_uuids_clear(struct hci_dev *hdev) list_del(&uuid->list); kfree(uuid); } +} - return 0; +void hci_link_keys_clear(struct hci_dev *hdev) +{ + struct link_key *key, *tmp; + + list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) { + list_del_rcu(&key->list); + kfree_rcu(key, rcu); + } } -int hci_link_keys_clear(struct hci_dev *hdev) +void hci_smp_ltks_clear(struct hci_dev *hdev) { - struct list_head *p, *n; + struct smp_ltk *k, *tmp; - list_for_each_safe(p, n, &hdev->link_keys) { - struct link_key *key; + list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { + list_del_rcu(&k->list); + kfree_rcu(k, rcu); + } +} - key = list_entry(p, struct link_key, list); +void hci_smp_irks_clear(struct hci_dev *hdev) +{ + struct smp_irk *k, *tmp; - list_del(p); - kfree(key); + list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { + list_del_rcu(&k->list); + kfree_rcu(k, rcu); } +} - return 0; +void hci_blocked_keys_clear(struct hci_dev *hdev) +{ + struct blocked_key *b, *tmp; + + list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) { + list_del_rcu(&b->list); + kfree_rcu(b, rcu); + } } -int hci_smp_ltks_clear(struct hci_dev *hdev) +bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]) { - struct smp_ltk *k, *tmp; + bool blocked = false; + struct blocked_key *b; - list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { - list_del(&k->list); - kfree(k); + rcu_read_lock(); + list_for_each_entry_rcu(b, &hdev->blocked_keys, list) { + if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) { + blocked = true; + break; + } } - return 0; + rcu_read_unlock(); + return blocked; } struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct link_key *k; - list_for_each_entry(k, &hdev->link_keys, list) - if (bacmp(bdaddr, &k->bdaddr) == 0) + rcu_read_lock(); + list_for_each_entry_rcu(k, &hdev->link_keys, list) { + if (bacmp(bdaddr, &k->bdaddr) == 0) { + rcu_read_unlock(); + + if (hci_is_blocked_key(hdev, + HCI_BLOCKED_KEY_TYPE_LINKKEY, + k->val)) { + bt_dev_warn_ratelimited(hdev, + "Link key blocked for %pMR", + &k->bdaddr); + return NULL; + } + return k; + } + } + rcu_read_unlock(); return NULL; } @@ -1682,6 +1149,10 @@ static bool 
hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, if (!conn) return true; + /* BR/EDR key derived using SC from an LE link */ + if (conn->type == LE_LINK) + return true; + /* Neither local nor remote side had no-bonding as requirement */ if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) return true; @@ -1699,40 +1170,114 @@ static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, return false; } -struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) +static u8 ltk_role(u8 type) +{ + if (type == SMP_LTK) + return HCI_ROLE_MASTER; + + return HCI_ROLE_SLAVE; +} + +struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, u8 role) { struct smp_ltk *k; - list_for_each_entry(k, &hdev->long_term_keys, list) { - if (k->ediv != ediv || - memcmp(rand, k->rand, sizeof(k->rand))) + rcu_read_lock(); + list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { + if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr)) continue; - return k; + if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { + rcu_read_unlock(); + + if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, + k->val)) { + bt_dev_warn_ratelimited(hdev, + "LTK blocked for %pMR", + &k->bdaddr); + return NULL; + } + + return k; + } } + rcu_read_unlock(); return NULL; } -struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, +struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) +{ + struct smp_irk *irk_to_return = NULL; + struct smp_irk *irk; + + rcu_read_lock(); + list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { + if (!bacmp(&irk->rpa, rpa)) { + irk_to_return = irk; + goto done; + } + } + + list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { + if (smp_irk_matches(hdev, irk->val, rpa)) { + bacpy(&irk->rpa, rpa); + irk_to_return = irk; + goto done; + } + } + +done: + if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, + irk_to_return->val)) { + bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", + &irk_to_return->bdaddr); + irk_to_return = NULL; + } + + rcu_read_unlock(); + + return irk_to_return; +} + +struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { - struct smp_ltk *k; + struct smp_irk *irk_to_return = NULL; + struct smp_irk *irk; - list_for_each_entry(k, &hdev->long_term_keys, list) - if (addr_type == k->bdaddr_type && - bacmp(bdaddr, &k->bdaddr) == 0) - return k; + /* Identity Address must be public or static random */ + if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) + return NULL; - return NULL; + rcu_read_lock(); + list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { + if (addr_type == irk->addr_type && + bacmp(bdaddr, &irk->bdaddr) == 0) { + irk_to_return = irk; + break; + } + } + + if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, + irk_to_return->val)) { + bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", + &irk_to_return->bdaddr); + irk_to_return = NULL; + } + + rcu_read_unlock(); + + return irk_to_return; } -int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, - bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) +struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, + bdaddr_t *bdaddr, u8 *val, u8 type, + u8 pin_len, bool *persistent) { struct link_key *key, *old_key; u8 old_key_type; - bool persistent; old_key = hci_find_link_key(hdev, bdaddr); if (old_key) { @@ -1740,10 
+1285,10 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, key = old_key; } else { old_key_type = conn ? conn->key_type : 0xff; - key = kzalloc(sizeof(*key), GFP_ATOMIC); + key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) - return -ENOMEM; - list_add(&key->list, &hdev->link_keys); + return NULL; + list_add_rcu(&key->list, &hdev->link_keys); } BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); @@ -1767,36 +1312,28 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, else key->type = type; - if (!new_key) - return 0; - - persistent = hci_persistent_key(hdev, conn, type, old_key_type); + if (persistent) + *persistent = hci_persistent_key(hdev, conn, type, + old_key_type); - mgmt_new_link_key(hdev, key, persistent); - - if (conn) - conn->flush_key = !persistent; - - return 0; + return key; } -int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, - int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16 - ediv, u8 rand[8]) +struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, u8 type, u8 authenticated, + u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) { struct smp_ltk *key, *old_key; + u8 role = ltk_role(type); - if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK)) - return 0; - - old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type); + old_key = hci_find_ltk(hdev, bdaddr, addr_type, role); if (old_key) key = old_key; else { - key = kzalloc(sizeof(*key), GFP_ATOMIC); + key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) - return -ENOMEM; - list_add(&key->list, &hdev->long_term_keys); + return NULL; + list_add_rcu(&key->list, &hdev->long_term_keys); } bacpy(&key->bdaddr, bdaddr); @@ -1804,17 +1341,34 @@ int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, memcpy(key->val, tk, sizeof(key->val)); key->authenticated = authenticated; key->ediv = ediv; + key->rand = rand; key->enc_size = enc_size; key->type = type; - memcpy(key->rand, rand, sizeof(key->rand)); - if (!new_key) - return 0; + return key; +} - if (type & HCI_SMP_LTK) - mgmt_new_ltk(hdev, key, 1); +struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, u8 val[16], bdaddr_t *rpa) +{ + struct smp_irk *irk; - return 0; + irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); + if (!irk) { + irk = kzalloc(sizeof(*irk), GFP_KERNEL); + if (!irk) + return NULL; + + bacpy(&irk->bdaddr, bdaddr); + irk->addr_type = addr_type; + + list_add_rcu(&irk->list, &hdev->identity_resolving_keys); + } + + memcpy(irk->val, val, 16); + bacpy(&irk->rpa, rpa); + + return irk; } int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) @@ -1827,68 +1381,149 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) BT_DBG("%s removing %pMR", hdev->name, bdaddr); - list_del(&key->list); - kfree(key); + list_del_rcu(&key->list); + kfree_rcu(key, rcu); return 0; } -int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr) +int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct smp_ltk *k, *tmp; + int removed = 0; list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { - if (bacmp(bdaddr, &k->bdaddr)) + if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) continue; BT_DBG("%s removing %pMR", hdev->name, bdaddr); - list_del(&k->list); - kfree(k); + list_del_rcu(&k->list); + kfree_rcu(k, rcu); + removed++; } - return 0; + return removed ? 
0 : -ENOENT; +} + +void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) +{ + struct smp_irk *k, *tmp; + + list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { + if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) + continue; + + BT_DBG("%s removing %pMR", hdev->name, bdaddr); + + list_del_rcu(&k->list); + kfree_rcu(k, rcu); + } +} + +bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) +{ + struct smp_ltk *k; + struct smp_irk *irk; + u8 addr_type; + + if (type == BDADDR_BREDR) { + if (hci_find_link_key(hdev, bdaddr)) + return true; + return false; + } + + /* Convert to HCI addr type which struct smp_ltk uses */ + if (type == BDADDR_LE_PUBLIC) + addr_type = ADDR_LE_DEV_PUBLIC; + else + addr_type = ADDR_LE_DEV_RANDOM; + + irk = hci_get_irk(hdev, bdaddr, addr_type); + if (irk) { + bdaddr = &irk->bdaddr; + addr_type = irk->addr_type; + } + + rcu_read_lock(); + list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { + if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) { + rcu_read_unlock(); + return true; + } + } + rcu_read_unlock(); + + return false; } /* HCI command timer function */ -static void hci_cmd_timeout(unsigned long arg) +static void hci_cmd_timeout(struct work_struct *work) { - struct hci_dev *hdev = (void *) arg; + struct hci_dev *hdev = container_of(work, struct hci_dev, + cmd_timer.work); - if (hdev->sent_cmd) { - struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; - u16 opcode = __le16_to_cpu(sent->opcode); + if (hdev->req_skb) { + u16 opcode = hci_skb_opcode(hdev->req_skb); - BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode); + bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode); + + hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT); } else { - BT_ERR("%s command tx timeout", hdev->name); + bt_dev_err(hdev, "command tx timeout"); } + if (hdev->reset) + hdev->reset(hdev); + atomic_set(&hdev->cmd_cnt, 1); queue_work(hdev->workqueue, &hdev->cmd_work); } +/* HCI ncmd timer function */ +static void hci_ncmd_timeout(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + ncmd_timer.work); + + bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0"); + + /* During HCI_INIT phase no events can be injected if the ncmd timer + * triggers since the procedure has its own timeout handling. 
+ */ + if (test_bit(HCI_INIT, &hdev->flags)) + return; + + /* This is an irrecoverable state, inject hardware error event */ + hci_reset_dev(hdev); +} + struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, - bdaddr_t *bdaddr) + bdaddr_t *bdaddr, u8 bdaddr_type) { struct oob_data *data; - list_for_each_entry(data, &hdev->remote_oob_data, list) - if (bacmp(bdaddr, &data->bdaddr) == 0) - return data; + list_for_each_entry(data, &hdev->remote_oob_data, list) { + if (bacmp(bdaddr, &data->bdaddr) != 0) + continue; + if (data->bdaddr_type != bdaddr_type) + continue; + return data; + } return NULL; } -int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr) +int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 bdaddr_type) { struct oob_data *data; - data = hci_find_remote_oob_data(hdev, bdaddr); + data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); if (!data) return -ENOENT; - BT_DBG("%s removing %pMR", hdev->name, bdaddr); + BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type); list_del(&data->list); kfree(data); @@ -1896,7 +1531,7 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr) return 0; } -int hci_remote_oob_data_clear(struct hci_dev *hdev) +void hci_remote_oob_data_clear(struct hci_dev *hdev) { struct oob_data *data, *n; @@ -1904,213 +1539,1020 @@ int hci_remote_oob_data_clear(struct hci_dev *hdev) list_del(&data->list); kfree(data); } - - return 0; } -int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash, - u8 *randomizer) +int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 bdaddr_type, u8 *hash192, u8 *rand192, + u8 *hash256, u8 *rand256) { struct oob_data *data; - data = hci_find_remote_oob_data(hdev, bdaddr); - + data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); if (!data) { - data = kmalloc(sizeof(*data), GFP_ATOMIC); + data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; bacpy(&data->bdaddr, bdaddr); + data->bdaddr_type = bdaddr_type; list_add(&data->list, &hdev->remote_oob_data); } - memcpy(data->hash, hash, sizeof(data->hash)); - memcpy(data->randomizer, randomizer, sizeof(data->randomizer)); + if (hash192 && rand192) { + memcpy(data->hash192, hash192, sizeof(data->hash192)); + memcpy(data->rand192, rand192, sizeof(data->rand192)); + if (hash256 && rand256) + data->present = 0x03; + } else { + memset(data->hash192, 0, sizeof(data->hash192)); + memset(data->rand192, 0, sizeof(data->rand192)); + if (hash256 && rand256) + data->present = 0x02; + else + data->present = 0x00; + } + + if (hash256 && rand256) { + memcpy(data->hash256, hash256, sizeof(data->hash256)); + memcpy(data->rand256, rand256, sizeof(data->rand256)); + } else { + memset(data->hash256, 0, sizeof(data->hash256)); + memset(data->rand256, 0, sizeof(data->rand256)); + if (hash192 && rand192) + data->present = 0x01; + } BT_DBG("%s for %pMR", hdev->name, bdaddr); return 0; } -struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) +/* This function requires the caller holds hdev->lock */ +struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *adv_instance; + + list_for_each_entry(adv_instance, &hdev->adv_instances, list) { + if (adv_instance->instance == instance) + return adv_instance; + } + + return NULL; +} + +/* This function requires the caller holds hdev->lock */ +struct adv_info *hci_find_adv_sid(struct hci_dev *hdev, u8 sid) +{ + struct adv_info *adv; + + list_for_each_entry(adv, 
&hdev->adv_instances, list) { + if (adv->sid == sid) + return adv; + } + + return NULL; +} + +/* This function requires the caller holds hdev->lock */ +struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *cur_instance; + + cur_instance = hci_find_adv_instance(hdev, instance); + if (!cur_instance) + return NULL; + + if (cur_instance == list_last_entry(&hdev->adv_instances, + struct adv_info, list)) + return list_first_entry(&hdev->adv_instances, + struct adv_info, list); + else + return list_next_entry(cur_instance, list); +} + +/* This function requires the caller holds hdev->lock */ +int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *adv_instance; + + adv_instance = hci_find_adv_instance(hdev, instance); + if (!adv_instance) + return -ENOENT; + + BT_DBG("%s removing %dMR", hdev->name, instance); + + if (hdev->cur_adv_instance == instance) { + if (hdev->adv_instance_timeout) { + cancel_delayed_work(&hdev->adv_instance_expire); + hdev->adv_instance_timeout = 0; + } + hdev->cur_adv_instance = 0x00; + } + + cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); + + list_del(&adv_instance->list); + kfree(adv_instance); + + hdev->adv_instance_cnt--; + + return 0; +} + +void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired) +{ + struct adv_info *adv_instance, *n; + + list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) + adv_instance->rpa_expired = rpa_expired; +} + +/* This function requires the caller holds hdev->lock */ +void hci_adv_instances_clear(struct hci_dev *hdev) +{ + struct adv_info *adv_instance, *n; + + if (hdev->adv_instance_timeout) { + disable_delayed_work(&hdev->adv_instance_expire); + hdev->adv_instance_timeout = 0; + } + + list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { + disable_delayed_work_sync(&adv_instance->rpa_expired_cb); + list_del(&adv_instance->list); + kfree(adv_instance); + } + + hdev->adv_instance_cnt = 0; + hdev->cur_adv_instance = 0x00; +} + +static void adv_instance_rpa_expired(struct work_struct *work) +{ + struct adv_info *adv_instance = container_of(work, struct adv_info, + rpa_expired_cb.work); + + BT_DBG(""); + + adv_instance->rpa_expired = true; +} + +/* This function requires the caller holds hdev->lock */ +struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, + u32 flags, u16 adv_data_len, u8 *adv_data, + u16 scan_rsp_len, u8 *scan_rsp_data, + u16 timeout, u16 duration, s8 tx_power, + u32 min_interval, u32 max_interval, + u8 mesh_handle) +{ + struct adv_info *adv; + + adv = hci_find_adv_instance(hdev, instance); + if (adv) { + memset(adv->adv_data, 0, sizeof(adv->adv_data)); + memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); + memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data)); + } else { + if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets || + instance < 1 || instance > hdev->le_num_of_adv_sets + 1) + return ERR_PTR(-EOVERFLOW); + + adv = kzalloc(sizeof(*adv), GFP_KERNEL); + if (!adv) + return ERR_PTR(-ENOMEM); + + adv->pending = true; + adv->instance = instance; + + /* If controller support only one set and the instance is set to + * 1 then there is no option other than using handle 0x00. 
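+ * Controllers exposing more than one advertising set can use the
+ * instance number directly as the advertising handle.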
+ */ + if (hdev->le_num_of_adv_sets == 1 && instance == 1) + adv->handle = 0x00; + else + adv->handle = instance; + + list_add(&adv->list, &hdev->adv_instances); + hdev->adv_instance_cnt++; + } + + adv->flags = flags; + adv->min_interval = min_interval; + adv->max_interval = max_interval; + adv->tx_power = tx_power; + /* Defining a mesh_handle changes the timing units to ms, + * rather than seconds, and ties the instance to the requested + * mesh_tx queue. + */ + adv->mesh = mesh_handle; + + hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data, + scan_rsp_len, scan_rsp_data); + + adv->timeout = timeout; + adv->remaining_time = timeout; + + if (duration == 0) + adv->duration = hdev->def_multi_adv_rotation_duration; + else + adv->duration = duration; + + INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired); + + BT_DBG("%s for %dMR", hdev->name, instance); + + return adv; +} + +/* This function requires the caller holds hdev->lock */ +struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u8 sid, + u32 flags, u8 data_len, u8 *data, + u32 min_interval, u32 max_interval) +{ + struct adv_info *adv; + + adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL, + 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE, + min_interval, max_interval, 0); + if (IS_ERR(adv)) + return adv; + + adv->sid = sid; + adv->periodic = true; + adv->per_adv_data_len = data_len; + + if (data) + memcpy(adv->per_adv_data, data, data_len); + + return adv; +} + +/* This function requires the caller holds hdev->lock */ +int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, + u16 adv_data_len, u8 *adv_data, + u16 scan_rsp_len, u8 *scan_rsp_data) +{ + struct adv_info *adv; + + adv = hci_find_adv_instance(hdev, instance); + + /* If advertisement doesn't exist, we can't modify its data */ + if (!adv) + return -ENOENT; + + if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) { + memset(adv->adv_data, 0, sizeof(adv->adv_data)); + memcpy(adv->adv_data, adv_data, adv_data_len); + adv->adv_data_len = adv_data_len; + adv->adv_data_changed = true; + } + + if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) { + memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); + memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len); + adv->scan_rsp_len = scan_rsp_len; + adv->scan_rsp_changed = true; + } + + /* Mark as changed if there are flags which would affect it */ + if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) || + adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) + adv->scan_rsp_changed = true; + + return 0; +} + +/* This function requires the caller holds hdev->lock */ +u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance) +{ + u32 flags; + struct adv_info *adv; + + if (instance == 0x00) { + /* Instance 0 always manages the "Tx Power" and "Flags" + * fields + */ + flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; + + /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting + * corresponds to the "connectable" instance flag. + */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) + flags |= MGMT_ADV_FLAG_CONNECTABLE; + + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) + flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; + else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) + flags |= MGMT_ADV_FLAG_DISCOV; + + return flags; + } + + adv = hci_find_adv_instance(hdev, instance); + + /* Return 0 when we got an invalid instance identifier. 
*/ + if (!adv) + return 0; + + return adv->flags; +} + +bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *adv; + + /* Instance 0x00 always set local name */ + if (instance == 0x00) + return true; + + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return false; + + if (adv->flags & MGMT_ADV_FLAG_APPEARANCE || + adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) + return true; + + return adv->scan_rsp_len ? true : false; +} + +/* This function requires the caller holds hdev->lock */ +void hci_adv_monitors_clear(struct hci_dev *hdev) +{ + struct adv_monitor *monitor; + int handle; + + idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) + hci_free_adv_monitor(hdev, monitor); + + idr_destroy(&hdev->adv_monitors_idr); +} + +/* Frees the monitor structure and do some bookkeepings. + * This function requires the caller holds hdev->lock. + */ +void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) +{ + struct adv_pattern *pattern; + struct adv_pattern *tmp; + + if (!monitor) + return; + + list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) { + list_del(&pattern->list); + kfree(pattern); + } + + if (monitor->handle) + idr_remove(&hdev->adv_monitors_idr, monitor->handle); + + if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) + hdev->adv_monitors_cnt--; + + kfree(monitor); +} + +/* Assigns handle to a monitor, and if offloading is supported and power is on, + * also attempts to forward the request to the controller. + * This function requires the caller holds hci_req_sync_lock. + */ +int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) +{ + int min, max, handle; + int status = 0; + + if (!monitor) + return -EINVAL; + + hci_dev_lock(hdev); + + min = HCI_MIN_ADV_MONITOR_HANDLE; + max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; + handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, + GFP_KERNEL); + + hci_dev_unlock(hdev); + + if (handle < 0) + return handle; + + monitor->handle = handle; + + if (!hdev_is_powered(hdev)) + return status; + + switch (hci_get_adv_monitor_offload_ext(hdev)) { + case HCI_ADV_MONITOR_EXT_NONE: + bt_dev_dbg(hdev, "add monitor %d status %d", + monitor->handle, status); + /* Message was not forwarded to controller - not an error */ + break; + + case HCI_ADV_MONITOR_EXT_MSFT: + status = msft_add_monitor_pattern(hdev, monitor); + bt_dev_dbg(hdev, "add monitor %d msft status %d", + handle, status); + break; + } + + return status; +} + +/* Attempts to tell the controller and free the monitor. If somehow the + * controller doesn't have a corresponding handle, remove anyway. + * This function requires the caller holds hci_req_sync_lock. 
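+ *
+ * Note that only -ENOENT from the MSFT path falls through to
+ * free_monitor; any other error is returned to the caller with the
+ * monitor still registered.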
+ */ +static int hci_remove_adv_monitor(struct hci_dev *hdev, + struct adv_monitor *monitor) +{ + int status = 0; + int handle; + + switch (hci_get_adv_monitor_offload_ext(hdev)) { + case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */ + bt_dev_dbg(hdev, "remove monitor %d status %d", + monitor->handle, status); + goto free_monitor; + + case HCI_ADV_MONITOR_EXT_MSFT: + handle = monitor->handle; + status = msft_remove_monitor(hdev, monitor); + bt_dev_dbg(hdev, "remove monitor %d msft status %d", + handle, status); + break; + } + + /* In case no matching handle registered, just free the monitor */ + if (status == -ENOENT) + goto free_monitor; + + return status; + +free_monitor: + if (status == -ENOENT) + bt_dev_warn(hdev, "Removing monitor with no matching handle %d", + monitor->handle); + hci_free_adv_monitor(hdev, monitor); + + return status; +} + +/* This function requires the caller holds hci_req_sync_lock */ +int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle) +{ + struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle); + + if (!monitor) + return -EINVAL; + + return hci_remove_adv_monitor(hdev, monitor); +} + +/* This function requires the caller holds hci_req_sync_lock */ +int hci_remove_all_adv_monitor(struct hci_dev *hdev) +{ + struct adv_monitor *monitor; + int idr_next_id = 0; + int status = 0; + + while (1) { + monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id); + if (!monitor) + break; + + status = hci_remove_adv_monitor(hdev, monitor); + if (status) + return status; + + idr_next_id++; + } + + return status; +} + +/* This function requires the caller holds hdev->lock */ +bool hci_is_adv_monitoring(struct hci_dev *hdev) +{ + return !idr_is_empty(&hdev->adv_monitors_idr); +} + +int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev) +{ + if (msft_monitor_supported(hdev)) + return HCI_ADV_MONITOR_EXT_MSFT; + + return HCI_ADV_MONITOR_EXT_NONE; +} + +struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, + bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *b; - list_for_each_entry(b, &hdev->blacklist, list) - if (bacmp(bdaddr, &b->bdaddr) == 0) + list_for_each_entry(b, bdaddr_list, list) { + if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) + return b; + } + + return NULL; +} + +struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( + struct list_head *bdaddr_list, bdaddr_t *bdaddr, + u8 type) +{ + struct bdaddr_list_with_irk *b; + + list_for_each_entry(b, bdaddr_list, list) { + if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) return b; + } return NULL; } -int hci_blacklist_clear(struct hci_dev *hdev) +struct bdaddr_list_with_flags * +hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list, + bdaddr_t *bdaddr, u8 type) { - struct list_head *p, *n; + struct bdaddr_list_with_flags *b; - list_for_each_safe(p, n, &hdev->blacklist) { - struct bdaddr_list *b; + list_for_each_entry(b, bdaddr_list, list) { + if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) + return b; + } - b = list_entry(p, struct bdaddr_list, list); + return NULL; +} - list_del(p); +void hci_bdaddr_list_clear(struct list_head *bdaddr_list) +{ + struct bdaddr_list *b, *n; + + list_for_each_entry_safe(b, n, bdaddr_list, list) { + list_del(&b->list); kfree(b); } +} + +int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type) +{ + struct bdaddr_list *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) + return -EBADF; + + if (hci_bdaddr_list_lookup(list, bdaddr, type)) + return -EEXIST; + + entry = 
kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + bacpy(&entry->bdaddr, bdaddr); + entry->bdaddr_type = type; + + list_add(&entry->list, list); return 0; } -int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) +int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u8 *peer_irk, u8 *local_irk) { - struct bdaddr_list *entry; + struct bdaddr_list_with_irk *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) + return -EBADF; + + if (hci_bdaddr_list_lookup(list, bdaddr, type)) + return -EEXIST; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + bacpy(&entry->bdaddr, bdaddr); + entry->bdaddr_type = type; + + if (peer_irk) + memcpy(entry->peer_irk, peer_irk, 16); + + if (local_irk) + memcpy(entry->local_irk, local_irk, 16); + + list_add(&entry->list, list); + + return 0; +} + +int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u32 flags) +{ + struct bdaddr_list_with_flags *entry; - if (bacmp(bdaddr, BDADDR_ANY) == 0) + if (!bacmp(bdaddr, BDADDR_ANY)) return -EBADF; - if (hci_blacklist_lookup(hdev, bdaddr)) + if (hci_bdaddr_list_lookup(list, bdaddr, type)) return -EEXIST; - entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); + entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; bacpy(&entry->bdaddr, bdaddr); + entry->bdaddr_type = type; + entry->flags = flags; - list_add(&entry->list, &hdev->blacklist); + list_add(&entry->list, list); - return mgmt_device_blocked(hdev, bdaddr, type); + return 0; } -int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) +int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *entry; - if (bacmp(bdaddr, BDADDR_ANY) == 0) - return hci_blacklist_clear(hdev); + if (!bacmp(bdaddr, BDADDR_ANY)) { + hci_bdaddr_list_clear(list); + return 0; + } - entry = hci_blacklist_lookup(hdev, bdaddr); + entry = hci_bdaddr_list_lookup(list, bdaddr, type); if (!entry) return -ENOENT; list_del(&entry->list); kfree(entry); - return mgmt_device_unblocked(hdev, bdaddr, type); + return 0; } -static void inquiry_complete(struct hci_dev *hdev, u8 status) +int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, + u8 type) { - if (status) { - BT_ERR("Failed to start inquiry: status %d", status); + struct bdaddr_list_with_irk *entry; - hci_dev_lock(hdev); - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - hci_dev_unlock(hdev); - return; + if (!bacmp(bdaddr, BDADDR_ANY)) { + hci_bdaddr_list_clear(list); + return 0; } + + entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type); + if (!entry) + return -ENOENT; + + list_del(&entry->list); + kfree(entry); + + return 0; } -static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status) +/* This function requires the caller holds hdev->lock */ +struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, + bdaddr_t *addr, u8 addr_type) { - /* General inquiry access code (GIAC) */ - u8 lap[3] = { 0x33, 0x8b, 0x9e }; - struct hci_request req; - struct hci_cp_inquiry cp; - int err; + struct hci_conn_params *params; + + list_for_each_entry(params, &hdev->le_conn_params, list) { + if (bacmp(¶ms->addr, addr) == 0 && + params->addr_type == addr_type) { + return params; + } + } + + return NULL; +} + +/* This function requires the caller holds hdev->lock or rcu_read_lock */ +struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, + bdaddr_t *addr, u8 addr_type) +{ + struct 
hci_conn_params *param; + + rcu_read_lock(); + + list_for_each_entry_rcu(param, list, action) { + if (bacmp(¶m->addr, addr) == 0 && + param->addr_type == addr_type) { + rcu_read_unlock(); + return param; + } + } + + rcu_read_unlock(); + + return NULL; +} - if (status) { - BT_ERR("Failed to disable LE scanning: status %d", status); +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_list_del_init(struct hci_conn_params *param) +{ + if (list_empty(¶m->action)) return; + + list_del_rcu(¶m->action); + synchronize_rcu(); + INIT_LIST_HEAD(¶m->action); +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_list_add(struct hci_conn_params *param, + struct list_head *list) +{ + list_add_rcu(¶m->action, list); +} + +/* This function requires the caller holds hdev->lock */ +struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, + bdaddr_t *addr, u8 addr_type) +{ + struct hci_conn_params *params; + + params = hci_conn_params_lookup(hdev, addr, addr_type); + if (params) + return params; + + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) { + bt_dev_err(hdev, "out of memory"); + return NULL; } - switch (hdev->discovery.type) { - case DISCOV_TYPE_LE: - hci_dev_lock(hdev); - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - hci_dev_unlock(hdev); - break; + bacpy(¶ms->addr, addr); + params->addr_type = addr_type; - case DISCOV_TYPE_INTERLEAVED: - hci_req_init(&req, hdev); + list_add(¶ms->list, &hdev->le_conn_params); + INIT_LIST_HEAD(¶ms->action); - memset(&cp, 0, sizeof(cp)); - memcpy(&cp.lap, lap, sizeof(cp.lap)); - cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN; - hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp); + params->conn_min_interval = hdev->le_conn_min_interval; + params->conn_max_interval = hdev->le_conn_max_interval; + params->conn_latency = hdev->le_conn_latency; + params->supervision_timeout = hdev->le_supv_timeout; + params->auto_connect = HCI_AUTO_CONN_DISABLED; - hci_dev_lock(hdev); + BT_DBG("addr %pMR (type %u)", addr, addr_type); - hci_inquiry_cache_flush(hdev); + return params; +} + +void hci_conn_params_free(struct hci_conn_params *params) +{ + hci_pend_le_list_del_init(params); + + if (params->conn) { + hci_conn_drop(params->conn); + hci_conn_put(params->conn); + } + + list_del(¶ms->list); + kfree(params); +} + +/* This function requires the caller holds hdev->lock */ +void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) +{ + struct hci_conn_params *params; - err = hci_req_run(&req, inquiry_complete); - if (err) { - BT_ERR("Inquiry request failed: err %d", err); - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + params = hci_conn_params_lookup(hdev, addr, addr_type); + if (!params) + return; + + hci_conn_params_free(params); + + hci_update_passive_scan(hdev); + + BT_DBG("addr %pMR (type %u)", addr, addr_type); +} + +/* This function requires the caller holds hdev->lock */ +void hci_conn_params_clear_disabled(struct hci_dev *hdev) +{ + struct hci_conn_params *params, *tmp; + + list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { + if (params->auto_connect != HCI_AUTO_CONN_DISABLED) + continue; + + /* If trying to establish one time connection to disabled + * device, leave the params, but mark them as just once. 
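+ * (explicit_connect marks parameters created for a one time
+ * connection requested by userspace, as opposed to devices added
+ * for background auto-connection.)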
+ */ + if (params->explicit_connect) { + params->auto_connect = HCI_AUTO_CONN_EXPLICIT; + continue; } - hci_dev_unlock(hdev); - break; + hci_conn_params_free(params); } + + BT_DBG("All LE disabled connection parameters were removed"); } -static void le_scan_disable_work(struct work_struct *work) +/* This function requires the caller holds hdev->lock */ +static void hci_conn_params_clear_all(struct hci_dev *hdev) { - struct hci_dev *hdev = container_of(work, struct hci_dev, - le_scan_disable.work); - struct hci_cp_le_set_scan_enable cp; - struct hci_request req; - int err; + struct hci_conn_params *params, *tmp; - BT_DBG("%s", hdev->name); + list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) + hci_conn_params_free(params); - hci_req_init(&req, hdev); + BT_DBG("All LE connection parameters were removed"); +} - memset(&cp, 0, sizeof(cp)); - cp.enable = LE_SCAN_DISABLE; - hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); +/* Copy the Identity Address of the controller. + * + * If the controller has a public BD_ADDR, then by default use that one. + * If this is a LE only controller without a public address, default to + * the static random address. + * + * For debugging purposes it is possible to force controllers with a + * public address to use the static random address instead. + * + * In case BR/EDR has been disabled on a dual-mode controller and + * userspace has configured a static address, then that address + * becomes the identity address instead of the public BR/EDR address. + */ +void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 *bdaddr_type) +{ + if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || + !bacmp(&hdev->bdaddr, BDADDR_ANY) || + (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && + bacmp(&hdev->static_addr, BDADDR_ANY))) { + bacpy(bdaddr, &hdev->static_addr); + *bdaddr_type = ADDR_LE_DEV_RANDOM; + } else { + bacpy(bdaddr, &hdev->bdaddr); + *bdaddr_type = ADDR_LE_DEV_PUBLIC; + } +} - err = hci_req_run(&req, le_scan_disable_work_complete); - if (err) - BT_ERR("Disable LE scanning request failed: err %d", err); +static void hci_clear_wake_reason(struct hci_dev *hdev) +{ + hci_dev_lock(hdev); + + hdev->wake_reason = 0; + bacpy(&hdev->wake_addr, BDADDR_ANY); + hdev->wake_addr_type = 0; + + hci_dev_unlock(hdev); +} + +static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct hci_dev *hdev = + container_of(nb, struct hci_dev, suspend_notifier); + int ret = 0; + + /* Userspace has full control of this device. Do nothing. */ + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) + return NOTIFY_DONE; + + /* To avoid a potential race with hci_unregister_dev. */ + hci_dev_hold(hdev); + + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + ret = hci_suspend_dev(hdev); + break; + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + ret = hci_resume_dev(hdev); + break; + } + + if (ret) + bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", + action, ret); + + hci_dev_put(hdev); + return NOTIFY_DONE; } /* Alloc HCI device */ -struct hci_dev *hci_alloc_dev(void) +struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) { struct hci_dev *hdev; + unsigned int alloc_size; - hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL); + alloc_size = sizeof(*hdev); + if (sizeof_priv) { + /* Fixme: May need ALIGN-ment? 
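+ * (the private area starts at sizeof(*hdev), so it is only as
+ * aligned as struct hci_dev itself; a driver needing stricter
+ * alignment would have to pad its private struct)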
*/ + alloc_size += sizeof_priv; + } + + hdev = kzalloc(alloc_size, GFP_KERNEL); if (!hdev) return NULL; + if (init_srcu_struct(&hdev->srcu)) { + kfree(hdev); + return NULL; + } + hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); hdev->esco_type = (ESCO_HV1); hdev->link_mode = (HCI_LM_ACCEPT); - hdev->io_capability = 0x03; /* No Input No Output */ + hdev->num_iac = 0x01; /* One IAC support is mandatory */ + hdev->io_capability = 0x03; /* No Input No Output */ + hdev->manufacturer = 0xffff; /* Default to internal use */ hdev->inq_tx_power = HCI_TX_POWER_INVALID; hdev->adv_tx_power = HCI_TX_POWER_INVALID; + hdev->adv_instance_cnt = 0; + hdev->cur_adv_instance = 0x00; + hdev->adv_instance_timeout = 0; + + hdev->advmon_allowlist_duration = 300; + hdev->advmon_no_filter_duration = 500; + hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */ hdev->sniff_max_interval = 800; hdev->sniff_min_interval = 80; + hdev->le_adv_channel_map = 0x07; + hdev->le_adv_min_interval = 0x0800; + hdev->le_adv_max_interval = 0x0800; + hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST; + hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST; + hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1; + hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1; + hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; + hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; + hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST; + hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST; + hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN; + hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN; + hdev->le_conn_min_interval = 0x0018; + hdev->le_conn_max_interval = 0x0028; + hdev->le_conn_latency = 0x0000; + hdev->le_supv_timeout = 0x002a; + hdev->le_def_tx_len = 0x001b; + hdev->le_def_tx_time = 0x0148; + hdev->le_max_tx_len = 0x001b; + hdev->le_max_tx_time = 0x0148; + hdev->le_max_rx_len = 0x001b; + hdev->le_max_rx_time = 0x0148; + hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE; + hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE; + hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; + hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; + hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; + hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; + hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT; + hdev->min_le_tx_power = HCI_TX_POWER_INVALID; + hdev->max_le_tx_power = HCI_TX_POWER_INVALID; + + hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; + hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; + hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; + hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; + hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; + hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; + + /* default 1.28 sec page scan */ + hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; + hdev->def_page_scan_int = 0x0800; + hdev->def_page_scan_window = 0x0012; + mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); + mutex_init(&hdev->mgmt_pending_lock); + + ida_init(&hdev->unset_handle_ida); + INIT_LIST_HEAD(&hdev->mesh_pending); INIT_LIST_HEAD(&hdev->mgmt_pending); - INIT_LIST_HEAD(&hdev->blacklist); + INIT_LIST_HEAD(&hdev->reject_list); + INIT_LIST_HEAD(&hdev->accept_list); INIT_LIST_HEAD(&hdev->uuids); INIT_LIST_HEAD(&hdev->link_keys); INIT_LIST_HEAD(&hdev->long_term_keys); + INIT_LIST_HEAD(&hdev->identity_resolving_keys); INIT_LIST_HEAD(&hdev->remote_oob_data); + INIT_LIST_HEAD(&hdev->le_accept_list); + INIT_LIST_HEAD(&hdev->le_resolv_list); + INIT_LIST_HEAD(&hdev->le_conn_params); + 
INIT_LIST_HEAD(&hdev->pend_le_conns); + INIT_LIST_HEAD(&hdev->pend_le_reports); INIT_LIST_HEAD(&hdev->conn_hash.list); + INIT_LIST_HEAD(&hdev->adv_instances); + INIT_LIST_HEAD(&hdev->blocked_keys); + INIT_LIST_HEAD(&hdev->monitored_devices); + INIT_LIST_HEAD(&hdev->local_codecs); INIT_WORK(&hdev->rx_work, hci_rx_work); INIT_WORK(&hdev->cmd_work, hci_cmd_work); INIT_WORK(&hdev->tx_work, hci_tx_work); INIT_WORK(&hdev->power_on, hci_power_on); + INIT_WORK(&hdev->error_reset, hci_error_reset); + + hci_cmd_sync_init(hdev); INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); - INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off); - INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); skb_queue_head_init(&hdev->rx_q); skb_queue_head_init(&hdev->cmd_q); @@ -2118,14 +2560,17 @@ struct hci_dev *hci_alloc_dev(void) init_waitqueue_head(&hdev->req_wait_q); - setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev); + INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); + INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); + + hci_devcd_setup(hdev); hci_init_sysfs(hdev); discovery_init(hdev); return hdev; } -EXPORT_SYMBOL(hci_alloc_dev); +EXPORT_SYMBOL(hci_alloc_dev_priv); /* Free HCI device */ void hci_free_dev(struct hci_dev *hdev) @@ -2140,54 +2585,45 @@ int hci_register_dev(struct hci_dev *hdev) { int id, error; - if (!hdev->open || !hdev->close) + if (!hdev->open || !hdev->close || !hdev->send) return -EINVAL; - /* Do not allow HCI_AMP devices to register at index 0, - * so the index can be used as the AMP controller ID. - */ - switch (hdev->dev_type) { - case HCI_BREDR: - id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL); - break; - case HCI_AMP: - id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL); - break; - default: - return -EINVAL; - } - + id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL); if (id < 0) return id; - sprintf(hdev->name, "hci%d", id); + error = dev_set_name(&hdev->dev, "hci%u", id); + if (error) + return error; + + hdev->name = dev_name(&hdev->dev); hdev->id = id; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - write_lock(&hci_dev_list_lock); - list_add(&hdev->list, &hci_dev_list); - write_unlock(&hci_dev_list_lock); - - hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND | - WQ_MEM_RECLAIM, 1, hdev->name); + hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); if (!hdev->workqueue) { error = -ENOMEM; goto err; } - hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND | - WQ_MEM_RECLAIM, 1, hdev->name); + hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, + hdev->name); if (!hdev->req_workqueue) { destroy_workqueue(hdev->workqueue); error = -ENOMEM; goto err; } - error = hci_add_sysfs(hdev); + if (!IS_ERR_OR_NULL(bt_debugfs)) + hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); + + error = device_add(&hdev->dev); if (error < 0) goto err_wqueue; + hci_leds_init(hdev); + hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); @@ -2198,26 +2634,53 @@ int hci_register_dev(struct hci_dev *hdev) } } - set_bit(HCI_SETUP, &hdev->dev_flags); + if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) + hci_dev_set_flag(hdev, HCI_RFKILLED); + + hci_dev_set_flag(hdev, HCI_SETUP); + hci_dev_set_flag(hdev, HCI_AUTO_OFF); + + /* Assume BR/EDR support until proven otherwise (such as + * through reading supported features during init. 
+ */ + hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); + + write_lock(&hci_dev_list_lock); + list_add(&hdev->list, &hci_dev_list); + write_unlock(&hci_dev_list_lock); - if (hdev->dev_type != HCI_AMP) - set_bit(HCI_AUTO_OFF, &hdev->dev_flags); + /* Devices that are marked for raw-only usage are unconfigured + * and should not be included in normal operation. + */ + if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE)) + hci_dev_set_flag(hdev, HCI_UNCONFIGURED); + + /* Mark Remote Wakeup connection flag as supported if driver has wakeup + * callback. + */ + if (hdev->wakeup) + hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP; - hci_notify(hdev, HCI_DEV_REG); + hci_sock_dev_event(hdev, HCI_DEV_REG); hci_dev_hold(hdev); + error = hci_register_suspend_notifier(hdev); + if (error) + BT_WARN("register suspend notifier failed error:%d\n", error); + queue_work(hdev->req_workqueue, &hdev->power_on); + idr_init(&hdev->adv_monitors_idr); + msft_register(hdev); + return id; err_wqueue: + debugfs_remove_recursive(hdev->debugfs); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); err: - ida_simple_remove(&hci_index_ida, hdev->id); - write_lock(&hci_dev_list_lock); - list_del(&hdev->list); - write_unlock(&hci_dev_list_lock); + ida_free(&hci_index_ida, hdev->id); return error; } @@ -2226,27 +2689,34 @@ EXPORT_SYMBOL(hci_register_dev); /* Unregister HCI device */ void hci_unregister_dev(struct hci_dev *hdev) { - int i, id; - BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - set_bit(HCI_UNREGISTER, &hdev->dev_flags); - - id = hdev->id; + mutex_lock(&hdev->unregister_lock); + hci_dev_set_flag(hdev, HCI_UNREGISTER); + mutex_unlock(&hdev->unregister_lock); write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); - hci_dev_do_close(hdev); + synchronize_srcu(&hdev->srcu); + cleanup_srcu_struct(&hdev->srcu); - for (i = 0; i < NUM_REASSEMBLY; i++) - kfree_skb(hdev->reassembly[i]); + disable_work_sync(&hdev->rx_work); + disable_work_sync(&hdev->cmd_work); + disable_work_sync(&hdev->tx_work); + disable_work_sync(&hdev->power_on); + disable_work_sync(&hdev->error_reset); - cancel_work_sync(&hdev->power_on); + hci_cmd_sync_clear(hdev); + + hci_unregister_suspend_notifier(hdev); + + hci_dev_do_close(hdev); if (!test_bit(HCI_INIT, &hdev->flags) && - !test_bit(HCI_SETUP, &hdev->dev_flags)) { + !hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) { hci_dev_lock(hdev); mgmt_index_removed(hdev); hci_dev_unlock(hdev); @@ -2256,234 +2726,288 @@ void hci_unregister_dev(struct hci_dev *hdev) * pending list */ BUG_ON(!list_empty(&hdev->mgmt_pending)); - hci_notify(hdev, HCI_DEV_UNREG); + hci_sock_dev_event(hdev, HCI_DEV_UNREG); if (hdev->rfkill) { rfkill_unregister(hdev->rfkill); rfkill_destroy(hdev->rfkill); } - hci_del_sysfs(hdev); + device_del(&hdev->dev); + /* Actual cleanup is deferred until hci_release_dev(). 
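+ * hci_dev_put() drops the reference taken in hci_register_dev();
+ * once the last reference is gone the device core's release
+ * callback ends up in hci_release_dev() below.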
*/ + hci_dev_put(hdev); +} +EXPORT_SYMBOL(hci_unregister_dev); + +/* Release HCI device */ +void hci_release_dev(struct hci_dev *hdev) +{ + debugfs_remove_recursive(hdev->debugfs); + kfree_const(hdev->hw_info); + kfree_const(hdev->fw_info); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); hci_dev_lock(hdev); - hci_blacklist_clear(hdev); + hci_bdaddr_list_clear(&hdev->reject_list); + hci_bdaddr_list_clear(&hdev->accept_list); hci_uuids_clear(hdev); hci_link_keys_clear(hdev); hci_smp_ltks_clear(hdev); + hci_smp_irks_clear(hdev); hci_remote_oob_data_clear(hdev); + hci_adv_instances_clear(hdev); + hci_adv_monitors_clear(hdev); + hci_bdaddr_list_clear(&hdev->le_accept_list); + hci_bdaddr_list_clear(&hdev->le_resolv_list); + hci_conn_params_clear_all(hdev); + hci_discovery_filter_clear(hdev); + hci_blocked_keys_clear(hdev); + hci_codec_list_clear(&hdev->local_codecs); + msft_release(hdev); hci_dev_unlock(hdev); - hci_dev_put(hdev); - - ida_simple_remove(&hci_index_ida, id); + ida_destroy(&hdev->unset_handle_ida); + ida_free(&hci_index_ida, hdev->id); + kfree_skb(hdev->sent_cmd); + kfree_skb(hdev->req_skb); + kfree_skb(hdev->recv_event); + kfree(hdev); } -EXPORT_SYMBOL(hci_unregister_dev); +EXPORT_SYMBOL(hci_release_dev); -/* Suspend HCI device */ -int hci_suspend_dev(struct hci_dev *hdev) +int hci_register_suspend_notifier(struct hci_dev *hdev) { - hci_notify(hdev, HCI_DEV_SUSPEND); - return 0; -} -EXPORT_SYMBOL(hci_suspend_dev); + int ret = 0; -/* Resume HCI device */ -int hci_resume_dev(struct hci_dev *hdev) -{ - hci_notify(hdev, HCI_DEV_RESUME); - return 0; + if (!hdev->suspend_notifier.notifier_call && + !hci_test_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER)) { + hdev->suspend_notifier.notifier_call = hci_suspend_notifier; + ret = register_pm_notifier(&hdev->suspend_notifier); + } + + return ret; } -EXPORT_SYMBOL(hci_resume_dev); -/* Receive frame from HCI drivers */ -int hci_recv_frame(struct sk_buff *skb) +int hci_unregister_suspend_notifier(struct hci_dev *hdev) { - struct hci_dev *hdev = (struct hci_dev *) skb->dev; - if (!hdev || (!test_bit(HCI_UP, &hdev->flags) - && !test_bit(HCI_INIT, &hdev->flags))) { - kfree_skb(skb); - return -ENXIO; + int ret = 0; + + if (hdev->suspend_notifier.notifier_call) { + ret = unregister_pm_notifier(&hdev->suspend_notifier); + if (!ret) + hdev->suspend_notifier.notifier_call = NULL; } - /* Incoming skb */ - bt_cb(skb)->incoming = 1; + return ret; +} - /* Time stamp */ - __net_timestamp(skb); +/* Cancel ongoing command synchronously: + * + * - Cancel command timer + * - Reset command counter + * - Cancel command request + */ +static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err) +{ + bt_dev_dbg(hdev, "err 0x%2.2x", err); + + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + disable_delayed_work_sync(&hdev->cmd_timer); + disable_delayed_work_sync(&hdev->ncmd_timer); + } else { + cancel_delayed_work_sync(&hdev->cmd_timer); + cancel_delayed_work_sync(&hdev->ncmd_timer); + } - skb_queue_tail(&hdev->rx_q, skb); - queue_work(hdev->workqueue, &hdev->rx_work); + atomic_set(&hdev->cmd_cnt, 1); - return 0; + hci_cmd_sync_cancel_sync(hdev, err); } -EXPORT_SYMBOL(hci_recv_frame); -static int hci_reassembly(struct hci_dev *hdev, int type, void *data, - int count, __u8 index) +/* Suspend HCI device */ +int hci_suspend_dev(struct hci_dev *hdev) { - int len = 0; - int hlen = 0; - int remain = count; - struct sk_buff *skb; - struct bt_skb_cb *scb; + int ret; - if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) || - index >= 
NUM_REASSEMBLY) - return -EILSEQ; + bt_dev_dbg(hdev, ""); - skb = hdev->reassembly[index]; + /* Suspend should only act on when powered. */ + if (!hdev_is_powered(hdev) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; - if (!skb) { - switch (type) { - case HCI_ACLDATA_PKT: - len = HCI_MAX_FRAME_SIZE; - hlen = HCI_ACL_HDR_SIZE; - break; - case HCI_EVENT_PKT: - len = HCI_MAX_EVENT_SIZE; - hlen = HCI_EVENT_HDR_SIZE; - break; - case HCI_SCODATA_PKT: - len = HCI_MAX_SCO_SIZE; - hlen = HCI_SCO_HDR_SIZE; - break; - } + /* If powering down don't attempt to suspend */ + if (mgmt_powering_down(hdev)) + return 0; - skb = bt_skb_alloc(len, GFP_ATOMIC); - if (!skb) - return -ENOMEM; + /* Cancel potentially blocking sync operation before suspend */ + hci_cancel_cmd_sync(hdev, EHOSTDOWN); - scb = (void *) skb->cb; - scb->expect = hlen; - scb->pkt_type = type; + hci_req_sync_lock(hdev); + ret = hci_suspend_sync(hdev); + hci_req_sync_unlock(hdev); - skb->dev = (void *) hdev; - hdev->reassembly[index] = skb; - } + hci_clear_wake_reason(hdev); + mgmt_suspending(hdev, hdev->suspend_state); + + hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); + return ret; +} +EXPORT_SYMBOL(hci_suspend_dev); - while (count) { - scb = (void *) skb->cb; - len = min_t(uint, scb->expect, count); +/* Resume HCI device */ +int hci_resume_dev(struct hci_dev *hdev) +{ + int ret; - memcpy(skb_put(skb, len), data, len); + bt_dev_dbg(hdev, ""); - count -= len; - data += len; - scb->expect -= len; - remain = count; + /* Resume should only act on when powered. */ + if (!hdev_is_powered(hdev) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; - switch (type) { - case HCI_EVENT_PKT: - if (skb->len == HCI_EVENT_HDR_SIZE) { - struct hci_event_hdr *h = hci_event_hdr(skb); - scb->expect = h->plen; - - if (skb_tailroom(skb) < scb->expect) { - kfree_skb(skb); - hdev->reassembly[index] = NULL; - return -ENOMEM; - } - } - break; + /* If powering down don't attempt to resume */ + if (mgmt_powering_down(hdev)) + return 0; - case HCI_ACLDATA_PKT: - if (skb->len == HCI_ACL_HDR_SIZE) { - struct hci_acl_hdr *h = hci_acl_hdr(skb); - scb->expect = __le16_to_cpu(h->dlen); - - if (skb_tailroom(skb) < scb->expect) { - kfree_skb(skb); - hdev->reassembly[index] = NULL; - return -ENOMEM; - } - } - break; + hci_req_sync_lock(hdev); + ret = hci_resume_sync(hdev); + hci_req_sync_unlock(hdev); - case HCI_SCODATA_PKT: - if (skb->len == HCI_SCO_HDR_SIZE) { - struct hci_sco_hdr *h = hci_sco_hdr(skb); - scb->expect = h->dlen; - - if (skb_tailroom(skb) < scb->expect) { - kfree_skb(skb); - hdev->reassembly[index] = NULL; - return -ENOMEM; - } - } - break; - } + mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, + hdev->wake_addr_type); - if (scb->expect == 0) { - /* Complete frame */ + hci_sock_dev_event(hdev, HCI_DEV_RESUME); + return ret; +} +EXPORT_SYMBOL(hci_resume_dev); - bt_cb(skb)->pkt_type = type; - hci_recv_frame(skb); +/* Reset HCI device */ +int hci_reset_dev(struct hci_dev *hdev) +{ + static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; + struct sk_buff *skb; - hdev->reassembly[index] = NULL; - return remain; - } - } + skb = bt_skb_alloc(3, GFP_ATOMIC); + if (!skb) + return -ENOMEM; - return remain; + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + skb_put_data(skb, hw_err, 3); + + bt_dev_err(hdev, "Injecting HCI hardware error event"); + + /* Send Hardware Error to upper stack */ + return hci_recv_frame(hdev, skb); } +EXPORT_SYMBOL(hci_reset_dev); -int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count) +static u8 
hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb) { - int rem = 0; + if (hdev->classify_pkt_type) + return hdev->classify_pkt_type(hdev, skb); - if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) - return -EILSEQ; + return hci_skb_pkt_type(skb); +} - while (count) { - rem = hci_reassembly(hdev, type, data, count, type - 1); - if (rem < 0) - return rem; +/* Receive frame from HCI drivers */ +int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + u8 dev_pkt_type; - data += (count - rem); - count = rem; + if (!hdev || (!test_bit(HCI_UP, &hdev->flags) + && !test_bit(HCI_INIT, &hdev->flags))) { + kfree_skb(skb); + return -ENXIO; } - return rem; -} -EXPORT_SYMBOL(hci_recv_fragment); + /* Check if the driver agree with packet type classification */ + dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb); + if (hci_skb_pkt_type(skb) != dev_pkt_type) { + hci_skb_pkt_type(skb) = dev_pkt_type; + } + + switch (hci_skb_pkt_type(skb)) { + case HCI_EVENT_PKT: + break; + case HCI_ACLDATA_PKT: + /* Detect if ISO packet has been sent as ACL */ + if (hci_conn_num(hdev, CIS_LINK) || + hci_conn_num(hdev, BIS_LINK) || + hci_conn_num(hdev, PA_LINK)) { + __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); + __u8 type; + + type = hci_conn_lookup_type(hdev, hci_handle(handle)); + if (type == CIS_LINK || type == BIS_LINK || + type == PA_LINK) + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + } + break; + case HCI_SCODATA_PKT: + break; + case HCI_ISODATA_PKT: + break; + case HCI_DRV_PKT: + break; + default: + kfree_skb(skb); + return -EINVAL; + } + + /* Incoming skb */ + bt_cb(skb)->incoming = 1; -#define STREAM_REASSEMBLY 0 + /* Time stamp */ + __net_timestamp(skb); -int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count) + skb_queue_tail(&hdev->rx_q, skb); + queue_work(hdev->workqueue, &hdev->rx_work); + + return 0; +} +EXPORT_SYMBOL(hci_recv_frame); + +/* Receive diagnostic message from HCI drivers */ +int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) { - int type; - int rem = 0; + /* Mark as diagnostic packet */ + hci_skb_pkt_type(skb) = HCI_DIAG_PKT; - while (count) { - struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY]; + /* Time stamp */ + __net_timestamp(skb); - if (!skb) { - struct { char type; } *pkt; + skb_queue_tail(&hdev->rx_q, skb); + queue_work(hdev->workqueue, &hdev->rx_work); - /* Start of the frame */ - pkt = data; - type = pkt->type; + return 0; +} +EXPORT_SYMBOL(hci_recv_diag); - data++; - count--; - } else - type = bt_cb(skb)->pkt_type; +void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...) +{ + va_list vargs; - rem = hci_reassembly(hdev, type, data, count, - STREAM_REASSEMBLY); - if (rem < 0) - return rem; + va_start(vargs, fmt); + kfree_const(hdev->hw_info); + hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); + va_end(vargs); +} +EXPORT_SYMBOL(hci_set_hw_info); - data += (count - rem); - count = rem; - } +void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...) 
+{ + va_list vargs; - return rem; + va_start(vargs, fmt); + kfree_const(hdev->fw_info); + hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); + va_end(vargs); } -EXPORT_SYMBOL(hci_recv_stream_fragment); +EXPORT_SYMBOL(hci_set_fw_info); /* ---- Interface to upper protocols ---- */ @@ -2491,9 +3015,9 @@ int hci_register_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); - write_lock(&hci_cb_list_lock); - list_add(&cb->list, &hci_cb_list); - write_unlock(&hci_cb_list_lock); + mutex_lock(&hci_cb_list_lock); + list_add_tail(&cb->list, &hci_cb_list); + mutex_unlock(&hci_cb_list_lock); return 0; } @@ -2503,24 +3027,20 @@ int hci_unregister_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); - write_lock(&hci_cb_list_lock); + mutex_lock(&hci_cb_list_lock); list_del(&cb->list); - write_unlock(&hci_cb_list_lock); + mutex_unlock(&hci_cb_list_lock); return 0; } EXPORT_SYMBOL(hci_unregister_cb); -static int hci_send_frame(struct sk_buff *skb) +static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_dev *hdev = (struct hci_dev *) skb->dev; - - if (!hdev) { - kfree_skb(skb); - return -ENODEV; - } + int err; - BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); + BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb), + skb->len); /* Time stamp */ __net_timestamp(skb); @@ -2536,72 +3056,35 @@ static int hci_send_frame(struct sk_buff *skb) /* Get rid of skb owner, prior to sending to the driver. */ skb_orphan(skb); - return hdev->send(skb); -} - -void hci_req_init(struct hci_request *req, struct hci_dev *hdev) -{ - skb_queue_head_init(&req->cmd_q); - req->hdev = hdev; - req->err = 0; -} - -int hci_req_run(struct hci_request *req, hci_req_complete_t complete) -{ - struct hci_dev *hdev = req->hdev; - struct sk_buff *skb; - unsigned long flags; - - BT_DBG("length %u", skb_queue_len(&req->cmd_q)); - - /* If an error occured during request building, remove all HCI - * commands queued on the HCI request queue. - */ - if (req->err) { - skb_queue_purge(&req->cmd_q); - return req->err; + if (!test_bit(HCI_RUNNING, &hdev->flags)) { + kfree_skb(skb); + return -EINVAL; } - /* Do not allow empty requests */ - if (skb_queue_empty(&req->cmd_q)) - return -ENODATA; - - skb = skb_peek_tail(&req->cmd_q); - bt_cb(skb)->req.complete = complete; - - spin_lock_irqsave(&hdev->cmd_q.lock, flags); - skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); - spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); + if (hci_skb_pkt_type(skb) == HCI_DRV_PKT) { + /* Intercept HCI Drv packet here and don't go with hdev->send + * callback. 
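+ * hci_drv_process_cmd() handles the packet synchronously, so the
+ * skb can be freed here regardless of its result.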
+ */ + err = hci_drv_process_cmd(hdev, skb); + kfree_skb(skb); + return err; + } - queue_work(hdev->workqueue, &hdev->cmd_work); + err = hdev->send(hdev, skb); + if (err < 0) { + bt_dev_err(hdev, "sending frame failed (%d)", err); + kfree_skb(skb); + return err; + } return 0; } -static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, - u32 plen, const void *param) +static int hci_send_conn_frame(struct hci_dev *hdev, struct hci_conn *conn, + struct sk_buff *skb) { - int len = HCI_COMMAND_HDR_SIZE + plen; - struct hci_command_hdr *hdr; - struct sk_buff *skb; - - skb = bt_skb_alloc(len, GFP_ATOMIC); - if (!skb) - return NULL; - - hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); - hdr->opcode = cpu_to_le16(opcode); - hdr->plen = plen; - - if (plen) - memcpy(skb_put(skb, plen), param, plen); - - BT_DBG("skb len %d", skb->len); - - bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; - skb->dev = (void *) hdev; - - return skb; + hci_conn_tx_queue(conn, skb); + return hci_send_frame(hdev, skb); } /* Send HCI command */ @@ -2612,16 +3095,16 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); - skb = hci_prepare_cmd(hdev, opcode, plen, param); + skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); if (!skb) { - BT_ERR("%s no memory for command", hdev->name); + bt_dev_err(hdev, "no memory for command"); return -ENOMEM; } - /* Stand-alone HCI commands must be flaged as + /* Stand-alone HCI commands must be flagged as * single-command requests. */ - bt_cb(skb)->req.start = true; + bt_cb(skb)->hci.req_flags |= HCI_REQ_START; skb_queue_tail(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); @@ -2629,59 +3112,96 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, return 0; } -/* Queue a command to an asynchronous HCI request */ -void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, - const void *param, u8 event) +int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param) { - struct hci_dev *hdev = req->hdev; struct sk_buff *skb; - BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); - - /* If an error occured during request building, there is no point in - * queueing the HCI command. We can simply return. - */ - if (req->err) - return; + if (hci_opcode_ogf(opcode) != 0x3f) { + /* A controller receiving a command shall respond with either + * a Command Status Event or a Command Complete Event. + * Therefore, all standard HCI commands must be sent via the + * standard API, using hci_send_cmd or hci_cmd_sync helpers. + * Some vendors do not comply with this rule for vendor-specific + * commands and do not return any event. We want to support + * unresponded commands for such cases only. 
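+ *
+ * A minimal sketch, using a made-up vendor opcode and payload:
+ *
+ *	u8 param[2] = { 0x01, 0x00 };
+ *
+ *	__hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001), 2, param);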
+ */ + bt_dev_err(hdev, "unresponded command not supported"); + return -EINVAL; + } - skb = hci_prepare_cmd(hdev, opcode, plen, param); + skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); if (!skb) { - BT_ERR("%s no memory for command (opcode 0x%4.4x)", - hdev->name, opcode); - req->err = -ENOMEM; - return; + bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", + opcode); + return -ENOMEM; } - if (skb_queue_empty(&req->cmd_q)) - bt_cb(skb)->req.start = true; - - bt_cb(skb)->req.event = event; + hci_send_frame(hdev, skb); - skb_queue_tail(&req->cmd_q, skb); + return 0; } +EXPORT_SYMBOL(__hci_cmd_send); -void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, - const void *param) +/* Get data from the previously sent command */ +static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode) { - hci_req_add_ev(req, opcode, plen, param, 0); + struct hci_command_hdr *hdr; + + if (!skb || skb->len < HCI_COMMAND_HDR_SIZE) + return NULL; + + hdr = (void *)skb->data; + + if (hdr->opcode != cpu_to_le16(opcode)) + return NULL; + + return skb->data + HCI_COMMAND_HDR_SIZE; } /* Get data from the previously sent command */ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) { - struct hci_command_hdr *hdr; + void *data; - if (!hdev->sent_cmd) + /* Check if opcode matches last sent command */ + data = hci_cmd_data(hdev->sent_cmd, opcode); + if (!data) + /* Check if opcode matches last request */ + data = hci_cmd_data(hdev->req_skb, opcode); + + return data; +} + +/* Get data from last received event */ +void *hci_recv_event_data(struct hci_dev *hdev, __u8 event) +{ + struct hci_event_hdr *hdr; + int offset; + + if (!hdev->recv_event) return NULL; - hdr = (void *) hdev->sent_cmd->data; + hdr = (void *)hdev->recv_event->data; + offset = sizeof(*hdr); - if (hdr->opcode != cpu_to_le16(opcode)) + if (hdr->evt != event) { + /* In case of LE metaevent check the subevent match */ + if (hdr->evt == HCI_EV_LE_META) { + struct hci_ev_le_meta *ev; + + ev = (void *)hdev->recv_event->data + offset; + offset += sizeof(*ev); + if (ev->subevent == event) + goto found; + } return NULL; + } - BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); +found: + bt_dev_dbg(hdev, "event 0x%2.2x", event); - return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; + return hdev->recv_event->data + offset; } /* Send ACL data */ @@ -2707,19 +3227,9 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, skb->len = skb_headlen(skb); skb->data_len = 0; - bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; + hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; - switch (hdev->dev_type) { - case HCI_BREDR: - hci_add_acl_hdr(skb, conn->handle, flags); - break; - case HCI_AMP: - hci_add_acl_hdr(skb, chan->handle, flags); - break; - default: - BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type); - return; - } + hci_add_acl_hdr(skb, conn->handle, flags); list = skb_shinfo(skb)->frag_list; if (!list) { @@ -2733,8 +3243,12 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, skb_shinfo(skb)->frag_list = NULL; - /* Queue all fragments atomically */ - spin_lock(&queue->lock); + /* Queue all fragments atomically. We need to use spin_lock_bh + * here because of 6LoWPAN links, as there this function is + * called from softirq and using normal spin lock could cause + * deadlocks. 
+ */ + spin_lock_bh(&queue->lock); __skb_queue_tail(queue, skb); @@ -2743,8 +3257,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, do { skb = list; list = list->next; - skb->dev = (void *) hdev; - bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; + hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; hci_add_acl_hdr(skb, conn->handle, flags); BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); @@ -2752,8 +3265,10 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, __skb_queue_tail(queue, skb); } while (list); - spin_unlock(&queue->lock); + spin_unlock_bh(&queue->lock); } + + bt_dev_dbg(hdev, "chan %p queued %d", chan, skb_queue_len(queue)); } void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) @@ -2762,8 +3277,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); - skb->dev = (void *) hdev; - hci_queue_acl(chan, &chan->data_q, skb, flags); queue_work(hdev->workqueue, &hdev->tx_work); @@ -2784,16 +3297,127 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) skb_reset_transport_header(skb); memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); - skb->dev = (void *) hdev; - bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; + hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; skb_queue_tail(&conn->data_q, skb); + + bt_dev_dbg(hdev, "hcon %p queued %d", conn, + skb_queue_len(&conn->data_q)); + + queue_work(hdev->workqueue, &hdev->tx_work); +} + +/* Send ISO data */ +static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags) +{ + struct hci_iso_hdr *hdr; + int len = skb->len; + + skb_push(skb, HCI_ISO_HDR_SIZE); + skb_reset_transport_header(skb); + hdr = (struct hci_iso_hdr *)skb_transport_header(skb); + hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); + hdr->dlen = cpu_to_le16(len); +} + +static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue, + struct sk_buff *skb) +{ + struct hci_dev *hdev = conn->hdev; + struct sk_buff *list; + __u16 flags; + + skb->len = skb_headlen(skb); + skb->data_len = 0; + + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + + list = skb_shinfo(skb)->frag_list; + + flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00); + hci_add_iso_hdr(skb, conn->handle, flags); + + if (!list) { + /* Non fragmented */ + BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); + + skb_queue_tail(queue, skb); + } else { + /* Fragmented */ + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + skb_shinfo(skb)->frag_list = NULL; + + __skb_queue_tail(queue, skb); + + do { + skb = list; list = list->next; + + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + flags = hci_iso_flags_pack(list ? 
ISO_CONT : ISO_END, + 0x00); + hci_add_iso_hdr(skb, conn->handle, flags); + + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + __skb_queue_tail(queue, skb); + } while (list); + } + + bt_dev_dbg(hdev, "hcon %p queued %d", conn, skb_queue_len(queue)); +} + +void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("%s len %d", hdev->name, skb->len); + + hci_queue_iso(conn, &conn->data_q, skb); + queue_work(hdev->workqueue, &hdev->tx_work); } /* ---- HCI TX task (outgoing data) ---- */ /* HCI Connection scheduler */ +static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) +{ + struct hci_dev *hdev; + int cnt, q; + + if (!conn) { + *quote = 0; + return; + } + + hdev = conn->hdev; + + switch (conn->type) { + case ACL_LINK: + cnt = hdev->acl_cnt; + break; + case SCO_LINK: + case ESCO_LINK: + cnt = hdev->sco_cnt; + break; + case LE_LINK: + cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; + break; + case CIS_LINK: + case BIS_LINK: + case PA_LINK: + cnt = hdev->iso_cnt; + break; + default: + cnt = 0; + bt_dev_err(hdev, "unknown link type %d", conn->type); + } + + q = cnt / num; + *quote = q ? q : 1; +} + static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) { @@ -2807,9 +3431,14 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != type || skb_queue_empty(&c->data_q)) + if (c->type != type || + skb_queue_empty(&c->data_q)) continue; + bt_dev_dbg(hdev, "hcon %p state %s queued %d", c, + state_to_string(c->state), + skb_queue_len(&c->data_q)); + if (c->state != BT_CONNECTED && c->state != BT_CONFIG) continue; @@ -2826,29 +3455,7 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, rcu_read_unlock(); - if (conn) { - int cnt, q; - - switch (conn->type) { - case ACL_LINK: - cnt = hdev->acl_cnt; - break; - case SCO_LINK: - case ESCO_LINK: - cnt = hdev->sco_cnt; - break; - case LE_LINK: - cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; - break; - default: - cnt = 0; - BT_ERR("Unknown link type"); - } - - q = cnt / num; - *quote = q ? 
-	} else
-		*quote = 0;
+	hci_quote_sent(conn, num, quote);
 
 	BT_DBG("conn %p quote %d", conn, *quote);
 	return conn;
 
@@ -2859,20 +3466,20 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 	struct hci_conn_hash *h = &hdev->conn_hash;
 	struct hci_conn *c;
 
-	BT_ERR("%s link tx timeout", hdev->name);
+	bt_dev_err(hdev, "link tx timeout");
 
-	rcu_read_lock();
+	hci_dev_lock(hdev);
 
 	/* Kill stalled connections */
-	list_for_each_entry_rcu(c, &h->list, list) {
+	list_for_each_entry(c, &h->list, list) {
 		if (c->type == type && c->sent) {
-			BT_ERR("%s killing stalled connection %pMR",
-			       hdev->name, &c->dst);
+			bt_dev_err(hdev, "killing stalled connection %pMR",
+				   &c->dst);
 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
 		}
 	}
 
-	rcu_read_unlock();
+	hci_dev_unlock(hdev);
 }
 
 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
@@ -2882,7 +3489,7 @@ static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
 	struct hci_chan *chan = NULL;
 	unsigned int num = 0, min = ~0, cur_prio = 0;
 	struct hci_conn *conn;
-	int cnt, q, conn_num = 0;
+	int conn_num = 0;
 
 	BT_DBG("%s", hdev->name);
 
@@ -2932,27 +3539,8 @@ static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
 	if (!chan)
 		return NULL;
 
-	switch (chan->conn->type) {
-	case ACL_LINK:
-		cnt = hdev->acl_cnt;
-		break;
-	case AMP_LINK:
-		cnt = hdev->block_cnt;
-		break;
-	case SCO_LINK:
-	case ESCO_LINK:
-		cnt = hdev->sco_cnt;
-		break;
-	case LE_LINK:
-		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
-		break;
-	default:
-		cnt = 0;
-		BT_ERR("Unknown link type");
-	}
+	hci_quote_sent(chan->conn, num, quote);
 
-	q = cnt / num;
-	*quote = q ? q : 1;
 	BT_DBG("chan %p quote %d", chan, *quote);
 	return chan;
 }
@@ -3007,84 +3595,97 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
 
 }
 
-static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
+static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
 {
-	/* Calculate count of blocks used by this packet */
-	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
-}
+	unsigned long timeout;
 
-static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
-{
-	if (!test_bit(HCI_RAW, &hdev->flags)) {
-		/* ACL tx timeout must be longer than maximum
-		 * link supervision timeout (40.9 seconds) */
-		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
-				       HCI_ACL_TX_TIMEOUT))
-			hci_link_tx_to(hdev, ACL_LINK);
+	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+		return;
+
+	switch (type) {
+	case ACL_LINK:
+		/* tx timeout must be longer than maximum link supervision
+		 * timeout (40.9 seconds)
+		 */
+		timeout = hdev->acl_last_tx + HCI_ACL_TX_TIMEOUT;
+		break;
+	case LE_LINK:
+		/* tx timeout must be longer than maximum link supervision
+		 * timeout (40.9 seconds)
+		 */
+		timeout = hdev->le_last_tx + HCI_ACL_TX_TIMEOUT;
+		break;
+	case CIS_LINK:
+	case BIS_LINK:
+	case PA_LINK:
+		/* tx timeout must be longer than the maximum transport latency
+		 * (8.388607 seconds)
+		 */
+		timeout = hdev->iso_last_tx + HCI_ISO_TX_TIMEOUT;
+		break;
+	default:
+		return;
 	}
+
+	if (!cnt && time_after(jiffies, timeout))
+		hci_link_tx_to(hdev, type);
 }
 
-static void hci_sched_acl_pkt(struct hci_dev *hdev)
+/* Schedule SCO */
+static void hci_sched_sco(struct hci_dev *hdev, __u8 type)
 {
-	unsigned int cnt = hdev->acl_cnt;
-	struct hci_chan *chan;
+	struct hci_conn *conn;
 	struct sk_buff *skb;
-	int quote;
-
-	__check_timeout(hdev, cnt);
-
-	while (hdev->acl_cnt &&
-	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
-		u32 priority = (skb_peek(&chan->data_q))->priority;
-		while (quote-- && (skb = skb_peek(&chan->data_q))) {
-			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
-			       skb->len, skb->priority);
+	int quote, *cnt;
+	unsigned int pkts = hdev->sco_pkts;
 
-			/* Stop if priority has changed */
-			if (skb->priority < priority)
-				break;
+	bt_dev_dbg(hdev, "type %u", type);
 
-			skb = skb_dequeue(&chan->data_q);
+	if (!hci_conn_num(hdev, type) || !pkts)
+		return;
 
-			hci_conn_enter_active_mode(chan->conn,
-						   bt_cb(skb)->force_active);
+	/* Use sco_pkts if flow control has not been enabled which will limit
+	 * the amount of buffer sent in a row.
	 */
+	if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
+		cnt = &pkts;
+	else
+		cnt = &hdev->sco_cnt;
 
-			hci_send_frame(skb);
-			hdev->acl_last_tx = jiffies;
+	while (*cnt && (conn = hci_low_sent(hdev, type, &quote))) {
+		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+			BT_DBG("skb %p len %d", skb, skb->len);
+			hci_send_conn_frame(hdev, conn, skb);
 
-			hdev->acl_cnt--;
-			chan->sent++;
-			chan->conn->sent++;
+			conn->sent++;
+			if (conn->sent == ~0)
+				conn->sent = 0;
+			(*cnt)--;
 		}
 	}
 
-	if (cnt != hdev->acl_cnt)
-		hci_prio_recalculate(hdev, ACL_LINK);
+	/* Rescheduled if all packets were sent and flow control is not enabled
+	 * as there could be more packets queued that could not be sent and
+	 * since no HCI_EV_NUM_COMP_PKTS event will be generated the reschedule
+	 * needs to be forced.
	 */
+	if (!pkts && !hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
+		queue_work(hdev->workqueue, &hdev->tx_work);
 }
 
-static void hci_sched_acl_blk(struct hci_dev *hdev)
+static void hci_sched_acl_pkt(struct hci_dev *hdev)
 {
-	unsigned int cnt = hdev->block_cnt;
+	unsigned int cnt = hdev->acl_cnt;
 	struct hci_chan *chan;
 	struct sk_buff *skb;
 	int quote;
-	u8 type;
-
-	__check_timeout(hdev, cnt);
-
-	BT_DBG("%s", hdev->name);
-	if (hdev->dev_type == HCI_AMP)
-		type = AMP_LINK;
-	else
-		type = ACL_LINK;
+	__check_timeout(hdev, cnt, ACL_LINK);
 
-	while (hdev->block_cnt > 0 &&
-	       (chan = hci_chan_sent(hdev, type, &quote))) {
+	while (hdev->acl_cnt &&
+	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
 		u32 priority = (skb_peek(&chan->data_q))->priority;
-		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
-			int blocks;
-
+		while (quote-- && (skb = skb_peek(&chan->data_q))) {
 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
 			       skb->len, skb->priority);
 
@@ -3094,26 +3695,24 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)
 
 			skb = skb_dequeue(&chan->data_q);
 
-			blocks = __get_blocks(hdev, skb);
-			if (blocks > hdev->block_cnt)
-				return;
-
 			hci_conn_enter_active_mode(chan->conn,
 						   bt_cb(skb)->force_active);
 
-			hci_send_frame(skb);
+			hci_send_conn_frame(hdev, chan->conn, skb);
 			hdev->acl_last_tx = jiffies;
 
-			hdev->block_cnt -= blocks;
-			quote -= blocks;
+			hdev->acl_cnt--;
+			chan->sent++;
+			chan->conn->sent++;
 
-			chan->sent += blocks;
-			chan->conn->sent += blocks;
+			/* Send pending SCO packets right away */
+			hci_sched_sco(hdev, SCO_LINK);
+			hci_sched_sco(hdev, ESCO_LINK);
 		}
 	}
 
-	if (cnt != hdev->block_cnt)
-		hci_prio_recalculate(hdev, type);
+	if (cnt != hdev->acl_cnt)
+		hci_prio_recalculate(hdev, ACL_LINK);
 }
 
 static void hci_sched_acl(struct hci_dev *hdev)
@@ -3121,94 +3720,29 @@
 	BT_DBG("%s", hdev->name);
 
 	/* No ACL link over BR/EDR controller */
-	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
-		return;
-
-	/* No AMP link over AMP controller */
-	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
-		return;
-
-	switch (hdev->flow_ctl_mode) {
-	case HCI_FLOW_CTL_MODE_PACKET_BASED:
-		hci_sched_acl_pkt(hdev);
-		break;
-
-	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
-		hci_sched_acl_blk(hdev);
-		break;
-	}
-}
-
-/* Schedule SCO */
-static void hci_sched_sco(struct hci_dev *hdev)
-{
-	struct hci_conn *conn;
-	struct sk_buff *skb;
-	int quote;
-
-	BT_DBG("%s", hdev->name);
-
-	if (!hci_conn_num(hdev, SCO_LINK))
-		return;
-
-	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
-		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
-			hci_send_frame(skb);
-
-			conn->sent++;
-			if (conn->sent == ~0)
-				conn->sent = 0;
-		}
-	}
-}
-
-static void hci_sched_esco(struct hci_dev *hdev)
-{
-	struct hci_conn *conn;
-	struct sk_buff *skb;
-	int quote;
-
-	BT_DBG("%s", hdev->name);
-
-	if (!hci_conn_num(hdev, ESCO_LINK))
+	if (!hci_conn_num(hdev, ACL_LINK))
 		return;
 
-	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
-						     &quote))) {
-		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-			BT_DBG("skb %p len %d", skb, skb->len);
-			hci_send_frame(skb);
-
-			conn->sent++;
-			if (conn->sent == ~0)
-				conn->sent = 0;
-		}
-	}
+	hci_sched_acl_pkt(hdev);
 }
 
 static void hci_sched_le(struct hci_dev *hdev)
 {
 	struct hci_chan *chan;
 	struct sk_buff *skb;
-	int quote, cnt, tmp;
+	int quote, *cnt, tmp;
 
 	BT_DBG("%s", hdev->name);
 
 	if (!hci_conn_num(hdev, LE_LINK))
 		return;
 
-	if (!test_bit(HCI_RAW, &hdev->flags)) {
-		/* LE tx timeout must be longer than maximum
-		 * link supervision timeout (40.9 seconds) */
-		if (!hdev->le_cnt && hdev->le_pkts &&
-		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
-			hci_link_tx_to(hdev, LE_LINK);
-	}
+	cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
+
+	__check_timeout(hdev, *cnt, LE_LINK);
 
-	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
-	tmp = cnt;
-	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+	tmp = *cnt;
+	while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
 		u32 priority = (skb_peek(&chan->data_q))->priority;
 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
@@ -3220,45 +3754,76 @@ static void hci_sched_le(struct hci_dev *hdev)
 
 			skb = skb_dequeue(&chan->data_q);
 
-			hci_send_frame(skb);
+			hci_send_conn_frame(hdev, chan->conn, skb);
 			hdev->le_last_tx = jiffies;
 
-			cnt--;
+			(*cnt)--;
 			chan->sent++;
 			chan->conn->sent++;
+
+			/* Send pending SCO packets right away */
+			hci_sched_sco(hdev, SCO_LINK);
+			hci_sched_sco(hdev, ESCO_LINK);
 		}
 	}
 
-	if (hdev->le_pkts)
-		hdev->le_cnt = cnt;
-	else
-		hdev->acl_cnt = cnt;
-
-	if (cnt != tmp)
+	if (*cnt != tmp)
 		hci_prio_recalculate(hdev, LE_LINK);
 }
 
-static void hci_tx_work(struct work_struct *work)
+/* Schedule iso */
+static void hci_sched_iso(struct hci_dev *hdev, __u8 type)
 {
-	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
+	struct hci_conn *conn;
 	struct sk_buff *skb;
+	int quote, *cnt;
 
-	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
-	       hdev->sco_cnt, hdev->le_cnt);
+	BT_DBG("%s", hdev->name);
 
-	/* Schedule queues and send stuff to HCI driver */
+	if (!hci_conn_num(hdev, type))
+		return;
 
-	hci_sched_acl(hdev);
+	cnt = &hdev->iso_cnt;
 
-	hci_sched_sco(hdev);
+	__check_timeout(hdev, *cnt, type);
 
-	hci_sched_esco(hdev);
+	while (*cnt && (conn = hci_low_sent(hdev, type, &quote))) {
+		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+			BT_DBG("skb %p len %d", skb, skb->len);
 
-	hci_sched_le(hdev);
+			hci_send_conn_frame(hdev, conn, skb);
+			hdev->iso_last_tx = jiffies;
+
+			conn->sent++;
+			if (conn->sent == ~0)
+				conn->sent = 0;
+			(*cnt)--;
+		}
+	}
+}
+
+static void hci_tx_work(struct work_struct *work)
+{
+	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
+	struct sk_buff *skb;
+
+	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
+	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
+
+	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+		/* Schedule queues and send stuff to HCI driver */
+		hci_sched_sco(hdev, SCO_LINK);
+		hci_sched_sco(hdev, ESCO_LINK);
+		hci_sched_iso(hdev, CIS_LINK);
+		hci_sched_iso(hdev, BIS_LINK);
+		hci_sched_iso(hdev, PA_LINK);
+		hci_sched_acl(hdev);
+		hci_sched_le(hdev);
+	}
 
 	/* Send next queued raw (unknown type) packet */
 	while ((skb = skb_dequeue(&hdev->raw_q)))
-		hci_send_frame(skb);
+		hci_send_frame(hdev, skb);
 }
 
 /* ----- HCI RX task (incoming data processing) ----- */
 
@@ -3266,68 +3831,96 @@
 /* ACL data packet */
 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_acl_hdr *hdr = (void *) skb->data;
-	struct hci_conn *conn;
+	struct hci_acl_hdr *hdr;
 	__u16 handle, flags;
+	int err;
 
-	skb_pull(skb, HCI_ACL_HDR_SIZE);
+	hdr = skb_pull_data(skb, sizeof(*hdr));
+	if (!hdr) {
+		bt_dev_err(hdev, "ACL packet too small");
+		kfree_skb(skb);
+		return;
+	}
 
 	handle = __le16_to_cpu(hdr->handle);
 	flags = hci_flags(handle);
 	handle = hci_handle(handle);
 
-	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
-	       handle, flags);
+	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
+		   handle, flags);
 
 	hdev->stat.acl_rx++;
 
-	hci_dev_lock(hdev);
-	conn = hci_conn_hash_lookup_handle(hdev, handle);
-	hci_dev_unlock(hdev);
-
-	if (conn) {
-		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
-
-		/* Send to upper protocol */
-		l2cap_recv_acldata(conn, skb, flags);
-		return;
-	} else {
-		BT_ERR("%s ACL packet for unknown connection handle %d",
-		       hdev->name, handle);
-	}
-
-	kfree_skb(skb);
+	err = l2cap_recv_acldata(hdev, handle, skb, flags);
+	if (err == -ENOENT)
+		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
+			   handle);
+	else if (err)
+		bt_dev_dbg(hdev, "ACL packet recv for handle %d failed: %d",
+			   handle, err);
 }
 
 /* SCO data packet */
 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	struct hci_sco_hdr *hdr = (void *) skb->data;
-	struct hci_conn *conn;
-	__u16 handle;
+	struct hci_sco_hdr *hdr;
+	__u16 handle, flags;
+	int err;
 
-	skb_pull(skb, HCI_SCO_HDR_SIZE);
+	hdr = skb_pull_data(skb, sizeof(*hdr));
+	if (!hdr) {
+		bt_dev_err(hdev, "SCO packet too small");
+		kfree_skb(skb);
+		return;
+	}
 
 	handle = __le16_to_cpu(hdr->handle);
+	flags = hci_flags(handle);
+	handle = hci_handle(handle);
 
-	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
+	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
+		   handle, flags);
 
 	hdev->stat.sco_rx++;
 
-	hci_dev_lock(hdev);
-	conn = hci_conn_hash_lookup_handle(hdev, handle);
-	hci_dev_unlock(hdev);
+	hci_skb_pkt_status(skb) = flags & 0x03;
+
+	err = sco_recv_scodata(hdev, handle, skb);
+	if (err == -ENOENT)
+		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
+				       handle);
+	else if (err)
+		bt_dev_dbg(hdev, "SCO packet recv for handle %d failed: %d",
+			   handle, err);
+}
 
-	if (conn) {
-		/* Send to upper protocol */
-		sco_recv_scodata(conn, skb);
+static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_iso_hdr *hdr;
+	__u16 handle, flags;
+	int err;
+
+	hdr = skb_pull_data(skb, sizeof(*hdr));
+	if (!hdr) {
+		bt_dev_err(hdev, "ISO packet too small");
+		kfree_skb(skb);
 		return;
-	} else {
-		BT_ERR("%s SCO packet for unknown connection handle %d",
-		       hdev->name, handle);
 	}
 
-	kfree_skb(skb);
+	handle = __le16_to_cpu(hdr->handle);
+	flags = hci_flags(handle);
+	handle = hci_handle(handle);
+
+	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
+		   handle, flags);
+
+	err = iso_recv(hdev, handle, skb, flags);
+	if (err == -ENOENT)
+		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
+			   handle);
+	else if (err)
+		bt_dev_dbg(hdev, "ISO packet recv for handle %d failed: %d",
+			   handle, err);
 }
 
 static bool hci_req_is_complete(struct hci_dev *hdev)
@@ -3338,7 +3931,7 @@
 	if (!skb)
 		return true;
 
-	return bt_cb(skb)->req.start;
+	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
 }
 
 static void hci_resend_last(struct hci_dev *hdev)
@@ -3363,9 +3956,10 @@
 	queue_work(hdev->workqueue, &hdev->cmd_work);
 }
 
-void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+			  hci_req_complete_t *req_complete,
+			  hci_req_complete_skb_t *req_complete_skb)
 {
-	hci_req_complete_t req_complete = NULL;
 	struct sk_buff *skb;
 	unsigned long flags;
 
@@ -3387,38 +3981,46 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
 		return;
 	}
 
+	/* If we reach this point this event matches the last command sent */
+	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
+
 	/* If the command succeeded and there's still more commands in
 	 * this request the request is not yet complete.
	 */
 	if (!status && !hci_req_is_complete(hdev))
 		return;
 
+	skb = hdev->req_skb;
+
 	/* If this was the last command in a request the complete
-	 * callback would be found in hdev->sent_cmd instead of the
+	 * callback would be found in hdev->req_skb instead of the
 	 * command queue (hdev->cmd_q).
	 */
-	if (hdev->sent_cmd) {
-		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
-		if (req_complete)
-			goto call_complete;
+	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
+		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
+		return;
+	}
+
+	if (skb && bt_cb(skb)->hci.req_complete) {
+		*req_complete = bt_cb(skb)->hci.req_complete;
+		return;
 	}
 
 	/* Remove all pending commands belonging to this request */
 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
-		if (bt_cb(skb)->req.start) {
+		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
 			__skb_queue_head(&hdev->cmd_q, skb);
 			break;
 		}
 
-		req_complete = bt_cb(skb)->req.complete;
-		kfree_skb(skb);
+		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
+			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
+		else
+			*req_complete = bt_cb(skb)->hci.req_complete;
+		dev_kfree_skb_irq(skb);
 	}
 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
-
-call_complete:
-	if (req_complete)
-		req_complete(hdev, status);
 }
 
 static void hci_rx_work(struct work_struct *work)
@@ -3428,7 +4030,14 @@
 
 	BT_DBG("%s", hdev->name);
 
-	while ((skb = skb_dequeue(&hdev->rx_q))) {
+	/* The kcov_remote functions used for collecting packet parsing
+	 * coverage information from this background thread and associate
+	 * the coverage with the syscall's thread which originally injected
+	 * the packet. This helps fuzzing the kernel.
+	 */
+	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
+		kcov_remote_start_common(skb_get_kcov_handle(skb));
+
 		/* Send copy to monitor */
 		hci_send_to_monitor(hdev, skb);
 
@@ -3437,23 +4046,31 @@
 			hci_send_to_sock(hdev, skb);
 		}
 
-		if (test_bit(HCI_RAW, &hdev->flags)) {
+		/* If the device has been opened in HCI_USER_CHANNEL,
+		 * the userspace has exclusive access to device.
+		 * When device is HCI_INIT, we still need to process
+		 * the data packets to the driver in order
+		 * to complete its setup().
		 */
+		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+		    !test_bit(HCI_INIT, &hdev->flags)) {
 			kfree_skb(skb);
 			continue;
 		}
 
 		if (test_bit(HCI_INIT, &hdev->flags)) {
 			/* Don't process data packets in this states. */
-			switch (bt_cb(skb)->pkt_type) {
+			switch (hci_skb_pkt_type(skb)) {
 			case HCI_ACLDATA_PKT:
 			case HCI_SCODATA_PKT:
+			case HCI_ISODATA_PKT:
 				kfree_skb(skb);
 				continue;
 			}
 		}
 
 		/* Process frame */
-		switch (bt_cb(skb)->pkt_type) {
+		switch (hci_skb_pkt_type(skb)) {
 		case HCI_EVENT_PKT:
 			BT_DBG("%s Event packet", hdev->name);
 			hci_event_packet(hdev, skb);
@@ -3469,6 +4086,11 @@
 			hci_scodata_packet(hdev, skb);
 			break;
 
+		case HCI_ISODATA_PKT:
+			BT_DBG("%s ISO data packet", hdev->name);
+			hci_isodata_packet(hdev, skb);
+			break;
+
 		default:
 			kfree_skb(skb);
 			break;
@@ -3476,10 +4098,47 @@
 	}
 }
 
+static int hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	int err;
+
+	bt_dev_dbg(hdev, "skb %p", skb);
+
+	kfree_skb(hdev->sent_cmd);
+
+	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
+	if (!hdev->sent_cmd) {
+		skb_queue_head(&hdev->cmd_q, skb);
+		queue_work(hdev->workqueue, &hdev->cmd_work);
+		return -EINVAL;
+	}
+
+	if (hci_skb_opcode(skb) != HCI_OP_NOP) {
+		err = hci_send_frame(hdev, skb);
+		if (err < 0) {
+			hci_cmd_sync_cancel_sync(hdev, -err);
+			return err;
+		}
+		atomic_dec(&hdev->cmd_cnt);
+	} else {
+		err = -ENODATA;
+		kfree_skb(skb);
+	}
+
+	if (hdev->req_status == HCI_REQ_PEND &&
+	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
+		kfree_skb(hdev->req_skb);
+		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
+	}
+
+	return err;
+}
+
 static void hci_cmd_work(struct work_struct *work)
 {
 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
 	struct sk_buff *skb;
+	int err;
 
 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
@@ -3490,32 +4149,17 @@
 		if (!skb)
 			return;
 
-		kfree_skb(hdev->sent_cmd);
-
-		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
-		if (hdev->sent_cmd) {
-			atomic_dec(&hdev->cmd_cnt);
-			hci_send_frame(skb);
-			if (test_bit(HCI_RESET, &hdev->flags))
-				del_timer(&hdev->cmd_timer);
-			else
-				mod_timer(&hdev->cmd_timer,
-					  jiffies + HCI_CMD_TIMEOUT);
-		} else {
-			skb_queue_head(&hdev->cmd_q, skb);
-			queue_work(hdev->workqueue, &hdev->cmd_work);
-		}
-	}
-}
-
-u8 bdaddr_to_le(u8 bdaddr_type)
-{
-	switch (bdaddr_type) {
-	case BDADDR_LE_PUBLIC:
-		return ADDR_LE_DEV_PUBLIC;
+		err = hci_send_cmd_sync(hdev, skb);
+		if (err)
+			return;
 
-	default:
-		/* Fallback to LE Random address type */
-		return ADDR_LE_DEV_RANDOM;
+		rcu_read_lock();
+		if (test_bit(HCI_RESET, &hdev->flags) ||
+		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
+			cancel_delayed_work(&hdev->cmd_timer);
+		else
+			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
+					   HCI_CMD_TIMEOUT);
+		rcu_read_unlock();
 	}
 }
