Diffstat (limited to 'net/bluetooth/hci_core.c')
| -rw-r--r-- | net/bluetooth/hci_core.c | 3131 |
1 file changed, 1417 insertions(+), 1714 deletions(-)
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 7352fe85674b..8ccec73dce45 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -26,21 +26,26 @@ /* Bluetooth HCI core. */ #include <linux/export.h> -#include <linux/idr.h> #include <linux/rfkill.h> #include <linux/debugfs.h> #include <linux/crypto.h> -#include <asm/unaligned.h> +#include <linux/kcov.h> +#include <linux/property.h> +#include <linux/suspend.h> +#include <linux/wait.h> +#include <linux/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/mgmt.h> -#include "hci_request.h" #include "hci_debugfs.h" #include "smp.h" #include "leds.h" +#include "msft.h" +#include "aosp.h" +#include "hci_codec.h" static void hci_rx_work(struct work_struct *work); static void hci_cmd_work(struct work_struct *work); @@ -57,950 +62,9 @@ DEFINE_MUTEX(hci_cb_list_lock); /* HCI ID Numbering */ static DEFINE_IDA(hci_index_ida); -/* ---- HCI debugfs entries ---- */ - -static ssize_t dut_mode_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct hci_dev *hdev = file->private_data; - char buf[3]; - - buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N'; - buf[1] = '\n'; - buf[2] = '\0'; - return simple_read_from_buffer(user_buf, count, ppos, buf, 2); -} - -static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct hci_dev *hdev = file->private_data; - struct sk_buff *skb; - bool enable; - int err; - - if (!test_bit(HCI_UP, &hdev->flags)) - return -ENETDOWN; - - err = kstrtobool_from_user(user_buf, count, &enable); - if (err) - return err; - - if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE)) - return -EALREADY; - - hci_req_sync_lock(hdev); - if (enable) - skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL, - HCI_CMD_TIMEOUT); - else - skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, - HCI_CMD_TIMEOUT); - hci_req_sync_unlock(hdev); - - if (IS_ERR(skb)) - return PTR_ERR(skb); - - kfree_skb(skb); - - hci_dev_change_flag(hdev, HCI_DUT_MODE); - - return count; -} - -static const struct file_operations dut_mode_fops = { - .open = simple_open, - .read = dut_mode_read, - .write = dut_mode_write, - .llseek = default_llseek, -}; - -static ssize_t vendor_diag_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct hci_dev *hdev = file->private_data; - char buf[3]; - - buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N'; - buf[1] = '\n'; - buf[2] = '\0'; - return simple_read_from_buffer(user_buf, count, ppos, buf, 2); -} - -static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct hci_dev *hdev = file->private_data; - bool enable; - int err; - - err = kstrtobool_from_user(user_buf, count, &enable); - if (err) - return err; - - /* When the diagnostic flags are not persistent and the transport - * is not active or in user channel operation, then there is no need - * for the vendor callback. Instead just store the desired value and - * the setting will be programmed when the controller gets powered on. 
- */ - if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && - (!test_bit(HCI_RUNNING, &hdev->flags) || - hci_dev_test_flag(hdev, HCI_USER_CHANNEL))) - goto done; - - hci_req_sync_lock(hdev); - err = hdev->set_diag(hdev, enable); - hci_req_sync_unlock(hdev); - - if (err < 0) - return err; - -done: - if (enable) - hci_dev_set_flag(hdev, HCI_VENDOR_DIAG); - else - hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG); - - return count; -} - -static const struct file_operations vendor_diag_fops = { - .open = simple_open, - .read = vendor_diag_read, - .write = vendor_diag_write, - .llseek = default_llseek, -}; - -static void hci_debugfs_create_basic(struct hci_dev *hdev) -{ - debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, - &dut_mode_fops); - - if (hdev->set_diag) - debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev, - &vendor_diag_fops); -} - -static int hci_reset_req(struct hci_request *req, unsigned long opt) -{ - BT_DBG("%s %ld", req->hdev->name, opt); - - /* Reset device */ - set_bit(HCI_RESET, &req->hdev->flags); - hci_req_add(req, HCI_OP_RESET, 0, NULL); - return 0; -} - -static void bredr_init(struct hci_request *req) -{ - req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; - - /* Read Local Supported Features */ - hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); - - /* Read Local Version */ - hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); - - /* Read BD Address */ - hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL); -} - -static void amp_init1(struct hci_request *req) -{ - req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; - - /* Read Local Version */ - hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); - - /* Read Local Supported Commands */ - hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); - - /* Read Local AMP Info */ - hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); - - /* Read Data Blk size */ - hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL); - - /* Read Flow Control Mode */ - hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL); - - /* Read Location Data */ - hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL); -} - -static int amp_init2(struct hci_request *req) -{ - /* Read Local Supported Features. Not all AMP controllers - * support this so it's placed conditionally in the second - * stage init. - */ - if (req->hdev->commands[14] & 0x20) - hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); - - return 0; -} - -static int hci_init1_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - BT_DBG("%s %ld", hdev->name, opt); - - /* Reset */ - if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) - hci_reset_req(req, 0); - - switch (hdev->dev_type) { - case HCI_PRIMARY: - bredr_init(req); - break; - case HCI_AMP: - amp_init1(req); - break; - default: - bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type); - break; - } - - return 0; -} - -static void bredr_setup(struct hci_request *req) -{ - __le16 param; - __u8 flt_type; - - /* Read Buffer Size (ACL mtu, max pkt, etc.) 
*/ - hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL); - - /* Read Class of Device */ - hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); - - /* Read Local Name */ - hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL); - - /* Read Voice Setting */ - hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL); - - /* Read Number of Supported IAC */ - hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL); - - /* Read Current IAC LAP */ - hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL); - - /* Clear Event Filters */ - flt_type = HCI_FLT_CLEAR_ALL; - hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type); - - /* Connection accept timeout ~20 secs */ - param = cpu_to_le16(0x7d00); - hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); -} - -static void le_setup(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - - /* Read LE Buffer Size */ - hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); - - /* Read LE Local Supported Features */ - hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL); - - /* Read LE Supported States */ - hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); - - /* LE-only controllers have LE implicitly enabled */ - if (!lmp_bredr_capable(hdev)) - hci_dev_set_flag(hdev, HCI_LE_ENABLED); -} - -static void hci_setup_event_mask(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - - /* The second byte is 0xff instead of 0x9f (two reserved bits - * disabled) since a Broadcom 1.2 dongle doesn't respond to the - * command otherwise. - */ - u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; - - /* CSR 1.1 dongles does not accept any bitfield so don't try to set - * any event mask for pre 1.2 devices. - */ - if (hdev->hci_ver < BLUETOOTH_VER_1_2) - return; - - if (lmp_bredr_capable(hdev)) { - events[4] |= 0x01; /* Flow Specification Complete */ - } else { - /* Use a different default for LE-only devices */ - memset(events, 0, sizeof(events)); - events[1] |= 0x20; /* Command Complete */ - events[1] |= 0x40; /* Command Status */ - events[1] |= 0x80; /* Hardware Error */ - - /* If the controller supports the Disconnect command, enable - * the corresponding event. In addition enable packet flow - * control related events. - */ - if (hdev->commands[0] & 0x20) { - events[0] |= 0x10; /* Disconnection Complete */ - events[2] |= 0x04; /* Number of Completed Packets */ - events[3] |= 0x02; /* Data Buffer Overflow */ - } - - /* If the controller supports the Read Remote Version - * Information command, enable the corresponding event. 
- */ - if (hdev->commands[2] & 0x80) - events[1] |= 0x08; /* Read Remote Version Information - * Complete - */ - - if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { - events[0] |= 0x80; /* Encryption Change */ - events[5] |= 0x80; /* Encryption Key Refresh Complete */ - } - } - - if (lmp_inq_rssi_capable(hdev) || - test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) - events[4] |= 0x02; /* Inquiry Result with RSSI */ - - if (lmp_ext_feat_capable(hdev)) - events[4] |= 0x04; /* Read Remote Extended Features Complete */ - - if (lmp_esco_capable(hdev)) { - events[5] |= 0x08; /* Synchronous Connection Complete */ - events[5] |= 0x10; /* Synchronous Connection Changed */ - } - - if (lmp_sniffsubr_capable(hdev)) - events[5] |= 0x20; /* Sniff Subrating */ - - if (lmp_pause_enc_capable(hdev)) - events[5] |= 0x80; /* Encryption Key Refresh Complete */ - - if (lmp_ext_inq_capable(hdev)) - events[5] |= 0x40; /* Extended Inquiry Result */ - - if (lmp_no_flush_capable(hdev)) - events[7] |= 0x01; /* Enhanced Flush Complete */ - - if (lmp_lsto_capable(hdev)) - events[6] |= 0x80; /* Link Supervision Timeout Changed */ - - if (lmp_ssp_capable(hdev)) { - events[6] |= 0x01; /* IO Capability Request */ - events[6] |= 0x02; /* IO Capability Response */ - events[6] |= 0x04; /* User Confirmation Request */ - events[6] |= 0x08; /* User Passkey Request */ - events[6] |= 0x10; /* Remote OOB Data Request */ - events[6] |= 0x20; /* Simple Pairing Complete */ - events[7] |= 0x04; /* User Passkey Notification */ - events[7] |= 0x08; /* Keypress Notification */ - events[7] |= 0x10; /* Remote Host Supported - * Features Notification - */ - } - - if (lmp_le_capable(hdev)) - events[7] |= 0x20; /* LE Meta-Event */ - - hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events); -} - -static int hci_init2_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - if (hdev->dev_type == HCI_AMP) - return amp_init2(req); - - if (lmp_bredr_capable(hdev)) - bredr_setup(req); - else - hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); - - if (lmp_le_capable(hdev)) - le_setup(req); - - /* All Bluetooth 1.2 and later controllers should support the - * HCI command for reading the local supported commands. - * - * Unfortunately some controllers indicate Bluetooth 1.2 support, - * but do not have support for this command. If that is the case, - * the driver can quirk the behavior and skip reading the local - * supported commands. - */ - if (hdev->hci_ver > BLUETOOTH_VER_1_1 && - !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) - hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); - - if (lmp_ssp_capable(hdev)) { - /* When SSP is available, then the host features page - * should also be available as well. However some - * controllers list the max_page as 0 as long as SSP - * has not been enabled. To achieve proper debugging - * output, force the minimum max_page to 1 at least. - */ - hdev->max_page = 0x01; - - if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { - u8 mode = 0x01; - - hci_req_add(req, HCI_OP_WRITE_SSP_MODE, - sizeof(mode), &mode); - } else { - struct hci_cp_write_eir cp; - - memset(hdev->eir, 0, sizeof(hdev->eir)); - memset(&cp, 0, sizeof(cp)); - - hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); - } - } - - if (lmp_inq_rssi_capable(hdev) || - test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) { - u8 mode; - - /* If Extended Inquiry Result events are supported, then - * they are clearly preferred over Inquiry Result with RSSI - * events. 
- */ - mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01; - - hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode); - } - - if (lmp_inq_tx_pwr_capable(hdev)) - hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); - - if (lmp_ext_feat_capable(hdev)) { - struct hci_cp_read_local_ext_features cp; - - cp.page = 0x01; - hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, - sizeof(cp), &cp); - } - - if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) { - u8 enable = 1; - hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable), - &enable); - } - - return 0; -} - -static void hci_setup_link_policy(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_def_link_policy cp; - u16 link_policy = 0; - - if (lmp_rswitch_capable(hdev)) - link_policy |= HCI_LP_RSWITCH; - if (lmp_hold_capable(hdev)) - link_policy |= HCI_LP_HOLD; - if (lmp_sniff_capable(hdev)) - link_policy |= HCI_LP_SNIFF; - if (lmp_park_capable(hdev)) - link_policy |= HCI_LP_PARK; - - cp.policy = cpu_to_le16(link_policy); - hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); -} - -static void hci_set_le_support(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_le_host_supported cp; - - /* LE-only devices do not support explicit enablement */ - if (!lmp_bredr_capable(hdev)) - return; - - memset(&cp, 0, sizeof(cp)); - - if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { - cp.le = 0x01; - cp.simul = 0x00; - } - - if (cp.le != lmp_host_le_capable(hdev)) - hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), - &cp); -} - -static void hci_set_event_mask_page_2(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; - bool changed = false; - - /* If Connectionless Slave Broadcast master role is supported - * enable all necessary events for it. - */ - if (lmp_csb_master_capable(hdev)) { - events[1] |= 0x40; /* Triggered Clock Capture */ - events[1] |= 0x80; /* Synchronization Train Complete */ - events[2] |= 0x10; /* Slave Page Response Timeout */ - events[2] |= 0x20; /* CSB Channel Map Change */ - changed = true; - } - - /* If Connectionless Slave Broadcast slave role is supported - * enable all necessary events for it. - */ - if (lmp_csb_slave_capable(hdev)) { - events[2] |= 0x01; /* Synchronization Train Received */ - events[2] |= 0x02; /* CSB Receive */ - events[2] |= 0x04; /* CSB Timeout */ - events[2] |= 0x08; /* Truncated Page Complete */ - changed = true; - } - - /* Enable Authenticated Payload Timeout Expired event if supported */ - if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { - events[2] |= 0x80; - changed = true; - } - - /* Some Broadcom based controllers indicate support for Set Event - * Mask Page 2 command, but then actually do not support it. Since - * the default value is all bits set to zero, the command is only - * required if the event mask has to be changed. In case no change - * to the event mask is needed, skip this command. 
- */ - if (changed) - hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, - sizeof(events), events); -} - -static int hci_init3_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - u8 p; - - hci_setup_event_mask(req); - - if (hdev->commands[6] & 0x20 && - !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) { - struct hci_cp_read_stored_link_key cp; - - bacpy(&cp.bdaddr, BDADDR_ANY); - cp.read_all = 0x01; - hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp); - } - - if (hdev->commands[5] & 0x10) - hci_setup_link_policy(req); - - if (hdev->commands[8] & 0x01) - hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); - - /* Some older Broadcom based Bluetooth 1.2 controllers do not - * support the Read Page Scan Type command. Check support for - * this command in the bit mask of supported commands. - */ - if (hdev->commands[13] & 0x01) - hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL); - - if (lmp_le_capable(hdev)) { - u8 events[8]; - - memset(events, 0, sizeof(events)); - - if (hdev->le_features[0] & HCI_LE_ENCRYPTION) - events[0] |= 0x10; /* LE Long Term Key Request */ - - /* If controller supports the Connection Parameters Request - * Link Layer Procedure, enable the corresponding event. - */ - if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) - events[0] |= 0x20; /* LE Remote Connection - * Parameter Request - */ - - /* If the controller supports the Data Length Extension - * feature, enable the corresponding event. - */ - if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) - events[0] |= 0x40; /* LE Data Length Change */ - - /* If the controller supports Extended Scanner Filter - * Policies, enable the correspondig event. - */ - if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) - events[1] |= 0x04; /* LE Direct Advertising - * Report - */ - - /* If the controller supports Channel Selection Algorithm #2 - * feature, enable the corresponding event. - */ - if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) - events[2] |= 0x08; /* LE Channel Selection - * Algorithm - */ - - /* If the controller supports the LE Set Scan Enable command, - * enable the corresponding advertising report event. - */ - if (hdev->commands[26] & 0x08) - events[0] |= 0x02; /* LE Advertising Report */ - - /* If the controller supports the LE Create Connection - * command, enable the corresponding event. - */ - if (hdev->commands[26] & 0x10) - events[0] |= 0x01; /* LE Connection Complete */ - - /* If the controller supports the LE Connection Update - * command, enable the corresponding event. - */ - if (hdev->commands[27] & 0x04) - events[0] |= 0x04; /* LE Connection Update - * Complete - */ - - /* If the controller supports the LE Read Remote Used Features - * command, enable the corresponding event. - */ - if (hdev->commands[27] & 0x20) - events[0] |= 0x08; /* LE Read Remote Used - * Features Complete - */ - - /* If the controller supports the LE Read Local P-256 - * Public Key command, enable the corresponding event. - */ - if (hdev->commands[34] & 0x02) - events[0] |= 0x80; /* LE Read Local P-256 - * Public Key Complete - */ - - /* If the controller supports the LE Generate DHKey - * command, enable the corresponding event. - */ - if (hdev->commands[34] & 0x04) - events[1] |= 0x01; /* LE Generate DHKey Complete */ - - /* If the controller supports the LE Set Default PHY or - * LE Set PHY commands, enable the corresponding event. 
- */ - if (hdev->commands[35] & (0x20 | 0x40)) - events[1] |= 0x08; /* LE PHY Update Complete */ - - /* If the controller supports LE Set Extended Scan Parameters - * and LE Set Extended Scan Enable commands, enable the - * corresponding event. - */ - if (use_ext_scan(hdev)) - events[1] |= 0x10; /* LE Extended Advertising - * Report - */ - - /* If the controller supports the LE Extended Create Connection - * command, enable the corresponding event. - */ - if (use_ext_conn(hdev)) - events[1] |= 0x02; /* LE Enhanced Connection - * Complete - */ - - /* If the controller supports the LE Extended Advertising - * command, enable the corresponding event. - */ - if (ext_adv_capable(hdev)) - events[2] |= 0x02; /* LE Advertising Set - * Terminated - */ - - hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), - events); - - /* Read LE Advertising Channel TX Power */ - if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { - /* HCI TS spec forbids mixing of legacy and extended - * advertising commands wherein READ_ADV_TX_POWER is - * also included. So do not call it if extended adv - * is supported otherwise controller will return - * COMMAND_DISALLOWED for extended commands. - */ - hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); - } - - if (hdev->commands[26] & 0x40) { - /* Read LE White List Size */ - hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, - 0, NULL); - } - - if (hdev->commands[26] & 0x80) { - /* Clear LE White List */ - hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL); - } - - if (hdev->commands[34] & 0x40) { - /* Read LE Resolving List Size */ - hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE, - 0, NULL); - } - - if (hdev->commands[34] & 0x20) { - /* Clear LE Resolving List */ - hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL); - } - - if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { - /* Read LE Maximum Data Length */ - hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL); - - /* Read LE Suggested Default Data Length */ - hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL); - } - - if (ext_adv_capable(hdev)) { - /* Read LE Number of Supported Advertising Sets */ - hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, - 0, NULL); - } - - hci_set_le_support(req); - } - - /* Read features beyond page 1 if available */ - for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { - struct hci_cp_read_local_ext_features cp; - - cp.page = p; - hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, - sizeof(cp), &cp); - } - - return 0; -} - -static int hci_init4_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - /* Some Broadcom based Bluetooth controllers do not support the - * Delete Stored Link Key command. They are clearly indicating its - * absence in the bit mask of supported commands. - * - * Check the supported commands and only if the the command is marked - * as supported send it. If not supported assume that the controller - * does not have actual support for stored link keys which makes this - * command redundant anyway. - * - * Some controllers indicate that they support handling deleting - * stored link keys, but they don't. The quirk lets a driver - * just disable this command. 
- */ - if (hdev->commands[6] & 0x80 && - !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) { - struct hci_cp_delete_stored_link_key cp; - - bacpy(&cp.bdaddr, BDADDR_ANY); - cp.delete_all = 0x01; - hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, - sizeof(cp), &cp); - } - - /* Set event mask page 2 if the HCI command for it is supported */ - if (hdev->commands[22] & 0x04) - hci_set_event_mask_page_2(req); - - /* Read local codec list if the HCI command is supported */ - if (hdev->commands[29] & 0x20) - hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL); - - /* Get MWS transport configuration if the HCI command is supported */ - if (hdev->commands[30] & 0x08) - hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL); - - /* Check for Synchronization Train support */ - if (lmp_sync_train_capable(hdev)) - hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL); - - /* Enable Secure Connections if supported and configured */ - if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && - bredr_sc_enabled(hdev)) { - u8 support = 0x01; - - hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, - sizeof(support), &support); - } - - /* Set Suggested Default Data Length to maximum if supported */ - if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { - struct hci_cp_le_write_def_data_len cp; - - cp.tx_len = hdev->le_max_tx_len; - cp.tx_time = hdev->le_max_tx_time; - hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp); - } - - /* Set Default PHY parameters if command is supported */ - if (hdev->commands[35] & 0x20) { - struct hci_cp_le_set_default_phy cp; - - cp.all_phys = 0x00; - cp.tx_phys = hdev->le_tx_def_phys; - cp.rx_phys = hdev->le_rx_def_phys; - - hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp); - } - - return 0; -} - -static int __hci_init(struct hci_dev *hdev) -{ - int err; - - err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - if (hci_dev_test_flag(hdev, HCI_SETUP)) - hci_debugfs_create_basic(hdev); - - err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode - * BR/EDR/LE type controllers. AMP controllers only need the - * first two stages of init. - */ - if (hdev->dev_type != HCI_PRIMARY) - return 0; - - err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - /* This function is only called when the controller is actually in - * configured state. When the controller is marked as unconfigured, - * this initialization procedure is not run. - * - * It means that it is possible that a controller runs through its - * setup phase and then discovers missing settings. If that is the - * case, then this function will not be called. It then will only - * be called during the config phase. - * - * So only when in setup phase or config phase, create the debugfs - * entries and register the SMP channels. 
- */ - if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG)) - return 0; - - hci_debugfs_create_common(hdev); - - if (lmp_bredr_capable(hdev)) - hci_debugfs_create_bredr(hdev); - - if (lmp_le_capable(hdev)) - hci_debugfs_create_le(hdev); - - return 0; -} - -static int hci_init0_req(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - BT_DBG("%s %ld", hdev->name, opt); - - /* Reset */ - if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) - hci_reset_req(req, 0); - - /* Read Local Version */ - hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); - - /* Read BD Address */ - if (hdev->set_bdaddr) - hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL); - - return 0; -} - -static int __hci_unconf_init(struct hci_dev *hdev) -{ - int err; - - if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) - return 0; - - err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL); - if (err < 0) - return err; - - if (hci_dev_test_flag(hdev, HCI_SETUP)) - hci_debugfs_create_basic(hdev); - - return 0; -} - -static int hci_scan_req(struct hci_request *req, unsigned long opt) -{ - __u8 scan = opt; - - BT_DBG("%s %x", req->hdev->name, scan); - - /* Inquiry and Page scans */ - hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); - return 0; -} - -static int hci_auth_req(struct hci_request *req, unsigned long opt) -{ - __u8 auth = opt; - - BT_DBG("%s %x", req->hdev->name, auth); - - /* Authentication */ - hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); - return 0; -} - -static int hci_encrypt_req(struct hci_request *req, unsigned long opt) -{ - __u8 encrypt = opt; - - BT_DBG("%s %x", req->hdev->name, encrypt); - - /* Encryption */ - hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); - return 0; -} - -static int hci_linkpol_req(struct hci_request *req, unsigned long opt) -{ - __le16 policy = cpu_to_le16(opt); - - BT_DBG("%s %x", req->hdev->name, policy); - - /* Default link policy */ - hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); - return 0; -} - /* Get HCI device by index. * Device is held on return. 
*/ -struct hci_dev *hci_dev_get(int index) +static struct hci_dev *__hci_dev_get(int index, int *srcu_index) { struct hci_dev *hdev = NULL, *d; @@ -1013,6 +77,8 @@ struct hci_dev *hci_dev_get(int index) list_for_each_entry(d, &hci_dev_list, list) { if (d->id == index) { hdev = hci_dev_hold(d); + if (srcu_index) + *srcu_index = srcu_read_lock(&d->srcu); break; } } @@ -1020,6 +86,22 @@ struct hci_dev *hci_dev_get(int index) return hdev; } +struct hci_dev *hci_dev_get(int index) +{ + return __hci_dev_get(index, NULL); +} + +static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index) +{ + return __hci_dev_get(index, srcu_index); +} + +static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index) +{ + srcu_read_unlock(&hdev->srcu, srcu_index); + hci_dev_put(hdev); +} + /* ---- Inquiry support ---- */ bool hci_discovery_active(struct hci_dev *hdev) @@ -1040,8 +122,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state) { int old_state = hdev->discovery.state; - BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state); - if (old_state == state) return; @@ -1049,7 +129,7 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state) switch (state) { case DISCOVERY_STOPPED: - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); if (old_state != DISCOVERY_STARTING) mgmt_discovering(hdev, 0); @@ -1064,6 +144,8 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state) case DISCOVERY_STOPPING: break; } + + bt_dev_dbg(hdev, "state %u -> %u", old_state, state); } void hci_inquiry_cache_flush(struct hci_dev *hdev) @@ -1240,33 +322,12 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) return copied; } -static int hci_inq_req(struct hci_request *req, unsigned long opt) -{ - struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; - struct hci_dev *hdev = req->hdev; - struct hci_cp_inquiry cp; - - BT_DBG("%s", hdev->name); - - if (test_bit(HCI_INQUIRY, &hdev->flags)) - return 0; - - /* Start Inquiry */ - memcpy(&cp.lap, &ir->lap, 3); - cp.length = ir->length; - cp.num_rsp = ir->num_rsp; - hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); - - return 0; -} - int hci_inquiry(void __user *arg) { __u8 __user *ptr = arg; struct hci_inquiry_req ir; struct hci_dev *hdev; int err = 0, do_inquiry = 0, max_rsp; - long timeo; __u8 *buf; if (copy_from_user(&ir, ptr, sizeof(ir))) @@ -1286,13 +347,14 @@ int hci_inquiry(void __user *arg) goto done; } - if (hdev->dev_type != HCI_PRIMARY) { + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = -EOPNOTSUPP; goto done; } - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { - err = -EOPNOTSUPP; + /* Restrict maximum inquiry length to 60 seconds */ + if (ir.length > 60) { + err = -EINVAL; goto done; } @@ -1304,11 +366,11 @@ int hci_inquiry(void __user *arg) } hci_dev_unlock(hdev); - timeo = ir.length * msecs_to_jiffies(2000); - if (do_inquiry) { - err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, - timeo, NULL); + hci_req_sync_lock(hdev); + err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp); + hci_req_sync_unlock(hdev); + if (err < 0) goto done; @@ -1316,8 +378,10 @@ int hci_inquiry(void __user *arg) * cleared). If it is interrupted by a signal, return -EINTR. 
*/ if (wait_on_bit(&hdev->flags, HCI_INQUIRY, - TASK_INTERRUPTIBLE)) - return -EINTR; + TASK_INTERRUPTIBLE)) { + err = -EINTR; + goto done; + } } /* for unlimited number of responses we will use buffer with @@ -1363,161 +427,8 @@ static int hci_dev_do_open(struct hci_dev *hdev) hci_req_sync_lock(hdev); - if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { - ret = -ENODEV; - goto done; - } - - if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG)) { - /* Check for rfkill but allow the HCI setup stage to - * proceed (which in itself doesn't cause any RF activity). - */ - if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { - ret = -ERFKILL; - goto done; - } + ret = hci_dev_open_sync(hdev); - /* Check for valid public address or a configured static - * random adddress, but let the HCI setup proceed to - * be able to determine if there is a public address - * or not. - * - * In case of user channel usage, it is not important - * if a public address or static random address is - * available. - * - * This check is only valid for BR/EDR controllers - * since AMP controllers do not have an address. - */ - if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hdev->dev_type == HCI_PRIMARY && - !bacmp(&hdev->bdaddr, BDADDR_ANY) && - !bacmp(&hdev->static_addr, BDADDR_ANY)) { - ret = -EADDRNOTAVAIL; - goto done; - } - } - - if (test_bit(HCI_UP, &hdev->flags)) { - ret = -EALREADY; - goto done; - } - - if (hdev->open(hdev)) { - ret = -EIO; - goto done; - } - - set_bit(HCI_RUNNING, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_OPEN); - - atomic_set(&hdev->cmd_cnt, 1); - set_bit(HCI_INIT, &hdev->flags); - - if (hci_dev_test_flag(hdev, HCI_SETUP) || - test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { - hci_sock_dev_event(hdev, HCI_DEV_SETUP); - - if (hdev->setup) - ret = hdev->setup(hdev); - - /* The transport driver can set these quirks before - * creating the HCI device or in its setup callback. - * - * In case any of them is set, the controller has to - * start up as unconfigured. - */ - if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || - test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks)) - hci_dev_set_flag(hdev, HCI_UNCONFIGURED); - - /* For an unconfigured controller it is required to - * read at least the version information provided by - * the Read Local Version Information command. - * - * If the set_bdaddr driver callback is provided, then - * also the original Bluetooth public device address - * will be read using the Read BD Address command. - */ - if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) - ret = __hci_unconf_init(hdev); - } - - if (hci_dev_test_flag(hdev, HCI_CONFIG)) { - /* If public address change is configured, ensure that - * the address gets programmed. If the driver does not - * support changing the public address, fail the power - * on procedure. - */ - if (bacmp(&hdev->public_addr, BDADDR_ANY) && - hdev->set_bdaddr) - ret = hdev->set_bdaddr(hdev, &hdev->public_addr); - else - ret = -EADDRNOTAVAIL; - } - - if (!ret) { - if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { - ret = __hci_init(hdev); - if (!ret && hdev->post_init) - ret = hdev->post_init(hdev); - } - } - - /* If the HCI Reset command is clearing all diagnostic settings, - * then they need to be reprogrammed after the init procedure - * completed. 
- */ - if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) - ret = hdev->set_diag(hdev, true); - - clear_bit(HCI_INIT, &hdev->flags); - - if (!ret) { - hci_dev_hold(hdev); - hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); - hci_adv_instances_set_rpa_expired(hdev, true); - set_bit(HCI_UP, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_UP); - hci_leds_update_powered(hdev, true); - if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG) && - !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hci_dev_test_flag(hdev, HCI_MGMT) && - hdev->dev_type == HCI_PRIMARY) { - ret = __hci_req_hci_power_on(hdev); - mgmt_power_on(hdev, ret); - } - } else { - /* Init failed, cleanup */ - flush_work(&hdev->tx_work); - flush_work(&hdev->cmd_work); - flush_work(&hdev->rx_work); - - skb_queue_purge(&hdev->cmd_q); - skb_queue_purge(&hdev->rx_q); - - if (hdev->flush) - hdev->flush(hdev); - - if (hdev->sent_cmd) { - kfree_skb(hdev->sent_cmd); - hdev->sent_cmd = NULL; - } - - clear_bit(HCI_RUNNING, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_CLOSE); - - hdev->close(hdev); - hdev->flags &= BIT(HCI_RAW); - } - -done: hci_req_sync_unlock(hdev); return ret; } @@ -1579,147 +490,19 @@ done: return err; } -/* This function requires the caller holds hdev->lock */ -static void hci_pend_le_actions_clear(struct hci_dev *hdev) -{ - struct hci_conn_params *p; - - list_for_each_entry(p, &hdev->le_conn_params, list) { - if (p->conn) { - hci_conn_drop(p->conn); - hci_conn_put(p->conn); - p->conn = NULL; - } - list_del_init(&p->action); - } - - BT_DBG("All LE pending actions cleared"); -} - int hci_dev_do_close(struct hci_dev *hdev) { - bool auto_off; + int err; BT_DBG("%s %p", hdev->name, hdev); - if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - test_bit(HCI_UP, &hdev->flags)) { - /* Execute vendor specific shutdown routine */ - if (hdev->shutdown) - hdev->shutdown(hdev); - } - - cancel_delayed_work(&hdev->power_off); - - hci_request_cancel_all(hdev); hci_req_sync_lock(hdev); - if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { - cancel_delayed_work_sync(&hdev->cmd_timer); - hci_req_sync_unlock(hdev); - return 0; - } - - hci_leds_update_powered(hdev, false); - - /* Flush RX and TX works */ - flush_work(&hdev->tx_work); - flush_work(&hdev->rx_work); - - if (hdev->discov_timeout > 0) { - hdev->discov_timeout = 0; - hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); - hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); - } - - if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) - cancel_delayed_work(&hdev->service_cache); - - if (hci_dev_test_flag(hdev, HCI_MGMT)) { - struct adv_info *adv_instance; - - cancel_delayed_work_sync(&hdev->rpa_expired); - - list_for_each_entry(adv_instance, &hdev->adv_instances, list) - cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); - } - - /* Avoid potential lockdep warnings from the *_flush() calls by - * ensuring the workqueue is empty up front. 
- */ - drain_workqueue(hdev->workqueue); - - hci_dev_lock(hdev); - - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - - auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); - - if (!auto_off && hdev->dev_type == HCI_PRIMARY && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hci_dev_test_flag(hdev, HCI_MGMT)) - __mgmt_power_off(hdev); - - hci_inquiry_cache_flush(hdev); - hci_pend_le_actions_clear(hdev); - hci_conn_hash_flush(hdev); - hci_dev_unlock(hdev); - - smp_unregister(hdev); - - hci_sock_dev_event(hdev, HCI_DEV_DOWN); - - if (hdev->flush) - hdev->flush(hdev); - - /* Reset device */ - skb_queue_purge(&hdev->cmd_q); - atomic_set(&hdev->cmd_cnt, 1); - if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && - !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { - set_bit(HCI_INIT, &hdev->flags); - __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL); - clear_bit(HCI_INIT, &hdev->flags); - } - - /* flush cmd work */ - flush_work(&hdev->cmd_work); - - /* Drop queues */ - skb_queue_purge(&hdev->rx_q); - skb_queue_purge(&hdev->cmd_q); - skb_queue_purge(&hdev->raw_q); - - /* Drop last sent command */ - if (hdev->sent_cmd) { - cancel_delayed_work_sync(&hdev->cmd_timer); - kfree_skb(hdev->sent_cmd); - hdev->sent_cmd = NULL; - } - - clear_bit(HCI_RUNNING, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_CLOSE); - - /* After this point our queues are empty - * and no tasks are scheduled. */ - hdev->close(hdev); - - /* Clear flags */ - hdev->flags &= BIT(HCI_RAW); - hci_dev_clear_volatile_flags(hdev); - - /* Controller radio is available but is currently powered down */ - hdev->amp_status = AMP_STATUS_POWERED_DOWN; - - memset(hdev->eir, 0, sizeof(hdev->eir)); - memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); - bacpy(&hdev->random_addr, BDADDR_ANY); + err = hci_dev_close_sync(hdev); hci_req_sync_unlock(hdev); - hci_dev_put(hdev); - return 0; + return err; } int hci_dev_close(__u16 dev) @@ -1736,6 +519,7 @@ int hci_dev_close(__u16 dev) goto done; } + cancel_work_sync(&hdev->power_on); if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) cancel_delayed_work(&hdev->power_off); @@ -1758,6 +542,20 @@ static int hci_dev_do_reset(struct hci_dev *hdev) skb_queue_purge(&hdev->rx_q); skb_queue_purge(&hdev->cmd_q); + /* Cancel these to avoid queueing non-chained pending work */ + hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); + /* Wait for + * + * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) + * queue_delayed_work(&hdev->{cmd,ncmd}_timer) + * + * inside RCU section to see the flag or complete scheduling. + */ + synchronize_rcu(); + /* Explicitly cancel works in case scheduled after setting the flag. */ + cancel_delayed_work(&hdev->cmd_timer); + cancel_delayed_work(&hdev->ncmd_timer); + /* Avoid potential lockdep warnings from the *_flush() calls by * ensuring the workqueue is empty up front. 
*/ @@ -1771,10 +569,15 @@ static int hci_dev_do_reset(struct hci_dev *hdev) if (hdev->flush) hdev->flush(hdev); + hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); + atomic_set(&hdev->cmd_cnt, 1); - hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; + hdev->acl_cnt = 0; + hdev->sco_cnt = 0; + hdev->le_cnt = 0; + hdev->iso_cnt = 0; - ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL); + ret = hci_reset_sync(hdev); hci_req_sync_unlock(hdev); return ret; @@ -1783,9 +586,9 @@ static int hci_dev_do_reset(struct hci_dev *hdev) int hci_dev_reset(__u16 dev) { struct hci_dev *hdev; - int err; + int err, srcu_index; - hdev = hci_dev_get(dev); + hdev = hci_dev_get_srcu(dev, &srcu_index); if (!hdev) return -ENODEV; @@ -1807,7 +610,7 @@ int hci_dev_reset(__u16 dev) err = hci_dev_do_reset(hdev); done: - hci_dev_put(hdev); + hci_dev_put_srcu(hdev, srcu_index); return err; } @@ -1837,7 +640,7 @@ done: return ret; } -static void hci_update_scan_state(struct hci_dev *hdev, u8 scan) +static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan) { bool conn_changed, discov_changed; @@ -1867,7 +670,7 @@ static void hci_update_scan_state(struct hci_dev *hdev, u8 scan) hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) - hci_req_update_adv_data(hdev, hdev->cur_adv_instance); + hci_update_adv_data(hdev, hdev->cur_adv_instance); mgmt_new_settings(hdev); } @@ -1877,6 +680,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) { struct hci_dev *hdev; struct hci_dev_req dr; + __le16 policy; int err = 0; if (copy_from_user(&dr, arg, sizeof(dr))) @@ -1896,11 +700,6 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) goto done; } - if (hdev->dev_type != HCI_PRIMARY) { - err = -EOPNOTSUPP; - goto done; - } - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = -EOPNOTSUPP; goto done; @@ -1908,8 +707,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) switch (cmd) { case HCISETAUTH: - err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, - HCI_INIT_TIMEOUT, NULL); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); break; case HCISETENCRYPT: @@ -1920,30 +719,34 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) if (!test_bit(HCI_AUTH, &hdev->flags)) { /* Auth must be enabled first */ - err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, - HCI_INIT_TIMEOUT, NULL); + err = hci_cmd_sync_status(hdev, + HCI_OP_WRITE_AUTH_ENABLE, + 1, &dr.dev_opt, + HCI_CMD_TIMEOUT); if (err) break; } - err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt, - HCI_INIT_TIMEOUT, NULL); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); break; case HCISETSCAN: - err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, - HCI_INIT_TIMEOUT, NULL); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); /* Ensure that the connectable and discoverable states * get correctly modified as this was a non-mgmt change. 
*/ if (!err) - hci_update_scan_state(hdev, dr.dev_opt); + hci_update_passive_scan_state(hdev, dr.dev_opt); break; case HCISETLINKPOL: - err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt, - HCI_INIT_TIMEOUT, NULL); + policy = cpu_to_le16(dr.dev_opt); + + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, + 2, &policy, HCI_CMD_TIMEOUT); break; case HCISETLINKMODE: @@ -1984,7 +787,7 @@ int hci_get_dev_list(void __user *arg) struct hci_dev *hdev; struct hci_dev_list_req *dl; struct hci_dev_req *dr; - int n = 0, size, err; + int n = 0, err; __u16 dev_num; if (get_user(dev_num, (__u16 __user *) arg)) @@ -1993,12 +796,11 @@ int hci_get_dev_list(void __user *arg) if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) return -EINVAL; - size = sizeof(*dl) + dev_num * sizeof(*dr); - - dl = kzalloc(size, GFP_KERNEL); + dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL); if (!dl) return -ENOMEM; + dl->dev_num = dev_num; dr = dl->dev_req; read_lock(&hci_dev_list_lock); @@ -2012,8 +814,8 @@ int hci_get_dev_list(void __user *arg) if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) flags &= ~BIT(HCI_UP); - (dr + n)->dev_id = hdev->id; - (dr + n)->dev_opt = flags; + dr[n].dev_id = hdev->id; + dr[n].dev_opt = flags; if (++n >= dev_num) break; @@ -2021,9 +823,7 @@ int hci_get_dev_list(void __user *arg) read_unlock(&hci_dev_list_lock); dl->dev_num = n; - size = sizeof(*dl) + n * sizeof(*dr); - - err = copy_to_user(arg, dl, size); + err = copy_to_user(arg, dl, struct_size(dl, dev_req, n)); kfree(dl); return err ? -EFAULT : 0; @@ -2052,9 +852,9 @@ int hci_get_dev_info(void __user *arg) else flags = hdev->flags; - strcpy(di.name, hdev->name); + strscpy(di.name, hdev->name, sizeof(di.name)); di.bdaddr = hdev->bdaddr; - di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4); + di.type = (hdev->bus & 0x0f); di.flags = flags; di.pkt_type = hdev->pkt_type; if (lmp_bredr_capable(hdev)) { @@ -2084,20 +884,51 @@ int hci_get_dev_info(void __user *arg) /* ---- Interface to HCI drivers ---- */ +static int hci_dev_do_poweroff(struct hci_dev *hdev) +{ + int err; + + BT_DBG("%s %p", hdev->name, hdev); + + hci_req_sync_lock(hdev); + + err = hci_set_powered_sync(hdev, false); + + hci_req_sync_unlock(hdev); + + return err; +} + static int hci_rfkill_set_block(void *data, bool blocked) { struct hci_dev *hdev = data; + int err; BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return -EBUSY; + if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED)) + return 0; + if (blocked) { hci_dev_set_flag(hdev, HCI_RFKILLED); + if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG)) - hci_dev_do_close(hdev); + !hci_dev_test_flag(hdev, HCI_CONFIG)) { + err = hci_dev_do_poweroff(hdev); + if (err) { + bt_dev_err(hdev, "Error when powering off device on rfkill (%d)", + err); + + /* Make sure the device is still closed even if + * anything during power off sequence (eg. + * disconnecting devices) failed. 
+ */ + hci_dev_do_close(hdev); + } + } } else { hci_dev_clear_flag(hdev, HCI_RFKILLED); } @@ -2120,9 +951,7 @@ static void hci_power_on(struct work_struct *work) hci_dev_test_flag(hdev, HCI_MGMT) && hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { cancel_delayed_work(&hdev->power_off); - hci_req_sync_lock(hdev); - err = __hci_req_hci_power_on(hdev); - hci_req_sync_unlock(hdev); + err = hci_powered_update_sync(hdev); mgmt_power_on(hdev, err); return; } @@ -2141,8 +970,7 @@ static void hci_power_on(struct work_struct *work) */ if (hci_dev_test_flag(hdev, HCI_RFKILLED) || hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || - (hdev->dev_type == HCI_PRIMARY && - !bacmp(&hdev->bdaddr, BDADDR_ANY) && + (!bacmp(&hdev->bdaddr, BDADDR_ANY) && !bacmp(&hdev->static_addr, BDADDR_ANY))) { hci_dev_clear_flag(hdev, HCI_AUTO_OFF); hci_dev_do_close(hdev); @@ -2195,6 +1023,7 @@ static void hci_error_reset(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); + hci_dev_hold(hdev); BT_DBG("%s", hdev->name); if (hdev->hw_error) @@ -2202,10 +1031,10 @@ static void hci_error_reset(struct work_struct *work) else bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code); - if (hci_dev_do_close(hdev)) - return; + if (!hci_dev_do_close(hdev)) + hci_dev_do_open(hdev); - hci_dev_do_open(hdev); + hci_dev_put(hdev); } void hci_uuids_clear(struct hci_dev *hdev) @@ -2220,9 +1049,9 @@ void hci_uuids_clear(struct hci_dev *hdev) void hci_link_keys_clear(struct hci_dev *hdev) { - struct link_key *key; + struct link_key *key, *tmp; - list_for_each_entry_rcu(key, &hdev->link_keys, list) { + list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) { list_del_rcu(&key->list); kfree_rcu(key, rcu); } @@ -2230,9 +1059,9 @@ void hci_link_keys_clear(struct hci_dev *hdev) void hci_smp_ltks_clear(struct hci_dev *hdev) { - struct smp_ltk *k; + struct smp_ltk *k, *tmp; - list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { + list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { list_del_rcu(&k->list); kfree_rcu(k, rcu); } @@ -2240,14 +1069,41 @@ void hci_smp_ltks_clear(struct hci_dev *hdev) void hci_smp_irks_clear(struct hci_dev *hdev) { - struct smp_irk *k; + struct smp_irk *k, *tmp; - list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { + list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { list_del_rcu(&k->list); kfree_rcu(k, rcu); } } +void hci_blocked_keys_clear(struct hci_dev *hdev) +{ + struct blocked_key *b, *tmp; + + list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) { + list_del_rcu(&b->list); + kfree_rcu(b, rcu); + } +} + +bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]) +{ + bool blocked = false; + struct blocked_key *b; + + rcu_read_lock(); + list_for_each_entry_rcu(b, &hdev->blocked_keys, list) { + if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) { + blocked = true; + break; + } + } + + rcu_read_unlock(); + return blocked; +} + struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct link_key *k; @@ -2256,6 +1112,16 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) list_for_each_entry_rcu(k, &hdev->link_keys, list) { if (bacmp(bdaddr, &k->bdaddr) == 0) { rcu_read_unlock(); + + if (hci_is_blocked_key(hdev, + HCI_BLOCKED_KEY_TYPE_LINKKEY, + k->val)) { + bt_dev_warn_ratelimited(hdev, + "Link key blocked for %pMR", + &k->bdaddr); + return NULL; + } + return k; } } @@ -2324,6 +1190,15 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, 
bdaddr_t *bdaddr, if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { rcu_read_unlock(); + + if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, + k->val)) { + bt_dev_warn_ratelimited(hdev, + "LTK blocked for %pMR", + &k->bdaddr); + return NULL; + } + return k; } } @@ -2334,31 +1209,42 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) { + struct smp_irk *irk_to_return = NULL; struct smp_irk *irk; rcu_read_lock(); list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (!bacmp(&irk->rpa, rpa)) { - rcu_read_unlock(); - return irk; + irk_to_return = irk; + goto done; } } list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (smp_irk_matches(hdev, irk->val, rpa)) { bacpy(&irk->rpa, rpa); - rcu_read_unlock(); - return irk; + irk_to_return = irk; + goto done; } } + +done: + if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, + irk_to_return->val)) { + bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", + &irk_to_return->bdaddr); + irk_to_return = NULL; + } + rcu_read_unlock(); - return NULL; + return irk_to_return; } struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { + struct smp_irk *irk_to_return = NULL; struct smp_irk *irk; /* Identity Address must be public or static random */ @@ -2369,13 +1255,21 @@ struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (addr_type == irk->addr_type && bacmp(bdaddr, &irk->bdaddr) == 0) { - rcu_read_unlock(); - return irk; + irk_to_return = irk; + break; } } + + if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, + irk_to_return->val)) { + bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", + &irk_to_return->bdaddr); + irk_to_return = NULL; + } + rcu_read_unlock(); - return NULL; + return irk_to_return; } struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, @@ -2495,10 +1389,10 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { - struct smp_ltk *k; + struct smp_ltk *k, *tmp; int removed = 0; - list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { + list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) continue; @@ -2514,9 +1408,9 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { - struct smp_irk *k; + struct smp_irk *k, *tmp; - list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { + list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) continue; @@ -2569,19 +1463,41 @@ static void hci_cmd_timeout(struct work_struct *work) struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_timer.work); - if (hdev->sent_cmd) { - struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; - u16 opcode = __le16_to_cpu(sent->opcode); + if (hdev->req_skb) { + u16 opcode = hci_skb_opcode(hdev->req_skb); bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode); + + hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT); } else { bt_dev_err(hdev, "command tx timeout"); } + if (hdev->reset) + hdev->reset(hdev); + atomic_set(&hdev->cmd_cnt, 1); queue_work(hdev->workqueue, 
&hdev->cmd_work); } +/* HCI ncmd timer function */ +static void hci_ncmd_timeout(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + ncmd_timer.work); + + bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0"); + + /* During HCI_INIT phase no events can be injected if the ncmd timer + * triggers since the procedure has its own timeout handling. + */ + if (test_bit(HCI_INIT, &hdev->flags)) + return; + + /* This is an irrecoverable state, inject hardware error event */ + hci_reset_dev(hdev); +} + struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { @@ -2685,6 +1601,19 @@ struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) } /* This function requires the caller holds hdev->lock */ +struct adv_info *hci_find_adv_sid(struct hci_dev *hdev, u8 sid) +{ + struct adv_info *adv; + + list_for_each_entry(adv, &hdev->adv_instances, list) { + if (adv->sid == sid) + return adv; + } + + return NULL; +} + +/* This function requires the caller holds hdev->lock */ struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) { struct adv_info *cur_instance; @@ -2744,12 +1673,12 @@ void hci_adv_instances_clear(struct hci_dev *hdev) struct adv_info *adv_instance, *n; if (hdev->adv_instance_timeout) { - cancel_delayed_work(&hdev->adv_instance_expire); + disable_delayed_work(&hdev->adv_instance_expire); hdev->adv_instance_timeout = 0; } list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { - cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); + disable_delayed_work_sync(&adv_instance->rpa_expired_cb); list_del(&adv_instance->list); kfree(adv_instance); } @@ -2769,63 +1698,353 @@ static void adv_instance_rpa_expired(struct work_struct *work) } /* This function requires the caller holds hdev->lock */ -int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, - u16 adv_data_len, u8 *adv_data, - u16 scan_rsp_len, u8 *scan_rsp_data, - u16 timeout, u16 duration) -{ - struct adv_info *adv_instance; - - adv_instance = hci_find_adv_instance(hdev, instance); - if (adv_instance) { - memset(adv_instance->adv_data, 0, - sizeof(adv_instance->adv_data)); - memset(adv_instance->scan_rsp_data, 0, - sizeof(adv_instance->scan_rsp_data)); +struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, + u32 flags, u16 adv_data_len, u8 *adv_data, + u16 scan_rsp_len, u8 *scan_rsp_data, + u16 timeout, u16 duration, s8 tx_power, + u32 min_interval, u32 max_interval, + u8 mesh_handle) +{ + struct adv_info *adv; + + adv = hci_find_adv_instance(hdev, instance); + if (adv) { + memset(adv->adv_data, 0, sizeof(adv->adv_data)); + memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); + memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data)); } else { - if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES || - instance < 1 || instance > HCI_MAX_ADV_INSTANCES) - return -EOVERFLOW; + if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets || + instance < 1 || instance > hdev->le_num_of_adv_sets + 1) + return ERR_PTR(-EOVERFLOW); - adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL); - if (!adv_instance) - return -ENOMEM; + adv = kzalloc(sizeof(*adv), GFP_KERNEL); + if (!adv) + return ERR_PTR(-ENOMEM); - adv_instance->pending = true; - adv_instance->instance = instance; - list_add(&adv_instance->list, &hdev->adv_instances); + adv->pending = true; + adv->instance = instance; + + /* If controller support only one set and the instance is set to + * 1 then there 
is no option other than using handle 0x00. + */ + if (hdev->le_num_of_adv_sets == 1 && instance == 1) + adv->handle = 0x00; + else + adv->handle = instance; + + list_add(&adv->list, &hdev->adv_instances); hdev->adv_instance_cnt++; } - adv_instance->flags = flags; - adv_instance->adv_data_len = adv_data_len; - adv_instance->scan_rsp_len = scan_rsp_len; - - if (adv_data_len) - memcpy(adv_instance->adv_data, adv_data, adv_data_len); + adv->flags = flags; + adv->min_interval = min_interval; + adv->max_interval = max_interval; + adv->tx_power = tx_power; + /* Defining a mesh_handle changes the timing units to ms, + * rather than seconds, and ties the instance to the requested + * mesh_tx queue. + */ + adv->mesh = mesh_handle; - if (scan_rsp_len) - memcpy(adv_instance->scan_rsp_data, - scan_rsp_data, scan_rsp_len); + hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data, + scan_rsp_len, scan_rsp_data); - adv_instance->timeout = timeout; - adv_instance->remaining_time = timeout; + adv->timeout = timeout; + adv->remaining_time = timeout; if (duration == 0) - adv_instance->duration = HCI_DEFAULT_ADV_DURATION; + adv->duration = hdev->def_multi_adv_rotation_duration; else - adv_instance->duration = duration; - - adv_instance->tx_power = HCI_TX_POWER_INVALID; + adv->duration = duration; - INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb, - adv_instance_rpa_expired); + INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired); BT_DBG("%s for %dMR", hdev->name, instance); + return adv; +} + +/* This function requires the caller holds hdev->lock */ +struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u8 sid, + u32 flags, u8 data_len, u8 *data, + u32 min_interval, u32 max_interval) +{ + struct adv_info *adv; + + adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL, + 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE, + min_interval, max_interval, 0); + if (IS_ERR(adv)) + return adv; + + adv->sid = sid; + adv->periodic = true; + adv->per_adv_data_len = data_len; + + if (data) + memcpy(adv->per_adv_data, data, data_len); + + return adv; +} + +/* This function requires the caller holds hdev->lock */ +int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, + u16 adv_data_len, u8 *adv_data, + u16 scan_rsp_len, u8 *scan_rsp_data) +{ + struct adv_info *adv; + + adv = hci_find_adv_instance(hdev, instance); + + /* If advertisement doesn't exist, we can't modify its data */ + if (!adv) + return -ENOENT; + + if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) { + memset(adv->adv_data, 0, sizeof(adv->adv_data)); + memcpy(adv->adv_data, adv_data, adv_data_len); + adv->adv_data_len = adv_data_len; + adv->adv_data_changed = true; + } + + if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) { + memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); + memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len); + adv->scan_rsp_len = scan_rsp_len; + adv->scan_rsp_changed = true; + } + + /* Mark as changed if there are flags which would affect it */ + if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) || + adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) + adv->scan_rsp_changed = true; + return 0; } +/* This function requires the caller holds hdev->lock */ +u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance) +{ + u32 flags; + struct adv_info *adv; + + if (instance == 0x00) { + /* Instance 0 always manages the "Tx Power" and "Flags" + * fields + */ + flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; + + /* For instance 0, 
the HCI_ADVERTISING_CONNECTABLE setting + * corresponds to the "connectable" instance flag. + */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) + flags |= MGMT_ADV_FLAG_CONNECTABLE; + + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) + flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; + else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) + flags |= MGMT_ADV_FLAG_DISCOV; + + return flags; + } + + adv = hci_find_adv_instance(hdev, instance); + + /* Return 0 when we got an invalid instance identifier. */ + if (!adv) + return 0; + + return adv->flags; +} + +bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *adv; + + /* Instance 0x00 always set local name */ + if (instance == 0x00) + return true; + + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return false; + + if (adv->flags & MGMT_ADV_FLAG_APPEARANCE || + adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) + return true; + + return adv->scan_rsp_len ? true : false; +} + +/* This function requires the caller holds hdev->lock */ +void hci_adv_monitors_clear(struct hci_dev *hdev) +{ + struct adv_monitor *monitor; + int handle; + + idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) + hci_free_adv_monitor(hdev, monitor); + + idr_destroy(&hdev->adv_monitors_idr); +} + +/* Frees the monitor structure and do some bookkeepings. + * This function requires the caller holds hdev->lock. + */ +void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) +{ + struct adv_pattern *pattern; + struct adv_pattern *tmp; + + if (!monitor) + return; + + list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) { + list_del(&pattern->list); + kfree(pattern); + } + + if (monitor->handle) + idr_remove(&hdev->adv_monitors_idr, monitor->handle); + + if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) + hdev->adv_monitors_cnt--; + + kfree(monitor); +} + +/* Assigns handle to a monitor, and if offloading is supported and power is on, + * also attempts to forward the request to the controller. + * This function requires the caller holds hci_req_sync_lock. + */ +int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) +{ + int min, max, handle; + int status = 0; + + if (!monitor) + return -EINVAL; + + hci_dev_lock(hdev); + + min = HCI_MIN_ADV_MONITOR_HANDLE; + max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; + handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, + GFP_KERNEL); + + hci_dev_unlock(hdev); + + if (handle < 0) + return handle; + + monitor->handle = handle; + + if (!hdev_is_powered(hdev)) + return status; + + switch (hci_get_adv_monitor_offload_ext(hdev)) { + case HCI_ADV_MONITOR_EXT_NONE: + bt_dev_dbg(hdev, "add monitor %d status %d", + monitor->handle, status); + /* Message was not forwarded to controller - not an error */ + break; + + case HCI_ADV_MONITOR_EXT_MSFT: + status = msft_add_monitor_pattern(hdev, monitor); + bt_dev_dbg(hdev, "add monitor %d msft status %d", + handle, status); + break; + } + + return status; +} + +/* Attempts to tell the controller and free the monitor. If somehow the + * controller doesn't have a corresponding handle, remove anyway. + * This function requires the caller holds hci_req_sync_lock. 
+ */ +static int hci_remove_adv_monitor(struct hci_dev *hdev, + struct adv_monitor *monitor) +{ + int status = 0; + int handle; + + switch (hci_get_adv_monitor_offload_ext(hdev)) { + case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */ + bt_dev_dbg(hdev, "remove monitor %d status %d", + monitor->handle, status); + goto free_monitor; + + case HCI_ADV_MONITOR_EXT_MSFT: + handle = monitor->handle; + status = msft_remove_monitor(hdev, monitor); + bt_dev_dbg(hdev, "remove monitor %d msft status %d", + handle, status); + break; + } + + /* In case no matching handle registered, just free the monitor */ + if (status == -ENOENT) + goto free_monitor; + + return status; + +free_monitor: + if (status == -ENOENT) + bt_dev_warn(hdev, "Removing monitor with no matching handle %d", + monitor->handle); + hci_free_adv_monitor(hdev, monitor); + + return status; +} + +/* This function requires the caller holds hci_req_sync_lock */ +int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle) +{ + struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle); + + if (!monitor) + return -EINVAL; + + return hci_remove_adv_monitor(hdev, monitor); +} + +/* This function requires the caller holds hci_req_sync_lock */ +int hci_remove_all_adv_monitor(struct hci_dev *hdev) +{ + struct adv_monitor *monitor; + int idr_next_id = 0; + int status = 0; + + while (1) { + monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id); + if (!monitor) + break; + + status = hci_remove_adv_monitor(hdev, monitor); + if (status) + return status; + + idr_next_id++; + } + + return status; +} + +/* This function requires the caller holds hdev->lock */ +bool hci_is_adv_monitoring(struct hci_dev *hdev) +{ + return !idr_is_empty(&hdev->adv_monitors_idr); +} + +int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev) +{ + if (msft_monitor_supported(hdev)) + return HCI_ADV_MONITOR_EXT_MSFT; + + return HCI_ADV_MONITOR_EXT_NONE; +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { @@ -2853,6 +2072,20 @@ struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( return NULL; } +struct bdaddr_list_with_flags * +hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list, + bdaddr_t *bdaddr, u8 type) +{ + struct bdaddr_list_with_flags *b; + + list_for_each_entry(b, bdaddr_list, list) { + if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) + return b; + } + + return NULL; +} + void hci_bdaddr_list_clear(struct list_head *bdaddr_list) { struct bdaddr_list *b, *n; @@ -2914,6 +2147,30 @@ int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, return 0; } +int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u32 flags) +{ + struct bdaddr_list_with_flags *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) + return -EBADF; + + if (hci_bdaddr_list_lookup(list, bdaddr, type)) + return -EEXIST; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + bacpy(&entry->bdaddr, bdaddr); + entry->bdaddr_type = type; + entry->flags = flags; + + list_add(&entry->list, list); + + return 0; +} + int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *entry; @@ -2969,22 +2226,46 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, return NULL; } -/* This function requires the caller holds hdev->lock */ +/* This function requires the caller holds hdev->lock or rcu_read_lock */ struct hci_conn_params *hci_pend_le_action_lookup(struct 
list_head *list, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *param; - list_for_each_entry(param, list, action) { + rcu_read_lock(); + + list_for_each_entry_rcu(param, list, action) { if (bacmp(&param->addr, addr) == 0 && - param->addr_type == addr_type) + param->addr_type == addr_type) { + rcu_read_unlock(); return param; + } } + rcu_read_unlock(); + return NULL; } /* This function requires the caller holds hdev->lock */ +void hci_pend_le_list_del_init(struct hci_conn_params *param) +{ + if (list_empty(&param->action)) + return; + + list_del_rcu(&param->action); + synchronize_rcu(); + INIT_LIST_HEAD(&param->action); +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_list_add(struct hci_conn_params *param, + struct list_head *list) +{ + list_add_rcu(&param->action, list); +} + +/* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { @@ -3017,14 +2298,15 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, return params; } -static void hci_conn_params_free(struct hci_conn_params *params) +void hci_conn_params_free(struct hci_conn_params *params) { + hci_pend_le_list_del_init(params); + if (params->conn) { hci_conn_drop(params->conn); hci_conn_put(params->conn); } - list_del(&params->action); list_del(&params->list); kfree(params); } @@ -3040,7 +2322,7 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) hci_conn_params_free(params); - hci_update_background_scan(hdev); + hci_update_passive_scan(hdev); BT_DBG("addr %pMR (type %u)", addr, addr_type); } @@ -3054,7 +2336,7 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev) if (params->auto_connect != HCI_AUTO_CONN_DISABLED) continue; - /* If trying to estabilish one time connection to disabled + /* If trying to establish one time connection to disabled * device, leave the params, but mark them as just once. */ if (params->explicit_connect) { @@ -3062,8 +2344,7 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev) continue; } - list_del(&params->list); - kfree(params); + hci_conn_params_free(params); } BT_DBG("All LE disabled connection parameters were removed"); @@ -3108,15 +2389,71 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, } } +static void hci_clear_wake_reason(struct hci_dev *hdev) +{ + hci_dev_lock(hdev); + + hdev->wake_reason = 0; + bacpy(&hdev->wake_addr, BDADDR_ANY); + hdev->wake_addr_type = 0; + + hci_dev_unlock(hdev); +} + +static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct hci_dev *hdev = + container_of(nb, struct hci_dev, suspend_notifier); + int ret = 0; + + /* Userspace has full control of this device. Do nothing. */ + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) + return NOTIFY_DONE; + + /* To avoid a potential race with hci_unregister_dev. */ + hci_dev_hold(hdev); + + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + ret = hci_suspend_dev(hdev); + break; + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + ret = hci_resume_dev(hdev); + break; + } + + if (ret) + bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", + action, ret); + + hci_dev_put(hdev); + return NOTIFY_DONE; +} + /* Alloc HCI device */ -struct hci_dev *hci_alloc_dev(void) +struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) { struct hci_dev *hdev; + unsigned int alloc_size; + + alloc_size = sizeof(*hdev); + if (sizeof_priv) { + /* Fixme: May need ALIGN-ment?
*/ + alloc_size += sizeof_priv; + } - hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); + hdev = kzalloc(alloc_size, GFP_KERNEL); if (!hdev) return NULL; + if (init_srcu_struct(&hdev->srcu)) { + kfree(hdev); + return NULL; + } + hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); hdev->esco_type = (ESCO_HV1); hdev->link_mode = (HCI_LM_ACCEPT); @@ -3129,14 +2466,26 @@ struct hci_dev *hci_alloc_dev(void) hdev->cur_adv_instance = 0x00; hdev->adv_instance_timeout = 0; + hdev->advmon_allowlist_duration = 300; + hdev->advmon_no_filter_duration = 500; + hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */ + hdev->sniff_max_interval = 800; hdev->sniff_min_interval = 80; hdev->le_adv_channel_map = 0x07; hdev->le_adv_min_interval = 0x0800; hdev->le_adv_max_interval = 0x0800; - hdev->le_scan_interval = 0x0060; - hdev->le_scan_window = 0x0030; + hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST; + hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST; + hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1; + hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1; + hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; + hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; + hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST; + hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST; + hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN; + hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN; hdev->le_conn_min_interval = 0x0018; hdev->le_conn_max_interval = 0x0028; hdev->le_conn_latency = 0x0000; @@ -3151,37 +2500,58 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE; hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; + hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; + hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; + hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT; + hdev->min_le_tx_power = HCI_TX_POWER_INVALID; + hdev->max_le_tx_power = HCI_TX_POWER_INVALID; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; + hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; + hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; + + /* default 1.28 sec page scan */ + hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; + hdev->def_page_scan_int = 0x0800; + hdev->def_page_scan_window = 0x0012; mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); + mutex_init(&hdev->mgmt_pending_lock); + ida_init(&hdev->unset_handle_ida); + + INIT_LIST_HEAD(&hdev->mesh_pending); INIT_LIST_HEAD(&hdev->mgmt_pending); - INIT_LIST_HEAD(&hdev->blacklist); - INIT_LIST_HEAD(&hdev->whitelist); + INIT_LIST_HEAD(&hdev->reject_list); + INIT_LIST_HEAD(&hdev->accept_list); INIT_LIST_HEAD(&hdev->uuids); INIT_LIST_HEAD(&hdev->link_keys); INIT_LIST_HEAD(&hdev->long_term_keys); INIT_LIST_HEAD(&hdev->identity_resolving_keys); INIT_LIST_HEAD(&hdev->remote_oob_data); - INIT_LIST_HEAD(&hdev->le_white_list); + INIT_LIST_HEAD(&hdev->le_accept_list); INIT_LIST_HEAD(&hdev->le_resolv_list); INIT_LIST_HEAD(&hdev->le_conn_params); INIT_LIST_HEAD(&hdev->pend_le_conns); INIT_LIST_HEAD(&hdev->pend_le_reports); INIT_LIST_HEAD(&hdev->conn_hash.list); INIT_LIST_HEAD(&hdev->adv_instances); + INIT_LIST_HEAD(&hdev->blocked_keys); + INIT_LIST_HEAD(&hdev->monitored_devices); + INIT_LIST_HEAD(&hdev->local_codecs); INIT_WORK(&hdev->rx_work, hci_rx_work); INIT_WORK(&hdev->cmd_work, hci_cmd_work); 
INIT_WORK(&hdev->tx_work, hci_tx_work); INIT_WORK(&hdev->power_on, hci_power_on); INIT_WORK(&hdev->error_reset, hci_error_reset); + hci_cmd_sync_init(hdev); + INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); skb_queue_head_init(&hdev->rx_q); @@ -3191,15 +2561,16 @@ struct hci_dev *hci_alloc_dev(void) init_waitqueue_head(&hdev->req_wait_q); INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); + INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); - hci_request_setup(hdev); + hci_devcd_setup(hdev); hci_init_sysfs(hdev); discovery_init(hdev); return hdev; } -EXPORT_SYMBOL(hci_alloc_dev); +EXPORT_SYMBOL(hci_alloc_dev_priv); /* Free HCI device */ void hci_free_dev(struct hci_dev *hdev) @@ -3217,24 +2588,15 @@ int hci_register_dev(struct hci_dev *hdev) if (!hdev->open || !hdev->close || !hdev->send) return -EINVAL; - /* Do not allow HCI_AMP devices to register at index 0, - * so the index can be used as the AMP controller ID. - */ - switch (hdev->dev_type) { - case HCI_PRIMARY: - id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL); - break; - case HCI_AMP: - id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL); - break; - default: - return -EINVAL; - } - + id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL); if (id < 0) return id; - sprintf(hdev->name, "hci%d", id); + error = dev_set_name(&hdev->dev, "hci%u", id); + if (error) + return error; + + hdev->name = dev_name(&hdev->dev); hdev->id = id; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); @@ -3256,8 +2618,6 @@ int hci_register_dev(struct hci_dev *hdev) if (!IS_ERR_OR_NULL(bt_debugfs)) hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); - dev_set_name(&hdev->dev, "%s", hdev->name); - error = device_add(&hdev->dev); if (error < 0) goto err_wqueue; @@ -3280,12 +2640,10 @@ int hci_register_dev(struct hci_dev *hdev) hci_dev_set_flag(hdev, HCI_SETUP); hci_dev_set_flag(hdev, HCI_AUTO_OFF); - if (hdev->dev_type == HCI_PRIMARY) { - /* Assume BR/EDR support until proven otherwise (such as - * through reading supported features during init. - */ - hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); - } + /* Assume BR/EDR support until proven otherwise (such as + * through reading supported features during init. + */ + hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); write_lock(&hci_dev_list_lock); list_add(&hdev->list, &hci_dev_list); @@ -3294,21 +2652,35 @@ int hci_register_dev(struct hci_dev *hdev) /* Devices that are marked for raw-only usage are unconfigured * and should not be included in normal operation. */ - if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE)) hci_dev_set_flag(hdev, HCI_UNCONFIGURED); + /* Mark Remote Wakeup connection flag as supported if driver has wakeup + * callback. 
+ */ + if (hdev->wakeup) + hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP; + hci_sock_dev_event(hdev, HCI_DEV_REG); hci_dev_hold(hdev); + error = hci_register_suspend_notifier(hdev); + if (error) + BT_WARN("register suspend notifier failed error:%d\n", error); + queue_work(hdev->req_workqueue, &hdev->power_on); + idr_init(&hdev->adv_monitors_idr); + msft_register(hdev); + return id; err_wqueue: + debugfs_remove_recursive(hdev->debugfs); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); err: - ida_simple_remove(&hci_index_ida, hdev->id); + ida_free(&hci_index_ida, hdev->id); return error; } @@ -3317,19 +2689,28 @@ EXPORT_SYMBOL(hci_register_dev); /* Unregister HCI device */ void hci_unregister_dev(struct hci_dev *hdev) { - int id; - BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); + mutex_lock(&hdev->unregister_lock); hci_dev_set_flag(hdev, HCI_UNREGISTER); - - id = hdev->id; + mutex_unlock(&hdev->unregister_lock); write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); - cancel_work_sync(&hdev->power_on); + synchronize_srcu(&hdev->srcu); + cleanup_srcu_struct(&hdev->srcu); + + disable_work_sync(&hdev->rx_work); + disable_work_sync(&hdev->cmd_work); + disable_work_sync(&hdev->tx_work); + disable_work_sync(&hdev->power_on); + disable_work_sync(&hdev->error_reset); + + hci_cmd_sync_clear(hdev); + + hci_unregister_suspend_notifier(hdev); hci_dev_do_close(hdev); @@ -3353,7 +2734,14 @@ void hci_unregister_dev(struct hci_dev *hdev) } device_del(&hdev->dev); + /* Actual cleanup is deferred until hci_release_dev(). */ + hci_dev_put(hdev); +} +EXPORT_SYMBOL(hci_unregister_dev); +/* Release HCI device */ +void hci_release_dev(struct hci_dev *hdev) +{ debugfs_remove_recursive(hdev->debugfs); kfree_const(hdev->hw_info); kfree_const(hdev->fw_info); @@ -3362,46 +2750,145 @@ void hci_unregister_dev(struct hci_dev *hdev) destroy_workqueue(hdev->req_workqueue); hci_dev_lock(hdev); - hci_bdaddr_list_clear(&hdev->blacklist); - hci_bdaddr_list_clear(&hdev->whitelist); + hci_bdaddr_list_clear(&hdev->reject_list); + hci_bdaddr_list_clear(&hdev->accept_list); hci_uuids_clear(hdev); hci_link_keys_clear(hdev); hci_smp_ltks_clear(hdev); hci_smp_irks_clear(hdev); hci_remote_oob_data_clear(hdev); hci_adv_instances_clear(hdev); - hci_bdaddr_list_clear(&hdev->le_white_list); + hci_adv_monitors_clear(hdev); + hci_bdaddr_list_clear(&hdev->le_accept_list); hci_bdaddr_list_clear(&hdev->le_resolv_list); hci_conn_params_clear_all(hdev); hci_discovery_filter_clear(hdev); + hci_blocked_keys_clear(hdev); + hci_codec_list_clear(&hdev->local_codecs); + msft_release(hdev); hci_dev_unlock(hdev); - hci_dev_put(hdev); + ida_destroy(&hdev->unset_handle_ida); + ida_free(&hci_index_ida, hdev->id); + kfree_skb(hdev->sent_cmd); + kfree_skb(hdev->req_skb); + kfree_skb(hdev->recv_event); + kfree(hdev); +} +EXPORT_SYMBOL(hci_release_dev); + +int hci_register_suspend_notifier(struct hci_dev *hdev) +{ + int ret = 0; - ida_simple_remove(&hci_index_ida, id); + if (!hdev->suspend_notifier.notifier_call && + !hci_test_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER)) { + hdev->suspend_notifier.notifier_call = hci_suspend_notifier; + ret = register_pm_notifier(&hdev->suspend_notifier); + } + + return ret; +} + +int hci_unregister_suspend_notifier(struct hci_dev *hdev) +{ + int ret = 0; + + if (hdev->suspend_notifier.notifier_call) { + ret = unregister_pm_notifier(&hdev->suspend_notifier); + if (!ret) + hdev->suspend_notifier.notifier_call = NULL; + } + + return ret; +} + +/* Cancel 
ongoing command synchronously: + * + * - Cancel command timer + * - Reset command counter + * - Cancel command request + */ +static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err) +{ + bt_dev_dbg(hdev, "err 0x%2.2x", err); + + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + disable_delayed_work_sync(&hdev->cmd_timer); + disable_delayed_work_sync(&hdev->ncmd_timer); + } else { + cancel_delayed_work_sync(&hdev->cmd_timer); + cancel_delayed_work_sync(&hdev->ncmd_timer); + } + + atomic_set(&hdev->cmd_cnt, 1); + + hci_cmd_sync_cancel_sync(hdev, err); } -EXPORT_SYMBOL(hci_unregister_dev); /* Suspend HCI device */ int hci_suspend_dev(struct hci_dev *hdev) { + int ret; + + bt_dev_dbg(hdev, ""); + + /* Suspend should only act on when powered. */ + if (!hdev_is_powered(hdev) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; + + /* If powering down don't attempt to suspend */ + if (mgmt_powering_down(hdev)) + return 0; + + /* Cancel potentially blocking sync operation before suspend */ + hci_cancel_cmd_sync(hdev, EHOSTDOWN); + + hci_req_sync_lock(hdev); + ret = hci_suspend_sync(hdev); + hci_req_sync_unlock(hdev); + + hci_clear_wake_reason(hdev); + mgmt_suspending(hdev, hdev->suspend_state); + hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); - return 0; + return ret; } EXPORT_SYMBOL(hci_suspend_dev); /* Resume HCI device */ int hci_resume_dev(struct hci_dev *hdev) { + int ret; + + bt_dev_dbg(hdev, ""); + + /* Resume should only act on when powered. */ + if (!hdev_is_powered(hdev) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; + + /* If powering down don't attempt to resume */ + if (mgmt_powering_down(hdev)) + return 0; + + hci_req_sync_lock(hdev); + ret = hci_resume_sync(hdev); + hci_req_sync_unlock(hdev); + + mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, + hdev->wake_addr_type); + hci_sock_dev_event(hdev, HCI_DEV_RESUME); - return 0; + return ret; } EXPORT_SYMBOL(hci_resume_dev); /* Reset HCI device */ int hci_reset_dev(struct hci_dev *hdev) { - const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; + static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; struct sk_buff *skb; skb = bt_skb_alloc(3, GFP_ATOMIC); @@ -3411,23 +2898,62 @@ int hci_reset_dev(struct hci_dev *hdev) hci_skb_pkt_type(skb) = HCI_EVENT_PKT; skb_put_data(skb, hw_err, 3); + bt_dev_err(hdev, "Injecting HCI hardware error event"); + /* Send Hardware Error to upper stack */ return hci_recv_frame(hdev, skb); } EXPORT_SYMBOL(hci_reset_dev); +static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (hdev->classify_pkt_type) + return hdev->classify_pkt_type(hdev, skb); + + return hci_skb_pkt_type(skb); +} + /* Receive frame from HCI drivers */ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) { + u8 dev_pkt_type; + if (!hdev || (!test_bit(HCI_UP, &hdev->flags) && !test_bit(HCI_INIT, &hdev->flags))) { kfree_skb(skb); return -ENXIO; } - if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT && - hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && - hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) { + /* Check if the driver agree with packet type classification */ + dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb); + if (hci_skb_pkt_type(skb) != dev_pkt_type) { + hci_skb_pkt_type(skb) = dev_pkt_type; + } + + switch (hci_skb_pkt_type(skb)) { + case HCI_EVENT_PKT: + break; + case HCI_ACLDATA_PKT: + /* Detect if ISO packet has been sent as ACL */ + if (hci_conn_num(hdev, CIS_LINK) || + hci_conn_num(hdev, BIS_LINK) || + hci_conn_num(hdev, PA_LINK)) { + __u16 handle = 
__le16_to_cpu(hci_acl_hdr(skb)->handle); + __u8 type; + + type = hci_conn_lookup_type(hdev, hci_handle(handle)); + if (type == CIS_LINK || type == BIS_LINK || + type == PA_LINK) + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + } + break; + case HCI_SCODATA_PKT: + break; + case HCI_ISODATA_PKT: + break; + case HCI_DRV_PKT: + break; + default: kfree_skb(skb); return -EINVAL; } @@ -3509,7 +3035,7 @@ int hci_unregister_cb(struct hci_cb *cb) } EXPORT_SYMBOL(hci_unregister_cb); -static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { int err; @@ -3532,14 +3058,33 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) if (!test_bit(HCI_RUNNING, &hdev->flags)) { kfree_skb(skb); - return; + return -EINVAL; + } + + if (hci_skb_pkt_type(skb) == HCI_DRV_PKT) { + /* Intercept HCI Drv packet here and don't go with hdev->send + * callback. + */ + err = hci_drv_process_cmd(hdev, skb); + kfree_skb(skb); + return err; } err = hdev->send(hdev, skb); if (err < 0) { bt_dev_err(hdev, "sending frame failed (%d)", err); kfree_skb(skb); + return err; } + + return 0; +} + +static int hci_send_conn_frame(struct hci_dev *hdev, struct hci_conn *conn, + struct sk_buff *skb) +{ + hci_conn_tx_queue(conn, skb); + return hci_send_frame(hdev, skb); } /* Send HCI command */ @@ -3550,7 +3095,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); - skb = hci_prepare_cmd(hdev, opcode, plen, param); + skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); if (!skb) { bt_dev_err(hdev, "no memory for command"); return -ENOMEM; @@ -3585,7 +3130,7 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, return -EINVAL; } - skb = hci_prepare_cmd(hdev, opcode, plen, param); + skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); if (!skb) { bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", opcode); @@ -3599,41 +3144,65 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, EXPORT_SYMBOL(__hci_cmd_send); /* Get data from the previously sent command */ -void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) +static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode) { struct hci_command_hdr *hdr; - if (!hdev->sent_cmd) + if (!skb || skb->len < HCI_COMMAND_HDR_SIZE) return NULL; - hdr = (void *) hdev->sent_cmd->data; + hdr = (void *)skb->data; if (hdr->opcode != cpu_to_le16(opcode)) return NULL; - BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); + return skb->data + HCI_COMMAND_HDR_SIZE; +} - return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; +/* Get data from the previously sent command */ +void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) +{ + void *data; + + /* Check if opcode matches last sent command */ + data = hci_cmd_data(hdev->sent_cmd, opcode); + if (!data) + /* Check if opcode matches last request */ + data = hci_cmd_data(hdev->req_skb, opcode); + + return data; } -/* Send HCI command and wait for command commplete event */ -struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, - const void *param, u32 timeout) +/* Get data from last received event */ +void *hci_recv_event_data(struct hci_dev *hdev, __u8 event) { - struct sk_buff *skb; + struct hci_event_hdr *hdr; + int offset; - if (!test_bit(HCI_UP, &hdev->flags)) - return ERR_PTR(-ENETDOWN); + if (!hdev->recv_event) + return NULL; - bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); + hdr = (void 
*)hdev->recv_event->data; + offset = sizeof(*hdr); - hci_req_sync_lock(hdev); - skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); - hci_req_sync_unlock(hdev); + if (hdr->evt != event) { + /* In case of LE metaevent check the subevent match */ + if (hdr->evt == HCI_EV_LE_META) { + struct hci_ev_le_meta *ev; - return skb; + ev = (void *)hdev->recv_event->data + offset; + offset += sizeof(*ev); + if (ev->subevent == event) + goto found; + } + return NULL; + } + +found: + bt_dev_dbg(hdev, "event 0x%2.2x", event); + + return hdev->recv_event->data + offset; } -EXPORT_SYMBOL(hci_cmd_sync); /* Send ACL data */ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) @@ -3660,17 +3229,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; - switch (hdev->dev_type) { - case HCI_PRIMARY: - hci_add_acl_hdr(skb, conn->handle, flags); - break; - case HCI_AMP: - hci_add_acl_hdr(skb, chan->handle, flags); - break; - default: - bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); - return; - } + hci_add_acl_hdr(skb, conn->handle, flags); list = skb_shinfo(skb)->frag_list; if (!list) { @@ -3708,6 +3267,8 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, spin_unlock_bh(&queue->lock); } + + bt_dev_dbg(hdev, "chan %p queued %d", chan, skb_queue_len(queue)); } void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) @@ -3739,12 +3300,124 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; skb_queue_tail(&conn->data_q, skb); + + bt_dev_dbg(hdev, "hcon %p queued %d", conn, + skb_queue_len(&conn->data_q)); + + queue_work(hdev->workqueue, &hdev->tx_work); +} + +/* Send ISO data */ +static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags) +{ + struct hci_iso_hdr *hdr; + int len = skb->len; + + skb_push(skb, HCI_ISO_HDR_SIZE); + skb_reset_transport_header(skb); + hdr = (struct hci_iso_hdr *)skb_transport_header(skb); + hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); + hdr->dlen = cpu_to_le16(len); +} + +static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue, + struct sk_buff *skb) +{ + struct hci_dev *hdev = conn->hdev; + struct sk_buff *list; + __u16 flags; + + skb->len = skb_headlen(skb); + skb->data_len = 0; + + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + + list = skb_shinfo(skb)->frag_list; + + flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00); + hci_add_iso_hdr(skb, conn->handle, flags); + + if (!list) { + /* Non fragmented */ + BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); + + skb_queue_tail(queue, skb); + } else { + /* Fragmented */ + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + skb_shinfo(skb)->frag_list = NULL; + + __skb_queue_tail(queue, skb); + + do { + skb = list; list = list->next; + + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + flags = hci_iso_flags_pack(list ? 
ISO_CONT : ISO_END, + 0x00); + hci_add_iso_hdr(skb, conn->handle, flags); + + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + __skb_queue_tail(queue, skb); + } while (list); + } + + bt_dev_dbg(hdev, "hcon %p queued %d", conn, skb_queue_len(queue)); +} + +void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("%s len %d", hdev->name, skb->len); + + hci_queue_iso(conn, &conn->data_q, skb); + queue_work(hdev->workqueue, &hdev->tx_work); } /* ---- HCI TX task (outgoing data) ---- */ /* HCI Connection scheduler */ +static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) +{ + struct hci_dev *hdev; + int cnt, q; + + if (!conn) { + *quote = 0; + return; + } + + hdev = conn->hdev; + + switch (conn->type) { + case ACL_LINK: + cnt = hdev->acl_cnt; + break; + case SCO_LINK: + case ESCO_LINK: + cnt = hdev->sco_cnt; + break; + case LE_LINK: + cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; + break; + case CIS_LINK: + case BIS_LINK: + case PA_LINK: + cnt = hdev->iso_cnt; + break; + default: + cnt = 0; + bt_dev_err(hdev, "unknown link type %d", conn->type); + } + + q = cnt / num; + *quote = q ? q : 1; +} + static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) { @@ -3758,9 +3431,14 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != type || skb_queue_empty(&c->data_q)) + if (c->type != type || + skb_queue_empty(&c->data_q)) continue; + bt_dev_dbg(hdev, "hcon %p state %s queued %d", c, + state_to_string(c->state), + skb_queue_len(&c->data_q)); + if (c->state != BT_CONNECTED && c->state != BT_CONFIG) continue; @@ -3777,29 +3455,7 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, rcu_read_unlock(); - if (conn) { - int cnt, q; - - switch (conn->type) { - case ACL_LINK: - cnt = hdev->acl_cnt; - break; - case SCO_LINK: - case ESCO_LINK: - cnt = hdev->sco_cnt; - break; - case LE_LINK: - cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; - break; - default: - cnt = 0; - bt_dev_err(hdev, "unknown link type %d", conn->type); - } - - q = cnt / num; - *quote = q ? q : 1; - } else - *quote = 0; + hci_quote_sent(conn, num, quote); BT_DBG("conn %p quote %d", conn, *quote); return conn; @@ -3812,10 +3468,10 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) bt_dev_err(hdev, "link tx timeout"); - rcu_read_lock(); + hci_dev_lock(hdev); /* Kill stalled connections */ - list_for_each_entry_rcu(c, &h->list, list) { + list_for_each_entry(c, &h->list, list) { if (c->type == type && c->sent) { bt_dev_err(hdev, "killing stalled connection %pMR", &c->dst); @@ -3823,7 +3479,7 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) } } - rcu_read_unlock(); + hci_dev_unlock(hdev); } static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, @@ -3833,7 +3489,7 @@ static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, struct hci_chan *chan = NULL; unsigned int num = 0, min = ~0, cur_prio = 0; struct hci_conn *conn; - int cnt, q, conn_num = 0; + int conn_num = 0; BT_DBG("%s", hdev->name); @@ -3883,27 +3539,8 @@ static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, if (!chan) return NULL; - switch (chan->conn->type) { - case ACL_LINK: - cnt = hdev->acl_cnt; - break; - case AMP_LINK: - cnt = hdev->block_cnt; - break; - case SCO_LINK: - case ESCO_LINK: - cnt = hdev->sco_cnt; - break; - case LE_LINK: - cnt = hdev->le_mtu ? 
hdev->le_cnt : hdev->acl_cnt; - break; - default: - cnt = 0; - bt_dev_err(hdev, "unknown link type %d", chan->conn->type); - } + hci_quote_sent(chan->conn, num, quote); - - q = cnt / num; - *quote = q ? q : 1; BT_DBG("chan %p quote %d", chan, *quote); return chan; } @@ -3958,21 +3595,82 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) } -static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) +static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) { - /* Calculate count of blocks used by this packet */ - return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); + unsigned long timeout; + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + return; + + switch (type) { + case ACL_LINK: + /* tx timeout must be longer than maximum link supervision + * timeout (40.9 seconds) + */ + timeout = hdev->acl_last_tx + HCI_ACL_TX_TIMEOUT; + break; + case LE_LINK: + /* tx timeout must be longer than maximum link supervision + * timeout (40.9 seconds) + */ + timeout = hdev->le_last_tx + HCI_ACL_TX_TIMEOUT; + break; + case CIS_LINK: + case BIS_LINK: + case PA_LINK: + /* tx timeout must be longer than the maximum transport latency + * (8.388607 seconds) + */ + timeout = hdev->iso_last_tx + HCI_ISO_TX_TIMEOUT; + break; + default: + return; + } + + if (!cnt && time_after(jiffies, timeout)) + hci_link_tx_to(hdev, type); } -static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) +/* Schedule SCO */ +static void hci_sched_sco(struct hci_dev *hdev, __u8 type) { - if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { - /* ACL tx timeout must be longer than maximum - * link supervision timeout (40.9 seconds) */ - if (!cnt && time_after(jiffies, hdev->acl_last_tx + - HCI_ACL_TX_TIMEOUT)) - hci_link_tx_to(hdev, ACL_LINK); + struct hci_conn *conn; + struct sk_buff *skb; + int quote, *cnt; + unsigned int pkts = hdev->sco_pkts; + + bt_dev_dbg(hdev, "type %u", type); + + if (!hci_conn_num(hdev, type) || !pkts) + return; + + /* Use sco_pkts if flow control has not been enabled which will limit + * the amount of buffer sent in a row. + */ + if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL)) + cnt = &pkts; + else + cnt = &hdev->sco_cnt; + + while (*cnt && (conn = hci_low_sent(hdev, type, &quote))) { + while (quote-- && (skb = skb_dequeue(&conn->data_q))) { + BT_DBG("skb %p len %d", skb, skb->len); + hci_send_conn_frame(hdev, conn, skb); + + conn->sent++; + if (conn->sent == ~0) + conn->sent = 0; + (*cnt)--; + } } + + /* Rescheduled if all packets were sent and flow control is not enabled + * as there could be more packets queued that could not be sent and + * since no HCI_EV_NUM_COMP_PKTS event will be generated the reschedule + * needs to be forced.
+ */ + if (!pkts && !hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL)) + queue_work(hdev->workqueue, &hdev->tx_work); } static void hci_sched_acl_pkt(struct hci_dev *hdev) @@ -3982,7 +3680,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) struct sk_buff *skb; int quote; - __check_timeout(hdev, cnt); + __check_timeout(hdev, cnt, ACL_LINK); while (hdev->acl_cnt && (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { @@ -4000,12 +3698,16 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) hci_conn_enter_active_mode(chan->conn, bt_cb(skb)->force_active); - hci_send_frame(hdev, skb); + hci_send_conn_frame(hdev, chan->conn, skb); hdev->acl_last_tx = jiffies; hdev->acl_cnt--; chan->sent++; chan->conn->sent++; + + /* Send pending SCO packets right away */ + hci_sched_sco(hdev, SCO_LINK); + hci_sched_sco(hdev, ESCO_LINK); } } @@ -4013,29 +3715,36 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) hci_prio_recalculate(hdev, ACL_LINK); } -static void hci_sched_acl_blk(struct hci_dev *hdev) +static void hci_sched_acl(struct hci_dev *hdev) +{ + BT_DBG("%s", hdev->name); + + /* No ACL link over BR/EDR controller */ + if (!hci_conn_num(hdev, ACL_LINK)) + return; + + hci_sched_acl_pkt(hdev); +} + +static void hci_sched_le(struct hci_dev *hdev) { - unsigned int cnt = hdev->block_cnt; struct hci_chan *chan; struct sk_buff *skb; - int quote; - u8 type; - - __check_timeout(hdev, cnt); + int quote, *cnt, tmp; BT_DBG("%s", hdev->name); - if (hdev->dev_type == HCI_AMP) - type = AMP_LINK; - else - type = ACL_LINK; + if (!hci_conn_num(hdev, LE_LINK)) + return; - while (hdev->block_cnt > 0 && - (chan = hci_chan_sent(hdev, type, &quote))) { - u32 priority = (skb_peek(&chan->data_q))->priority; - while (quote > 0 && (skb = skb_peek(&chan->data_q))) { - int blocks; + cnt = hdev->le_pkts ?
&hdev->le_cnt : &hdev->acl_cnt; + + __check_timeout(hdev, *cnt, LE_LINK); + tmp = *cnt; + while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) { + u32 priority = (skb_peek(&chan->data_q))->priority; + while (quote-- && (skb = skb_peek(&chan->data_q))) { BT_DBG("chan %p skb %p len %d priority %u", chan, skb, skb->len, skb->priority); @@ -4045,163 +3754,70 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) skb = skb_dequeue(&chan->data_q); - blocks = __get_blocks(hdev, skb); - if (blocks > hdev->block_cnt) - return; - - hci_conn_enter_active_mode(chan->conn, - bt_cb(skb)->force_active); - - hci_send_frame(hdev, skb); - hdev->acl_last_tx = jiffies; + hci_send_conn_frame(hdev, chan->conn, skb); + hdev->le_last_tx = jiffies; - hdev->block_cnt -= blocks; - quote -= blocks; + (*cnt)--; + chan->sent++; + chan->conn->sent++; - chan->sent += blocks; - chan->conn->sent += blocks; + /* Send pending SCO packets right away */ + hci_sched_sco(hdev, SCO_LINK); + hci_sched_sco(hdev, ESCO_LINK); } } - if (cnt != hdev->block_cnt) - hci_prio_recalculate(hdev, type); -} - -static void hci_sched_acl(struct hci_dev *hdev) -{ - BT_DBG("%s", hdev->name); - - /* No ACL link over BR/EDR controller */ - if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY) - return; - - /* No AMP link over AMP controller */ - if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP) - return; - - switch (hdev->flow_ctl_mode) { - case HCI_FLOW_CTL_MODE_PACKET_BASED: - hci_sched_acl_pkt(hdev); - break; - - case HCI_FLOW_CTL_MODE_BLOCK_BASED: - hci_sched_acl_blk(hdev); - break; - } + if (*cnt != tmp) + hci_prio_recalculate(hdev, LE_LINK); } -/* Schedule SCO */ -static void hci_sched_sco(struct hci_dev *hdev) +/* Schedule iso */ +static void hci_sched_iso(struct hci_dev *hdev, __u8 type) { struct hci_conn *conn; struct sk_buff *skb; - int quote; + int quote, *cnt; BT_DBG("%s", hdev->name); - if (!hci_conn_num(hdev, SCO_LINK)) + if (!hci_conn_num(hdev, type)) return; - while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) { - while (quote-- && (skb = skb_dequeue(&conn->data_q))) { - BT_DBG("skb %p len %d", skb, skb->len); - hci_send_frame(hdev, skb); + cnt = &hdev->iso_cnt; - conn->sent++; - if (conn->sent == ~0) - conn->sent = 0; - } - } -} - -static void hci_sched_esco(struct hci_dev *hdev) -{ - struct hci_conn *conn; - struct sk_buff *skb; - int quote; - - BT_DBG("%s", hdev->name); + __check_timeout(hdev, *cnt, type); - if (!hci_conn_num(hdev, ESCO_LINK)) - return; - - while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, - &quote))) { + while (*cnt && (conn = hci_low_sent(hdev, type, &quote))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); - hci_send_frame(hdev, skb); + + hci_send_conn_frame(hdev, conn, skb); + hdev->iso_last_tx = jiffies; conn->sent++; if (conn->sent == ~0) conn->sent = 0; + (*cnt)--; } } } -static void hci_sched_le(struct hci_dev *hdev) -{ - struct hci_chan *chan; - struct sk_buff *skb; - int quote, cnt, tmp; - - BT_DBG("%s", hdev->name); - - if (!hci_conn_num(hdev, LE_LINK)) - return; - - if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { - /* LE tx timeout must be longer than maximum - * link supervision timeout (40.9 seconds) */ - if (!hdev->le_cnt && hdev->le_pkts && - time_after(jiffies, hdev->le_last_tx + HZ * 45)) - hci_link_tx_to(hdev, LE_LINK); - } - - cnt = hdev->le_pkts ?
hdev->le_cnt : hdev->acl_cnt; - tmp = cnt; - while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) { - u32 priority = (skb_peek(&chan->data_q))->priority; - while (quote-- && (skb = skb_peek(&chan->data_q))) { - BT_DBG("chan %p skb %p len %d priority %u", chan, skb, - skb->len, skb->priority); - - /* Stop if priority has changed */ - if (skb->priority < priority) - break; - - skb = skb_dequeue(&chan->data_q); - - hci_send_frame(hdev, skb); - hdev->le_last_tx = jiffies; - - cnt--; - chan->sent++; - chan->conn->sent++; - } - } - - if (hdev->le_pkts) - hdev->le_cnt = cnt; - else - hdev->acl_cnt = cnt; - - if (cnt != tmp) - hci_prio_recalculate(hdev, LE_LINK); -} - static void hci_tx_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); struct sk_buff *skb; - BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, - hdev->sco_cnt, hdev->le_cnt); + BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt, + hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt); if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { /* Schedule queues and send stuff to HCI driver */ + hci_sched_sco(hdev, SCO_LINK); + hci_sched_sco(hdev, ESCO_LINK); + hci_sched_iso(hdev, CIS_LINK); + hci_sched_iso(hdev, BIS_LINK); + hci_sched_iso(hdev, PA_LINK); hci_sched_acl(hdev); - hci_sched_sco(hdev); - hci_sched_esco(hdev); hci_sched_le(hdev); } @@ -4215,68 +3831,96 @@ static void hci_tx_work(struct work_struct *work) /* ACL data packet */ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_acl_hdr *hdr = (void *) skb->data; - struct hci_conn *conn; + struct hci_acl_hdr *hdr; __u16 handle, flags; + int err; - skb_pull(skb, HCI_ACL_HDR_SIZE); + hdr = skb_pull_data(skb, sizeof(*hdr)); + if (!hdr) { + bt_dev_err(hdev, "ACL packet too small"); + kfree_skb(skb); + return; + } handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); - BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, - handle, flags); + bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, + handle, flags); hdev->stat.acl_rx++; - hci_dev_lock(hdev); - conn = hci_conn_hash_lookup_handle(hdev, handle); - hci_dev_unlock(hdev); - - if (conn) { - hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); - - /* Send to upper protocol */ - l2cap_recv_acldata(conn, skb, flags); - return; - } else { + err = l2cap_recv_acldata(hdev, handle, skb, flags); + if (err == -ENOENT) bt_dev_err(hdev, "ACL packet for unknown connection handle %d", handle); - } - - kfree_skb(skb); + else if (err) + bt_dev_dbg(hdev, "ACL packet recv for handle %d failed: %d", + handle, err); } /* SCO data packet */ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_sco_hdr *hdr = (void *) skb->data; - struct hci_conn *conn; - __u16 handle; + struct hci_sco_hdr *hdr; + __u16 handle, flags; + int err; - skb_pull(skb, HCI_SCO_HDR_SIZE); + hdr = skb_pull_data(skb, sizeof(*hdr)); + if (!hdr) { + bt_dev_err(hdev, "SCO packet too small"); + kfree_skb(skb); + return; + } handle = __le16_to_cpu(hdr->handle); + flags = hci_flags(handle); + handle = hci_handle(handle); - BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle); + bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, + handle, flags); hdev->stat.sco_rx++; - hci_dev_lock(hdev); - conn = hci_conn_hash_lookup_handle(hdev, handle); - hci_dev_unlock(hdev); + hci_skb_pkt_status(skb) = flags & 0x03; + + err = sco_recv_scodata(hdev, handle,
skb); + if (err == -ENOENT) + bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d", + handle); + else if (err) + bt_dev_dbg(hdev, "SCO packet recv for handle %d failed: %d", + handle, err); +} - if (conn) { - /* Send to upper protocol */ - sco_recv_scodata(conn, skb); +static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_iso_hdr *hdr; + __u16 handle, flags; + int err; + + hdr = skb_pull_data(skb, sizeof(*hdr)); + if (!hdr) { + bt_dev_err(hdev, "ISO packet too small"); + kfree_skb(skb); return; - } else { - bt_dev_err(hdev, "SCO packet for unknown connection handle %d", - handle); } - kfree_skb(skb); + handle = __le16_to_cpu(hdr->handle); + flags = hci_flags(handle); + handle = hci_handle(handle); + + bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, + handle, flags); + + err = iso_recv(hdev, handle, skb, flags); + if (err == -ENOENT) + bt_dev_err(hdev, "ISO packet for unknown connection handle %d", + handle); + else if (err) + bt_dev_dbg(hdev, "ISO packet recv for handle %d failed: %d", + handle, err); } static bool hci_req_is_complete(struct hci_dev *hdev) @@ -4337,23 +3981,28 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, return; } + /* If we reach this point this event matches the last command sent */ + hci_dev_clear_flag(hdev, HCI_CMD_PENDING); + /* If the command succeeded and there's still more commands in * this request the request is not yet complete. */ if (!status && !hci_req_is_complete(hdev)) return; + skb = hdev->req_skb; + /* If this was the last command in a request the complete - * callback would be found in hdev->sent_cmd instead of the + * callback would be found in hdev->req_skb instead of the * command queue (hdev->cmd_q). */ - if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) { - *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb; + if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) { + *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; return; } - if (bt_cb(hdev->sent_cmd)->hci.req_complete) { - *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete; + if (skb && bt_cb(skb)->hci.req_complete) { + *req_complete = bt_cb(skb)->hci.req_complete; return; } @@ -4369,7 +4018,7 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; else *req_complete = bt_cb(skb)->hci.req_complete; - kfree_skb(skb); + dev_kfree_skb_irq(skb); } spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); } @@ -4381,7 +4030,14 @@ static void hci_rx_work(struct work_struct *work) BT_DBG("%s", hdev->name); - while ((skb = skb_dequeue(&hdev->rx_q))) { + /* The kcov_remote functions used for collecting packet parsing + * coverage information from this background thread and associate + * the coverage with the syscall's thread which originally injected + * the packet. This helps fuzzing the kernel. + */ + for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) { + kcov_remote_start_common(skb_get_kcov_handle(skb)); + /* Send copy to monitor */ hci_send_to_monitor(hdev, skb); @@ -4390,7 +4046,14 @@ static void hci_rx_work(struct work_struct *work) hci_send_to_sock(hdev, skb); } - if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + /* If the device has been opened in HCI_USER_CHANNEL, + * the userspace has exclusive access to device. + * When device is HCI_INIT, we still need to process + * the data packets to the driver in order + * to complete its setup(). 
+ */ + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + !test_bit(HCI_INIT, &hdev->flags)) { kfree_skb(skb); continue; } @@ -4400,6 +4063,7 @@ static void hci_rx_work(struct work_struct *work) switch (hci_skb_pkt_type(skb)) { case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: + case HCI_ISODATA_PKT: kfree_skb(skb); continue; } @@ -4422,6 +4086,11 @@ static void hci_rx_work(struct work_struct *work) hci_scodata_packet(hdev, skb); break; + case HCI_ISODATA_PKT: + BT_DBG("%s ISO data packet", hdev->name); + hci_isodata_packet(hdev, skb); + break; + default: kfree_skb(skb); break; @@ -4429,10 +4098,47 @@ static void hci_rx_work(struct work_struct *work) } } +static int hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb) +{ + int err; + + bt_dev_dbg(hdev, "skb %p", skb); + + kfree_skb(hdev->sent_cmd); + + hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); + if (!hdev->sent_cmd) { + skb_queue_head(&hdev->cmd_q, skb); + queue_work(hdev->workqueue, &hdev->cmd_work); + return -EINVAL; + } + + if (hci_skb_opcode(skb) != HCI_OP_NOP) { + err = hci_send_frame(hdev, skb); + if (err < 0) { + hci_cmd_sync_cancel_sync(hdev, -err); + return err; + } + atomic_dec(&hdev->cmd_cnt); + } else { + err = -ENODATA; + kfree_skb(skb); + } + + if (hdev->req_status == HCI_REQ_PEND && + !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) { + kfree_skb(hdev->req_skb); + hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); + } + + return err; +} + static void hci_cmd_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); struct sk_buff *skb; + int err; BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name, atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q)); @@ -4443,20 +4149,17 @@ static void hci_cmd_work(struct work_struct *work) if (!skb) return; - kfree_skb(hdev->sent_cmd); - - hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); - if (hdev->sent_cmd) { - atomic_dec(&hdev->cmd_cnt); - hci_send_frame(hdev, skb); - if (test_bit(HCI_RESET, &hdev->flags)) - cancel_delayed_work(&hdev->cmd_timer); - else - schedule_delayed_work(&hdev->cmd_timer, - HCI_CMD_TIMEOUT); - } else { - skb_queue_head(&hdev->cmd_q, skb); - queue_work(hdev->workqueue, &hdev->cmd_work); - } + err = hci_send_cmd_sync(hdev, skb); + if (err) + return; + + rcu_read_lock(); + if (test_bit(HCI_RESET, &hdev->flags) || + hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) + cancel_delayed_work(&hdev->cmd_timer); + else + queue_delayed_work(hdev->workqueue, &hdev->cmd_timer, + HCI_CMD_TIMEOUT); + rcu_read_unlock(); + } +}
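Editor's note: the scheduling rework in this commit folds four per-link-type quota computations into the single helper hci_quote_sent(), whose rule is small enough to check in isolation: a connection with queued data may send at most cnt / num packets per pass (controller credits divided by the number of competing connections), but never fewer than one, so a connection cannot be starved outright when credits are scarce; the surrounding while (*cnt && ...) loops still enforce the hard credit limit. The standalone C harness below is a minimal sketch of that rule for illustration only; quote_sent() and its sample values are ours, not kernel code.

	#include <stdio.h>

	/* Sketch of the hci_quote_sent() fair-share rule:
	 * quota = max(cnt / num, 1), or 0 when there is no
	 * connection to serve.
	 */
	static int quote_sent(int have_conn, int cnt, int num)
	{
		int q;

		if (!have_conn)
			return 0;

		q = cnt / num;
		return q ? q : 1;
	}

	int main(void)
	{
		printf("%d\n", quote_sent(1, 10, 4)); /* 2: 10 credits over 4 links */
		printf("%d\n", quote_sent(1, 1, 4));  /* 1: the floor never reaches 0 */
		printf("%d\n", quote_sent(0, 10, 4)); /* 0: nothing to schedule */
		return 0;
	}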

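Editor's note: the new hci_queue_iso() path labels each buffer of an outgoing ISO SDU before queuing it: an unfragmented SDU is ISO_SINGLE, otherwise the head fragment is ISO_START, middle fragments are ISO_CONT and the final one is ISO_END, after which hci_iso_flags_pack() folds the flag into the packet handle field. The sketch below reproduces only that flag selection; the enum values follow the PB_Flag encoding of the Core specification, while pb_flag() and its harness are illustrative rather than the kernel implementation.

	#include <stdio.h>

	/* PB_Flag values as used by the ISO data path */
	enum iso_pb_flag {
		ISO_START  = 0x00, /* first fragment of a fragmented SDU */
		ISO_CONT   = 0x01, /* continuation fragment */
		ISO_SINGLE = 0x02, /* complete SDU in a single packet */
		ISO_END    = 0x03, /* last fragment */
	};

	/* Pick the flag for fragment idx out of nfrags, mirroring the
	 * frag_list walk in hci_queue_iso().
	 */
	static enum iso_pb_flag pb_flag(int idx, int nfrags)
	{
		if (nfrags == 1)
			return ISO_SINGLE;
		if (idx == 0)
			return ISO_START;
		return idx == nfrags - 1 ? ISO_END : ISO_CONT;
	}

	int main(void)
	{
		static const char * const name[] = {
			"ISO_START", "ISO_CONT", "ISO_SINGLE", "ISO_END"
		};
		int i;

		/* One SDU split into four buffers: START, CONT, CONT, END */
		for (i = 0; i < 4; i++)
			printf("frag %d -> %s\n", i, name[pb_flag(i, 4)]);

		/* Unfragmented SDU: SINGLE */
		printf("frag 0 -> %s\n", name[pb_flag(0, 1)]);
		return 0;
	}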