Diffstat (limited to 'net/bluetooth')
-rw-r--r-- | net/bluetooth/a2mp.h | 10
-rw-r--r-- | net/bluetooth/bnep/bnep.h | 6
-rw-r--r-- | net/bluetooth/hci_conn.c | 146
-rw-r--r-- | net/bluetooth/hci_core.c | 143
-rw-r--r-- | net/bluetooth/hci_event.c | 114
-rw-r--r-- | net/bluetooth/hci_request.c | 350
-rw-r--r-- | net/bluetooth/hci_request.h | 2
-rw-r--r-- | net/bluetooth/hidp/core.c | 2
-rw-r--r-- | net/bluetooth/l2cap_core.c | 734
-rw-r--r-- | net/bluetooth/l2cap_sock.c | 67
-rw-r--r-- | net/bluetooth/mgmt.c | 113
-rw-r--r-- | net/bluetooth/rfcomm/core.c | 13
-rw-r--r-- | net/bluetooth/rfcomm/tty.c | 4
-rw-r--r-- | net/bluetooth/sco.c | 13
-rw-r--r-- | net/bluetooth/smp.c | 29
15 files changed, 1570 insertions(+), 176 deletions(-)
diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h index 0029d5119be6..2fd253a61a2a 100644 --- a/net/bluetooth/a2mp.h +++ b/net/bluetooth/a2mp.h @@ -36,14 +36,14 @@ struct a2mp_cmd { __u8 code; __u8 ident; __le16 len; - __u8 data[0]; + __u8 data[]; } __packed; /* A2MP command codes */ #define A2MP_COMMAND_REJ 0x01 struct a2mp_cmd_rej { __le16 reason; - __u8 data[0]; + __u8 data[]; } __packed; #define A2MP_DISCOVER_REQ 0x02 @@ -62,7 +62,7 @@ struct a2mp_cl { struct a2mp_discov_rsp { __le16 mtu; __le16 ext_feat; - struct a2mp_cl cl[0]; + struct a2mp_cl cl[]; } __packed; #define A2MP_CHANGE_NOTIFY 0x04 @@ -93,7 +93,7 @@ struct a2mp_amp_assoc_req { struct a2mp_amp_assoc_rsp { __u8 id; __u8 status; - __u8 amp_assoc[0]; + __u8 amp_assoc[]; } __packed; #define A2MP_CREATEPHYSLINK_REQ 0x0A @@ -101,7 +101,7 @@ struct a2mp_amp_assoc_rsp { struct a2mp_physlink_req { __u8 local_id; __u8 remote_id; - __u8 amp_assoc[0]; + __u8 amp_assoc[]; } __packed; #define A2MP_CREATEPHYSLINK_RSP 0x0B diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h index 24f18b133959..9680473ed7ef 100644 --- a/net/bluetooth/bnep/bnep.h +++ b/net/bluetooth/bnep/bnep.h @@ -74,14 +74,14 @@ struct bnep_setup_conn_req { __u8 type; __u8 ctrl; __u8 uuid_size; - __u8 service[0]; + __u8 service[]; } __packed; struct bnep_set_filter_req { __u8 type; __u8 ctrl; __be16 len; - __u8 list[0]; + __u8 list[]; } __packed; struct bnep_control_rsp { @@ -93,7 +93,7 @@ struct bnep_control_rsp { struct bnep_ext_hdr { __u8 type; __u8 len; - __u8 data[0]; + __u8 data[]; } __packed; /* BNEP ioctl defines */ diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 87691404d0c6..e245bc155cc2 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -467,6 +467,23 @@ static void hci_conn_auto_accept(struct work_struct *work) &conn->dst); } +static void le_disable_advertising(struct hci_dev *hdev) +{ + if (ext_adv_capable(hdev)) { + struct hci_cp_le_set_ext_adv_enable cp; + + cp.enable = 0x00; + cp.num_of_sets = 0x00; + + hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), + &cp); + } else { + u8 enable = 0x00; + hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), + &enable); + } +} + static void le_conn_timeout(struct work_struct *work) { struct hci_conn *conn = container_of(work, struct hci_conn, @@ -481,9 +498,8 @@ static void le_conn_timeout(struct work_struct *work) * (which doesn't have a timeout of its own). */ if (conn->role == HCI_ROLE_SLAVE) { - u8 enable = 0x00; - hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), - &enable); + /* Disable LE Advertising */ + le_disable_advertising(hdev); hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT); return; } @@ -898,6 +914,16 @@ static void hci_req_directed_advertising(struct hci_request *req, cp.peer_addr_type = conn->dst_type; bacpy(&cp.peer_addr, &conn->dst); + /* As per Core Spec 5.2 Vol 2, PART E, Sec 7.8.53, for + * advertising_event_property LE_LEGACY_ADV_DIRECT_IND + * does not supports advertising data when the advertising set already + * contains some, the controller shall return erroc code 'Invalid + * HCI Command Parameters(0x12). + * So it is required to remove adv set for handle 0x00. since we use + * instance 0 for directed adv. 
+ */ + hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(cp.handle), &cp.handle); + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); if (own_addr_type == ADDR_LE_DEV_RANDOM && @@ -1029,11 +1055,8 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, * anyway have to disable it in order to start directed * advertising. */ - if (hci_dev_test_flag(hdev, HCI_LE_ADV)) { - u8 enable = 0x00; - hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), - &enable); - } + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) + __hci_req_disable_advertising(&req); /* If requested to connect as slave use directed advertising */ if (conn->role == HCI_ROLE_SLAVE) { @@ -1725,3 +1748,110 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle) return hchan; } + +u32 hci_conn_get_phy(struct hci_conn *conn) +{ + u32 phys = 0; + + hci_dev_lock(conn->hdev); + + /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471: + * Table 6.2: Packets defined for synchronous, asynchronous, and + * CSB logical transport types. + */ + switch (conn->type) { + case SCO_LINK: + /* SCO logical transport (1 Mb/s): + * HV1, HV2, HV3 and DV. + */ + phys |= BT_PHY_BR_1M_1SLOT; + + break; + + case ACL_LINK: + /* ACL logical transport (1 Mb/s) ptt=0: + * DH1, DM3, DH3, DM5 and DH5. + */ + phys |= BT_PHY_BR_1M_1SLOT; + + if (conn->pkt_type & (HCI_DM3 | HCI_DH3)) + phys |= BT_PHY_BR_1M_3SLOT; + + if (conn->pkt_type & (HCI_DM5 | HCI_DH5)) + phys |= BT_PHY_BR_1M_5SLOT; + + /* ACL logical transport (2 Mb/s) ptt=1: + * 2-DH1, 2-DH3 and 2-DH5. + */ + if (!(conn->pkt_type & HCI_2DH1)) + phys |= BT_PHY_EDR_2M_1SLOT; + + if (!(conn->pkt_type & HCI_2DH3)) + phys |= BT_PHY_EDR_2M_3SLOT; + + if (!(conn->pkt_type & HCI_2DH5)) + phys |= BT_PHY_EDR_2M_5SLOT; + + /* ACL logical transport (3 Mb/s) ptt=1: + * 3-DH1, 3-DH3 and 3-DH5. 
+ */ + if (!(conn->pkt_type & HCI_3DH1)) + phys |= BT_PHY_EDR_3M_1SLOT; + + if (!(conn->pkt_type & HCI_3DH3)) + phys |= BT_PHY_EDR_3M_3SLOT; + + if (!(conn->pkt_type & HCI_3DH5)) + phys |= BT_PHY_EDR_3M_5SLOT; + + break; + + case ESCO_LINK: + /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */ + phys |= BT_PHY_BR_1M_1SLOT; + + if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5))) + phys |= BT_PHY_BR_1M_3SLOT; + + /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */ + if (!(conn->pkt_type & ESCO_2EV3)) + phys |= BT_PHY_EDR_2M_1SLOT; + + if (!(conn->pkt_type & ESCO_2EV5)) + phys |= BT_PHY_EDR_2M_3SLOT; + + /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */ + if (!(conn->pkt_type & ESCO_3EV3)) + phys |= BT_PHY_EDR_3M_1SLOT; + + if (!(conn->pkt_type & ESCO_3EV5)) + phys |= BT_PHY_EDR_3M_3SLOT; + + break; + + case LE_LINK: + if (conn->le_tx_phy & HCI_LE_SET_PHY_1M) + phys |= BT_PHY_LE_1M_TX; + + if (conn->le_rx_phy & HCI_LE_SET_PHY_1M) + phys |= BT_PHY_LE_1M_RX; + + if (conn->le_tx_phy & HCI_LE_SET_PHY_2M) + phys |= BT_PHY_LE_2M_TX; + + if (conn->le_rx_phy & HCI_LE_SET_PHY_2M) + phys |= BT_PHY_LE_2M_RX; + + if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED) + phys |= BT_PHY_LE_CODED_TX; + + if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED) + phys |= BT_PHY_LE_CODED_RX; + + break; + } + + hci_dev_unlock(conn->hdev); + + return phys; +} diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index cbbc34a006d1..2e7bc2da8371 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -31,6 +31,8 @@ #include <linux/debugfs.h> #include <linux/crypto.h> #include <linux/property.h> +#include <linux/suspend.h> +#include <linux/wait.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> @@ -603,6 +605,9 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) if (hdev->commands[8] & 0x01) hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); + if (hdev->commands[18] & 0x04) + hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL); + /* Some older Broadcom based Bluetooth 1.2 controllers do not * support the Read Page Scan Type command. Check support for * this command in the bit mask of supported commands. @@ -838,6 +843,26 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt) sizeof(support), &support); } + /* Set erroneous data reporting if supported to the wideband speech + * setting value + */ + if (hdev->commands[18] & 0x08) { + bool enabled = hci_dev_test_flag(hdev, + HCI_WIDEBAND_SPEECH_ENABLED); + + if (enabled != + (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) { + struct hci_cp_write_def_err_data_reporting cp; + + cp.err_data_reporting = enabled ? + ERR_DATA_REPORTING_ENABLED : + ERR_DATA_REPORTING_DISABLED; + + hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, + sizeof(cp), &cp); + } + } + /* Set Suggested Default Data Length to maximum if supported */ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { struct hci_cp_le_write_def_data_len cp; @@ -1764,6 +1789,9 @@ int hci_dev_do_close(struct hci_dev *hdev) clear_bit(HCI_RUNNING, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_CLOSE); + if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks)) + wake_up(&hdev->suspend_wait_q); + /* After this point our queues are empty * and no tasks are scheduled. 
*/ hdev->close(hdev); @@ -2285,7 +2313,7 @@ void hci_link_keys_clear(struct hci_dev *hdev) { struct link_key *key; - list_for_each_entry_rcu(key, &hdev->link_keys, list) { + list_for_each_entry(key, &hdev->link_keys, list) { list_del_rcu(&key->list); kfree_rcu(key, rcu); } @@ -2295,7 +2323,7 @@ void hci_smp_ltks_clear(struct hci_dev *hdev) { struct smp_ltk *k; - list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { + list_for_each_entry(k, &hdev->long_term_keys, list) { list_del_rcu(&k->list); kfree_rcu(k, rcu); } @@ -2305,7 +2333,7 @@ void hci_smp_irks_clear(struct hci_dev *hdev) { struct smp_irk *k; - list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { + list_for_each_entry(k, &hdev->identity_resolving_keys, list) { list_del_rcu(&k->list); kfree_rcu(k, rcu); } @@ -2315,7 +2343,7 @@ void hci_blocked_keys_clear(struct hci_dev *hdev) { struct blocked_key *b; - list_for_each_entry_rcu(b, &hdev->blocked_keys, list) { + list_for_each_entry(b, &hdev->blocked_keys, list) { list_del_rcu(&b->list); kfree_rcu(b, rcu); } @@ -2327,7 +2355,7 @@ bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]) struct blocked_key *b; rcu_read_lock(); - list_for_each_entry(b, &hdev->blocked_keys, list) { + list_for_each_entry_rcu(b, &hdev->blocked_keys, list) { if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) { blocked = true; break; @@ -3241,6 +3269,94 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, } } +static int hci_suspend_wait_event(struct hci_dev *hdev) +{ +#define WAKE_COND \ + (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \ + __SUSPEND_NUM_TASKS) + + int i; + int ret = wait_event_timeout(hdev->suspend_wait_q, + WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT); + + if (ret == 0) { + bt_dev_dbg(hdev, "Timed out waiting for suspend"); + for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) { + if (test_bit(i, hdev->suspend_tasks)) + bt_dev_dbg(hdev, "Bit %d is set", i); + clear_bit(i, hdev->suspend_tasks); + } + + ret = -ETIMEDOUT; + } else { + ret = 0; + } + + return ret; +} + +static void hci_prepare_suspend(struct work_struct *work) +{ + struct hci_dev *hdev = + container_of(work, struct hci_dev, suspend_prepare); + + hci_dev_lock(hdev); + hci_req_prepare_suspend(hdev, hdev->suspend_state_next); + hci_dev_unlock(hdev); +} + +static int hci_change_suspend_state(struct hci_dev *hdev, + enum suspended_state next) +{ + hdev->suspend_state_next = next; + set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks); + queue_work(hdev->req_workqueue, &hdev->suspend_prepare); + return hci_suspend_wait_event(hdev); +} + +static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct hci_dev *hdev = + container_of(nb, struct hci_dev, suspend_notifier); + int ret = 0; + + /* If powering down, wait for completion. */ + if (mgmt_powering_down(hdev)) { + set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks); + ret = hci_suspend_wait_event(hdev); + if (ret) + goto done; + } + + /* Suspend notifier should only act on events when powered. 
*/ + if (!hdev_is_powered(hdev)) + goto done; + + if (action == PM_SUSPEND_PREPARE) { + /* Suspend consists of two actions: + * - First, disconnect everything and make the controller not + * connectable (disabling scanning) + * - Second, program event filter/whitelist and enable scan + */ + ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT); + + /* Only configure whitelist if disconnect succeeded */ + if (!ret) + ret = hci_change_suspend_state(hdev, + BT_SUSPEND_COMPLETE); + } else if (action == PM_POST_SUSPEND) { + ret = hci_change_suspend_state(hdev, BT_RUNNING); + } + + /* If suspend failed, restore it to running */ + if (ret && action == PM_SUSPEND_PREPARE) + hci_change_suspend_state(hdev, BT_RUNNING); + +done: + return ret ? notifier_from_errno(-EBUSY) : NOTIFY_STOP; +} + /* Alloc HCI device */ struct hci_dev *hci_alloc_dev(void) { @@ -3299,6 +3415,7 @@ struct hci_dev *hci_alloc_dev(void) INIT_LIST_HEAD(&hdev->mgmt_pending); INIT_LIST_HEAD(&hdev->blacklist); INIT_LIST_HEAD(&hdev->whitelist); + INIT_LIST_HEAD(&hdev->wakeable); INIT_LIST_HEAD(&hdev->uuids); INIT_LIST_HEAD(&hdev->link_keys); INIT_LIST_HEAD(&hdev->long_term_keys); @@ -3318,6 +3435,7 @@ struct hci_dev *hci_alloc_dev(void) INIT_WORK(&hdev->tx_work, hci_tx_work); INIT_WORK(&hdev->power_on, hci_power_on); INIT_WORK(&hdev->error_reset, hci_error_reset); + INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend); INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); @@ -3326,6 +3444,7 @@ struct hci_dev *hci_alloc_dev(void) skb_queue_head_init(&hdev->raw_q); init_waitqueue_head(&hdev->req_wait_q); + init_waitqueue_head(&hdev->suspend_wait_q); INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); @@ -3437,6 +3556,11 @@ int hci_register_dev(struct hci_dev *hdev) hci_sock_dev_event(hdev, HCI_DEV_REG); hci_dev_hold(hdev); + hdev->suspend_notifier.notifier_call = hci_suspend_notifier; + error = register_pm_notifier(&hdev->suspend_notifier); + if (error) + goto err_wqueue; + queue_work(hdev->req_workqueue, &hdev->power_on); return id; @@ -3470,6 +3594,8 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_dev_do_close(hdev); + unregister_pm_notifier(&hdev->suspend_notifier); + if (!test_bit(HCI_INIT, &hdev->flags) && !hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG)) { @@ -4387,13 +4513,16 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_sco_hdr *hdr = (void *) skb->data; struct hci_conn *conn; - __u16 handle; + __u16 handle, flags; skb_pull(skb, HCI_SCO_HDR_SIZE); handle = __le16_to_cpu(hdr->handle); + flags = hci_flags(handle); + handle = hci_handle(handle); - BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle); + BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, + handle, flags); hdev->stat.sco_rx++; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 6ddc4a74a5e4..0a591be8b0ae 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -901,6 +901,37 @@ static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, hdev->inq_tx_power = rp->tx_power; } +static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data; + + BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + + if (rp->status) + return; + + hdev->err_data_reporting = rp->err_data_reporting; +} + +static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, + struct sk_buff *skb) +{ + __u8 status = *((__u8 *)skb->data); + 
struct hci_cp_write_def_err_data_reporting *cp; + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING); + if (!cp) + return; + + hdev->err_data_reporting = cp->err_data_reporting; +} + static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_pin_code_reply *rp = (void *) skb->data; @@ -2202,10 +2233,22 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); - if (conn) + if (conn) { + u8 type = conn->type; + mgmt_disconnect_failed(hdev, &conn->dst, conn->type, conn->dst_type, status); + /* If the disconnection failed for any reason, the upper layer + * does not retry to disconnect in current implementation. + * Hence, we need to do some basic cleanup here and re-enable + * advertising if necessary. + */ + hci_conn_del(conn); + if (type == LE_LINK) + hci_req_reenable_advertising(hdev); + } + hci_dev_unlock(hdev); } @@ -2474,6 +2517,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_conn_complete *ev = (void *) skb->data; + struct inquiry_entry *ie; struct hci_conn *conn; BT_DBG("%s", hdev->name); @@ -2482,14 +2526,30 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn) { - if (ev->link_type != SCO_LINK) - goto unlock; + /* Connection may not exist if auto-connected. Check the inquiry + * cache to see if we've already discovered this bdaddr before. + * If found and link is an ACL type, create a connection class + * automatically. + */ + ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); + if (ie && ev->link_type == ACL_LINK) { + conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, + HCI_ROLE_SLAVE); + if (!conn) { + bt_dev_err(hdev, "no memory for new conn"); + goto unlock; + } + } else { + if (ev->link_type != SCO_LINK) + goto unlock; - conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); - if (!conn) - goto unlock; + conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, + &ev->bdaddr); + if (!conn) + goto unlock; - conn->type = SCO_LINK; + conn->type = SCO_LINK; + } } if (!ev->status) { @@ -2743,6 +2803,14 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_disconn_cfm(conn, ev->reason); hci_conn_del(conn); + /* The suspend notifier is waiting for all devices to disconnect so + * clear the bit from pending tasks and inform the wait queue. + */ + if (list_empty(&hdev->conn_hash.list) && + test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) { + wake_up(&hdev->suspend_wait_q); + } + /* Re-enable advertising if necessary, since it might * have been disabled by the connection. From the * HCI_LE_Set_Advertise_Enable command description in @@ -2895,14 +2963,14 @@ static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, if (!conn) goto unlock; - /* If we fail to read the encryption key size, assume maximum - * (which is the same we do also when this HCI command isn't - * supported. + /* While unexpected, the read_enc_key_size command may fail. The most + * secure approach is to then assume the key size is 0 to force a + * disconnection. 
*/ if (rp->status) { bt_dev_err(hdev, "failed to read key size for handle %u", handle); - conn->enc_key_size = HCI_LINK_KEY_SIZE; + conn->enc_key_size = 0; } else { conn->enc_key_size = rp->key_size; } @@ -3302,6 +3370,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_read_inq_rsp_tx_power(hdev, skb); break; + case HCI_OP_READ_DEF_ERR_DATA_REPORTING: + hci_cc_read_def_err_data_reporting(hdev, skb); + break; + + case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING: + hci_cc_write_def_err_data_reporting(hdev, skb); + break; + case HCI_OP_PIN_CODE_REPLY: hci_cc_pin_code_reply(hdev, skb); break; @@ -4557,6 +4633,16 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, goto confirm; } + /* If there already exists link key in local host, leave the + * decision to user space since the remote device could be + * legitimate or malicious. + */ + if (hci_find_link_key(hdev, &ev->bdaddr)) { + bt_dev_dbg(hdev, "Local host already has link key"); + confirm_hint = 1; + goto confirm; + } + BT_DBG("Auto-accept of user confirmation with %ums delay", hdev->auto_accept_delay); @@ -5858,6 +5944,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) u8 status = 0, event = hdr->evt, req_evt = 0; u16 opcode = HCI_OP_NOP; + if (!event) { + bt_dev_warn(hdev, "Received unexpected HCI Event 00000000"); + goto done; + } + if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) { struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; opcode = __le16_to_cpu(cmd_hdr->opcode); @@ -6069,6 +6160,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) req_complete_skb(hdev, status, opcode, orig_skb); } +done: kfree_skb(orig_skb); kfree_skb(skb); hdev->stat.evt_rx++; diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 2a1b64dbf76e..649e1e5ed446 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -34,6 +34,9 @@ #define HCI_REQ_PEND 1 #define HCI_REQ_CANCELED 2 +#define LE_SUSPEND_SCAN_WINDOW 0x0012 +#define LE_SUSPEND_SCAN_INTERVAL 0x0060 + void hci_req_init(struct hci_request *req, struct hci_dev *hdev) { skb_queue_head_init(&req->cmd_q); @@ -654,6 +657,11 @@ void hci_req_add_le_scan_disable(struct hci_request *req) { struct hci_dev *hdev = req->hdev; + if (hdev->scanning_paused) { + bt_dev_dbg(hdev, "Scanning is paused for suspend"); + return; + } + if (use_ext_scan(hdev)) { struct hci_cp_le_set_ext_scan_enable cp; @@ -670,15 +678,55 @@ void hci_req_add_le_scan_disable(struct hci_request *req) } } -static void add_to_white_list(struct hci_request *req, - struct hci_conn_params *params) +static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr, + u8 bdaddr_type) +{ + struct hci_cp_le_del_from_white_list cp; + + cp.bdaddr_type = bdaddr_type; + bacpy(&cp.bdaddr, bdaddr); + + bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr, + cp.bdaddr_type); + hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp); +} + +/* Adds connection to white list if needed. On error, returns -1. 
*/ +static int add_to_white_list(struct hci_request *req, + struct hci_conn_params *params, u8 *num_entries, + bool allow_rpa) { struct hci_cp_le_add_to_white_list cp; + struct hci_dev *hdev = req->hdev; + + /* Already in white list */ + if (hci_bdaddr_list_lookup(&hdev->le_white_list, ¶ms->addr, + params->addr_type)) + return 0; + + /* Select filter policy to accept all advertising */ + if (*num_entries >= hdev->le_white_list_size) + return -1; + + /* White list can not be used with RPAs */ + if (!allow_rpa && + hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) { + return -1; + } + /* During suspend, only wakeable devices can be in whitelist */ + if (hdev->suspended && !params->wakeable) + return 0; + + *num_entries += 1; cp.bdaddr_type = params->addr_type; bacpy(&cp.bdaddr, ¶ms->addr); + bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr, + cp.bdaddr_type); hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp); + + return 0; } static u8 update_white_list(struct hci_request *req) @@ -686,7 +734,14 @@ static u8 update_white_list(struct hci_request *req) struct hci_dev *hdev = req->hdev; struct hci_conn_params *params; struct bdaddr_list *b; - uint8_t white_list_entries = 0; + u8 num_entries = 0; + bool pend_conn, pend_report; + /* We allow whitelisting even with RPAs in suspend. In the worst case, + * we won't be able to wake from devices that use the privacy1.2 + * features. Additionally, once we support privacy1.2 and IRK + * offloading, we can update this to also check for those conditions. + */ + bool allow_rpa = hdev->suspended; /* Go through the current white list programmed into the * controller one by one and check if that address is still @@ -695,29 +750,28 @@ static u8 update_white_list(struct hci_request *req) * command to remove it from the controller. */ list_for_each_entry(b, &hdev->le_white_list, list) { - /* If the device is neither in pend_le_conns nor - * pend_le_reports then remove it from the whitelist. + pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, + &b->bdaddr, + b->bdaddr_type); + pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, + &b->bdaddr, + b->bdaddr_type); + + /* If the device is not likely to connect or report, + * remove it from the whitelist. */ - if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, - &b->bdaddr, b->bdaddr_type) && - !hci_pend_le_action_lookup(&hdev->pend_le_reports, - &b->bdaddr, b->bdaddr_type)) { - struct hci_cp_le_del_from_white_list cp; - - cp.bdaddr_type = b->bdaddr_type; - bacpy(&cp.bdaddr, &b->bdaddr); - - hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, - sizeof(cp), &cp); + if (!pend_conn && !pend_report) { + del_from_white_list(req, &b->bdaddr, b->bdaddr_type); continue; } - if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { - /* White list can not be used with RPAs */ + /* White list can not be used with RPAs */ + if (!allow_rpa && + hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { return 0x00; } - white_list_entries++; + num_entries++; } /* Since all no longer valid white list entries have been @@ -731,47 +785,17 @@ static u8 update_white_list(struct hci_request *req) * white list. 
*/ list_for_each_entry(params, &hdev->pend_le_conns, action) { - if (hci_bdaddr_list_lookup(&hdev->le_white_list, - ¶ms->addr, params->addr_type)) - continue; - - if (white_list_entries >= hdev->le_white_list_size) { - /* Select filter policy to accept all advertising */ + if (add_to_white_list(req, params, &num_entries, allow_rpa)) return 0x00; - } - - if (hci_find_irk_by_addr(hdev, ¶ms->addr, - params->addr_type)) { - /* White list can not be used with RPAs */ - return 0x00; - } - - white_list_entries++; - add_to_white_list(req, params); } /* After adding all new pending connections, walk through * the list of pending reports and also add these to the - * white list if there is still space. + * white list if there is still space. Abort if space runs out. */ list_for_each_entry(params, &hdev->pend_le_reports, action) { - if (hci_bdaddr_list_lookup(&hdev->le_white_list, - ¶ms->addr, params->addr_type)) - continue; - - if (white_list_entries >= hdev->le_white_list_size) { - /* Select filter policy to accept all advertising */ - return 0x00; - } - - if (hci_find_irk_by_addr(hdev, ¶ms->addr, - params->addr_type)) { - /* White list can not be used with RPAs */ + if (add_to_white_list(req, params, &num_entries, allow_rpa)) return 0x00; - } - - white_list_entries++; - add_to_white_list(req, params); } /* Select filter policy to use white list */ @@ -866,6 +890,12 @@ void hci_req_add_le_passive_scan(struct hci_request *req) struct hci_dev *hdev = req->hdev; u8 own_addr_type; u8 filter_policy; + u8 window, interval; + + if (hdev->scanning_paused) { + bt_dev_dbg(hdev, "Scanning is paused for suspend"); + return; + } /* Set require_privacy to false since no SCAN_REQ are send * during passive scanning. Not using an non-resolvable address @@ -896,8 +926,17 @@ void hci_req_add_le_passive_scan(struct hci_request *req) (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) filter_policy |= 0x02; - hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval, - hdev->le_scan_window, own_addr_type, filter_policy); + if (hdev->suspended) { + window = LE_SUSPEND_SCAN_WINDOW; + interval = LE_SUSPEND_SCAN_INTERVAL; + } else { + window = hdev->le_scan_window; + interval = hdev->le_scan_interval; + } + + bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy); + hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window, + own_addr_type, filter_policy); } static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance) @@ -918,6 +957,187 @@ static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance) return adv_instance->scan_rsp_len; } +static void hci_req_clear_event_filter(struct hci_request *req) +{ + struct hci_cp_set_event_filter f; + + memset(&f, 0, sizeof(f)); + f.flt_type = HCI_FLT_CLEAR_ALL; + hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f); + + /* Update page scan state (since we may have modified it when setting + * the event filter). 
+ */ + __hci_req_update_scan(req); +} + +static void hci_req_set_event_filter(struct hci_request *req) +{ + struct bdaddr_list *b; + struct hci_cp_set_event_filter f; + struct hci_dev *hdev = req->hdev; + u8 scan; + + /* Always clear event filter when starting */ + hci_req_clear_event_filter(req); + + list_for_each_entry(b, &hdev->wakeable, list) { + memset(&f, 0, sizeof(f)); + bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr); + f.flt_type = HCI_FLT_CONN_SETUP; + f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR; + f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON; + + bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); + hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f); + } + + scan = !list_empty(&hdev->wakeable) ? SCAN_PAGE : SCAN_DISABLED; + hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); +} + +static void hci_req_config_le_suspend_scan(struct hci_request *req) +{ + /* Can't change params without disabling first */ + hci_req_add_le_scan_disable(req); + + /* Configure params and enable scanning */ + hci_req_add_le_passive_scan(req); + + /* Block suspend notifier on response */ + set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks); +} + +static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode) +{ + bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode, + status); + if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) || + test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) { + wake_up(&hdev->suspend_wait_q); + } +} + +/* Call with hci_dev_lock */ +void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) +{ + int old_state; + struct hci_conn *conn; + struct hci_request req; + u8 page_scan; + int disconnect_counter; + + if (next == hdev->suspend_state) { + bt_dev_dbg(hdev, "Same state before and after: %d", next); + goto done; + } + + hdev->suspend_state = next; + hci_req_init(&req, hdev); + + if (next == BT_SUSPEND_DISCONNECT) { + /* Mark device as suspended */ + hdev->suspended = true; + + /* Pause discovery if not already stopped */ + old_state = hdev->discovery.state; + if (old_state != DISCOVERY_STOPPED) { + set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks); + hci_discovery_set_state(hdev, DISCOVERY_STOPPING); + queue_work(hdev->req_workqueue, &hdev->discov_update); + } + + hdev->discovery_paused = true; + hdev->discovery_old_state = old_state; + + /* Stop advertising */ + old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING); + if (old_state) { + set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks); + cancel_delayed_work(&hdev->discov_off); + queue_delayed_work(hdev->req_workqueue, + &hdev->discov_off, 0); + } + + hdev->advertising_paused = true; + hdev->advertising_old_state = old_state; + /* Disable page scan */ + page_scan = SCAN_DISABLED; + hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan); + + /* Disable LE passive scan */ + hci_req_add_le_scan_disable(&req); + + /* Mark task needing completion */ + set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); + + /* Prevent disconnects from causing scanning to be re-enabled */ + hdev->scanning_paused = true; + + /* Run commands before disconnecting */ + hci_req_run(&req, suspend_req_complete); + + disconnect_counter = 0; + /* Soft disconnect everything (power off) */ + list_for_each_entry(conn, &hdev->conn_hash.list, list) { + hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF); + disconnect_counter++; + } + + if (disconnect_counter > 0) { + bt_dev_dbg(hdev, + "Had %d disconnects. 
Will wait on them", + disconnect_counter); + set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks); + } + } else if (next == BT_SUSPEND_COMPLETE) { + /* Unpause to take care of updating scanning params */ + hdev->scanning_paused = false; + /* Enable event filter for paired devices */ + hci_req_set_event_filter(&req); + /* Enable passive scan at lower duty cycle */ + hci_req_config_le_suspend_scan(&req); + /* Pause scan changes again. */ + hdev->scanning_paused = true; + hci_req_run(&req, suspend_req_complete); + } else { + hdev->suspended = false; + hdev->scanning_paused = false; + + hci_req_clear_event_filter(&req); + /* Reset passive/background scanning to normal */ + hci_req_config_le_suspend_scan(&req); + + /* Unpause advertising */ + hdev->advertising_paused = false; + if (hdev->advertising_old_state) { + set_bit(SUSPEND_UNPAUSE_ADVERTISING, + hdev->suspend_tasks); + hci_dev_set_flag(hdev, HCI_ADVERTISING); + queue_work(hdev->req_workqueue, + &hdev->discoverable_update); + hdev->advertising_old_state = 0; + } + + /* Unpause discovery */ + hdev->discovery_paused = false; + if (hdev->discovery_old_state != DISCOVERY_STOPPED && + hdev->discovery_old_state != DISCOVERY_STOPPING) { + set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks); + hci_discovery_set_state(hdev, DISCOVERY_STARTING); + queue_work(hdev->req_workqueue, &hdev->discov_update); + } + + hci_req_run(&req, suspend_req_complete); + } + + hdev->suspend_state = next; + +done: + clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks); + wake_up(&hdev->suspend_wait_q); +} + static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev) { u8 instance = hdev->cur_adv_instance; @@ -1499,7 +1719,7 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); if (err < 0) { - BT_ERR("%s failed to generate new RPA", hdev->name); + bt_dev_err(hdev, "failed to generate new RPA"); return err; } @@ -2015,6 +2235,9 @@ void __hci_req_update_scan(struct hci_request *req) if (mgmt_powering_down(hdev)) return; + if (hdev->scanning_paused) + return; + if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || disconnected_whitelist_entries(hdev)) scan = SCAN_PAGE; @@ -2504,23 +2727,6 @@ static int active_scan(struct hci_request *req, unsigned long opt) BT_DBG("%s", hdev->name); - if (hci_dev_test_flag(hdev, HCI_LE_ADV)) { - hci_dev_lock(hdev); - - /* Don't let discovery abort an outgoing connection attempt - * that's using directed advertising. - */ - if (hci_lookup_le_connect(hdev)) { - hci_dev_unlock(hdev); - return -EBUSY; - } - - cancel_adv_timeout(hdev); - hci_dev_unlock(hdev); - - __hci_req_disable_advertising(req); - } - /* If controller is scanning, it means the background scanning is * running. Thus, we should temporarily stop it in order to set the * discovery scanning parameters. 
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index a7019fbeadd3..0e81614d235e 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -68,6 +68,8 @@ void __hci_req_update_eir(struct hci_request *req); void hci_req_add_le_scan_disable(struct hci_request *req); void hci_req_add_le_passive_scan(struct hci_request *req); +void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next); + void hci_req_reenable_advertising(struct hci_dev *hdev); void __hci_req_enable_advertising(struct hci_request *req); void __hci_req_disable_advertising(struct hci_request *req); diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index bef84b95e2c4..3b4fa27a44e6 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -1279,7 +1279,7 @@ static int hidp_session_thread(void *arg) add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); /* This memory barrier is paired with wq_has_sleeper(). See * sock_poll_wait() for more information why this is needed. */ - smp_mb(); + smp_mb__before_atomic(); /* notify synchronous startup that we're ready */ atomic_inc(&session->state); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 195459a1e53e..fd9d0d08f9c9 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -45,6 +45,7 @@ #define LE_FLOWCTL_MAX_CREDITS 65535 bool disable_ertm; +bool enable_ecred; static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD; @@ -419,6 +420,9 @@ static void l2cap_chan_timeout(struct work_struct *work) BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); mutex_lock(&conn->chan_lock); + /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling + * this work. No need to call l2cap_chan_hold(chan) here again. 
+ */ l2cap_chan_lock(chan); if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) @@ -431,12 +435,12 @@ static void l2cap_chan_timeout(struct work_struct *work) l2cap_chan_close(chan, reason); - l2cap_chan_unlock(chan); - chan->ops->close(chan); - mutex_unlock(&conn->chan_lock); + l2cap_chan_unlock(chan); l2cap_chan_put(chan); + + mutex_unlock(&conn->chan_lock); } struct l2cap_chan *l2cap_chan_create(void) @@ -532,6 +536,17 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits) skb_queue_head_init(&chan->tx_q); } +static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits) +{ + l2cap_le_flowctl_init(chan, tx_credits); + + /* L2CAP implementations shall support a minimum MPS of 64 octets */ + if (chan->mps < L2CAP_ECRED_MIN_MPS) { + chan->mps = L2CAP_ECRED_MIN_MPS; + chan->rx_credits = (chan->imtu / chan->mps) + 1; + } +} + void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, @@ -638,6 +653,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) break; case L2CAP_MODE_LE_FLOWCTL: + case L2CAP_MODE_EXT_FLOWCTL: skb_queue_purge(&chan->tx_q); break; @@ -662,6 +678,29 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) } EXPORT_SYMBOL_GPL(l2cap_chan_del); +static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, + void *data) +{ + struct l2cap_chan *chan; + + list_for_each_entry(chan, &conn->chan_l, list) { + func(chan, data); + } +} + +void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, + void *data) +{ + if (!conn) + return; + + mutex_lock(&conn->chan_lock); + __l2cap_chan_list(conn, func, data); + mutex_unlock(&conn->chan_lock); +} + +EXPORT_SYMBOL_GPL(l2cap_chan_list); + static void l2cap_conn_update_id_addr(struct work_struct *work) { struct l2cap_conn *conn = container_of(work, struct l2cap_conn, @@ -704,6 +743,27 @@ static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan) &rsp); } +static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct l2cap_ecred_conn_rsp rsp; + u16 result; + + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) + result = L2CAP_CR_LE_AUTHORIZATION; + else + result = L2CAP_CR_LE_BAD_PSM; + + l2cap_state_change(chan, BT_DISCONN); + + memset(&rsp, 0, sizeof(rsp)); + + rsp.result = cpu_to_le16(result); + + l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), + &rsp); +} + static void l2cap_chan_connect_reject(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; @@ -749,8 +809,16 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { if (conn->hcon->type == ACL_LINK) l2cap_chan_connect_reject(chan); - else if (conn->hcon->type == LE_LINK) - l2cap_chan_le_connect_reject(chan); + else if (conn->hcon->type == LE_LINK) { + switch (chan->mode) { + case L2CAP_MODE_LE_FLOWCTL: + l2cap_chan_le_connect_reject(chan); + break; + case L2CAP_MODE_EXT_FLOWCTL: + l2cap_chan_ecred_connect_reject(chan); + break; + } + } } l2cap_chan_del(chan, reason); @@ -1273,8 +1341,13 @@ static void l2cap_chan_ready(struct l2cap_chan *chan) chan->conf_state = 0; __clear_chan_timer(chan); - if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits) - chan->ops->suspend(chan); + switch (chan->mode) { + case L2CAP_MODE_LE_FLOWCTL: + case L2CAP_MODE_EXT_FLOWCTL: + if (!chan->tx_credits) + chan->ops->suspend(chan); + break; + } chan->state = BT_CONNECTED; @@ -1306,6 +1379,81 @@ static void 
l2cap_le_connect(struct l2cap_chan *chan) sizeof(req), &req); } +struct l2cap_ecred_conn_data { + struct { + struct l2cap_ecred_conn_req req; + __le16 scid[5]; + } __packed pdu; + struct l2cap_chan *chan; + struct pid *pid; + int count; +}; + +static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data) +{ + struct l2cap_ecred_conn_data *conn = data; + struct pid *pid; + + if (chan == conn->chan) + return; + + if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags)) + return; + + pid = chan->ops->get_peer_pid(chan); + + /* Only add deferred channels with the same PID/PSM */ + if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident || + chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT) + return; + + if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) + return; + + l2cap_ecred_init(chan, 0); + + /* Set the same ident so we can match on the rsp */ + chan->ident = conn->chan->ident; + + /* Include all channels deferred */ + conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid); + + conn->count++; +} + +static void l2cap_ecred_connect(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct l2cap_ecred_conn_data data; + + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) + return; + + if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) + return; + + l2cap_ecred_init(chan, 0); + + data.pdu.req.psm = chan->psm; + data.pdu.req.mtu = cpu_to_le16(chan->imtu); + data.pdu.req.mps = cpu_to_le16(chan->mps); + data.pdu.req.credits = cpu_to_le16(chan->rx_credits); + data.pdu.scid[0] = cpu_to_le16(chan->scid); + + chan->ident = l2cap_get_ident(conn); + data.pid = chan->ops->get_peer_pid(chan); + + data.count = 1; + data.chan = chan; + data.pid = chan->ops->get_peer_pid(chan); + + __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data); + + l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ, + sizeof(data.pdu.req) + data.count * sizeof(__le16), + &data.pdu); +} + static void l2cap_le_start(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; @@ -1318,8 +1466,12 @@ static void l2cap_le_start(struct l2cap_chan *chan) return; } - if (chan->state == BT_CONNECT) - l2cap_le_connect(chan); + if (chan->state == BT_CONNECT) { + if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) + l2cap_ecred_connect(chan); + else + l2cap_le_connect(chan); + } } static void l2cap_start_connection(struct l2cap_chan *chan) @@ -1737,9 +1889,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) l2cap_chan_del(chan, err); - l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan); } @@ -2505,6 +2657,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) switch (chan->mode) { case L2CAP_MODE_LE_FLOWCTL: + case L2CAP_MODE_EXT_FLOWCTL: /* Check outgoing MTU */ if (len > chan->omtu) return -EMSGSIZE; @@ -3773,6 +3926,45 @@ void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan) &rsp); } +void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan) +{ + struct { + struct l2cap_ecred_conn_rsp rsp; + __le16 dcid[5]; + } __packed pdu; + struct l2cap_conn *conn = chan->conn; + u16 ident = chan->ident; + int i = 0; + + if (!ident) + return; + + BT_DBG("chan %p ident %d", chan, ident); + + pdu.rsp.mtu = cpu_to_le16(chan->imtu); + pdu.rsp.mps = cpu_to_le16(chan->mps); + pdu.rsp.credits = cpu_to_le16(chan->rx_credits); + pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS); + + mutex_lock(&conn->chan_lock); + + list_for_each_entry(chan, &conn->chan_l, list) { + if (chan->ident != ident) 
+ continue; + + /* Reset ident so only one response is sent */ + chan->ident = 0; + + /* Include all channels pending with the same ident */ + pdu.dcid[i++] = cpu_to_le16(chan->scid); + } + + mutex_unlock(&conn->chan_lock); + + l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP, + sizeof(pdu.rsp) + i * sizeof(__le16), &pdu); +} + void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) { struct l2cap_conn_rsp rsp; @@ -4181,7 +4373,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, return 0; } - if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { + if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 && + chan->state != BT_CONNECTED) { cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, chan->dcid); goto unlock; @@ -4405,6 +4598,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, return 0; } + l2cap_chan_hold(chan); l2cap_chan_lock(chan); rsp.dcid = cpu_to_le16(chan->scid); @@ -4413,12 +4607,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, chan->ops->set_shutdown(chan); - l2cap_chan_hold(chan); l2cap_chan_del(chan, ECONNRESET); - l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); @@ -4450,20 +4643,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, return 0; } + l2cap_chan_hold(chan); l2cap_chan_lock(chan); if (chan->state != BT_DISCONN) { l2cap_chan_unlock(chan); + l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); return 0; } - l2cap_chan_hold(chan); l2cap_chan_del(chan, 0); - l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); @@ -5714,6 +5908,356 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn, return 0; } +static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_ecred_conn_req *req = (void *) data; + struct { + struct l2cap_ecred_conn_rsp rsp; + __le16 dcid[5]; + } __packed pdu; + struct l2cap_chan *chan, *pchan; + u16 mtu, mps; + __le16 psm; + u8 result, len = 0; + int i, num_scid; + bool defer = false; + + if (!enable_ecred) + return -EINVAL; + + if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) { + result = L2CAP_CR_LE_INVALID_PARAMS; + goto response; + } + + mtu = __le16_to_cpu(req->mtu); + mps = __le16_to_cpu(req->mps); + + if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) { + result = L2CAP_CR_LE_UNACCEPT_PARAMS; + goto response; + } + + psm = req->psm; + + BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps); + + memset(&pdu, 0, sizeof(pdu)); + + /* Check if we have socket listening on psm */ + pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, + &conn->hcon->dst, LE_LINK); + if (!pchan) { + result = L2CAP_CR_LE_BAD_PSM; + goto response; + } + + mutex_lock(&conn->chan_lock); + l2cap_chan_lock(pchan); + + if (!smp_sufficient_security(conn->hcon, pchan->sec_level, + SMP_ALLOW_STK)) { + result = L2CAP_CR_LE_AUTHENTICATION; + goto unlock; + } + + result = L2CAP_CR_LE_SUCCESS; + cmd_len -= sizeof(req); + num_scid = cmd_len / sizeof(u16); + + for (i = 0; i < num_scid; i++) { + u16 scid = __le16_to_cpu(req->scid[i]); + + BT_DBG("scid[%d] 0x%4.4x", i, scid); + + pdu.dcid[i] = 0x0000; + len += sizeof(*pdu.dcid); + + /* Check for valid dynamic CID range */ + if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { + result = L2CAP_CR_LE_INVALID_SCID; + continue; + } + + /* Check if we 
already have channel with that dcid */ + if (__l2cap_get_chan_by_dcid(conn, scid)) { + result = L2CAP_CR_LE_SCID_IN_USE; + continue; + } + + chan = pchan->ops->new_connection(pchan); + if (!chan) { + result = L2CAP_CR_LE_NO_MEM; + continue; + } + + bacpy(&chan->src, &conn->hcon->src); + bacpy(&chan->dst, &conn->hcon->dst); + chan->src_type = bdaddr_src_type(conn->hcon); + chan->dst_type = bdaddr_dst_type(conn->hcon); + chan->psm = psm; + chan->dcid = scid; + chan->omtu = mtu; + chan->remote_mps = mps; + + __l2cap_chan_add(conn, chan); + + l2cap_ecred_init(chan, __le16_to_cpu(req->credits)); + + /* Init response */ + if (!pdu.rsp.credits) { + pdu.rsp.mtu = cpu_to_le16(chan->imtu); + pdu.rsp.mps = cpu_to_le16(chan->mps); + pdu.rsp.credits = cpu_to_le16(chan->rx_credits); + } + + pdu.dcid[i] = cpu_to_le16(chan->scid); + + __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); + + chan->ident = cmd->ident; + + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { + l2cap_state_change(chan, BT_CONNECT2); + defer = true; + chan->ops->defer(chan); + } else { + l2cap_chan_ready(chan); + } + } + +unlock: + l2cap_chan_unlock(pchan); + mutex_unlock(&conn->chan_lock); + l2cap_chan_put(pchan); + +response: + pdu.rsp.result = cpu_to_le16(result); + + if (defer) + return 0; + + l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP, + sizeof(pdu.rsp) + len, &pdu); + + return 0; +} + +static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_ecred_conn_rsp *rsp = (void *) data; + struct hci_conn *hcon = conn->hcon; + u16 mtu, mps, credits, result; + struct l2cap_chan *chan; + int err = 0, sec_level; + int i = 0; + + if (cmd_len < sizeof(*rsp)) + return -EPROTO; + + mtu = __le16_to_cpu(rsp->mtu); + mps = __le16_to_cpu(rsp->mps); + credits = __le16_to_cpu(rsp->credits); + result = __le16_to_cpu(rsp->result); + + BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits, + result); + + mutex_lock(&conn->chan_lock); + + cmd_len -= sizeof(*rsp); + + list_for_each_entry(chan, &conn->chan_l, list) { + u16 dcid; + + if (chan->ident != cmd->ident || + chan->mode != L2CAP_MODE_EXT_FLOWCTL || + chan->state == BT_CONNECTED) + continue; + + l2cap_chan_lock(chan); + + /* Check that there is a dcid for each pending channel */ + if (cmd_len < sizeof(dcid)) { + l2cap_chan_del(chan, ECONNREFUSED); + l2cap_chan_unlock(chan); + continue; + } + + dcid = __le16_to_cpu(rsp->dcid[i++]); + cmd_len -= sizeof(u16); + + BT_DBG("dcid[%d] 0x%4.4x", i, dcid); + + /* Check if dcid is already in use */ + if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) { + /* If a device receives a + * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an + * already-assigned Destination CID, then both the + * original channel and the new channel shall be + * immediately discarded and not used. + */ + l2cap_chan_del(chan, ECONNREFUSED); + l2cap_chan_unlock(chan); + chan = __l2cap_get_chan_by_dcid(conn, dcid); + l2cap_chan_lock(chan); + l2cap_chan_del(chan, ECONNRESET); + l2cap_chan_unlock(chan); + continue; + } + + switch (result) { + case L2CAP_CR_LE_AUTHENTICATION: + case L2CAP_CR_LE_ENCRYPTION: + /* If we already have MITM protection we can't do + * anything. 
+ */ + if (hcon->sec_level > BT_SECURITY_MEDIUM) { + l2cap_chan_del(chan, ECONNREFUSED); + break; + } + + sec_level = hcon->sec_level + 1; + if (chan->sec_level < sec_level) + chan->sec_level = sec_level; + + /* We'll need to send a new Connect Request */ + clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags); + + smp_conn_security(hcon, chan->sec_level); + break; + + case L2CAP_CR_LE_BAD_PSM: + l2cap_chan_del(chan, ECONNREFUSED); + break; + + default: + /* If dcid was not set it means channels was refused */ + if (!dcid) { + l2cap_chan_del(chan, ECONNREFUSED); + break; + } + + chan->ident = 0; + chan->dcid = dcid; + chan->omtu = mtu; + chan->remote_mps = mps; + chan->tx_credits = credits; + l2cap_chan_ready(chan); + break; + } + + l2cap_chan_unlock(chan); + } + + mutex_unlock(&conn->chan_lock); + + return err; +} + +static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_ecred_reconf_req *req = (void *) data; + struct l2cap_ecred_reconf_rsp rsp; + u16 mtu, mps, result; + struct l2cap_chan *chan; + int i, num_scid; + + if (!enable_ecred) + return -EINVAL; + + if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) { + result = L2CAP_CR_LE_INVALID_PARAMS; + goto respond; + } + + mtu = __le16_to_cpu(req->mtu); + mps = __le16_to_cpu(req->mps); + + BT_DBG("mtu %u mps %u", mtu, mps); + + if (mtu < L2CAP_ECRED_MIN_MTU) { + result = L2CAP_RECONF_INVALID_MTU; + goto respond; + } + + if (mps < L2CAP_ECRED_MIN_MPS) { + result = L2CAP_RECONF_INVALID_MPS; + goto respond; + } + + cmd_len -= sizeof(*req); + num_scid = cmd_len / sizeof(u16); + result = L2CAP_RECONF_SUCCESS; + + for (i = 0; i < num_scid; i++) { + u16 scid; + + scid = __le16_to_cpu(req->scid[i]); + if (!scid) + return -EPROTO; + + chan = __l2cap_get_chan_by_dcid(conn, scid); + if (!chan) + continue; + + /* If the MTU value is decreased for any of the included + * channels, then the receiver shall disconnect all + * included channels. 
+ */ + if (chan->omtu > mtu) { + BT_ERR("chan %p decreased MTU %u -> %u", chan, + chan->omtu, mtu); + result = L2CAP_RECONF_INVALID_MTU; + } + + chan->omtu = mtu; + chan->remote_mps = mps; + } + +respond: + rsp.result = cpu_to_le16(result); + + l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp), + &rsp); + + return 0; +} + +static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_chan *chan; + struct l2cap_ecred_conn_rsp *rsp = (void *) data; + u16 result; + + if (cmd_len < sizeof(*rsp)) + return -EPROTO; + + result = __le16_to_cpu(rsp->result); + + BT_DBG("result 0x%4.4x", rsp->result); + + if (!result) + return 0; + + list_for_each_entry(chan, &conn->chan_l, list) { + if (chan->ident != cmd->ident) + continue; + + l2cap_chan_del(chan, ECONNRESET); + } + + return 0; +} + static inline int l2cap_le_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) @@ -5769,6 +6313,22 @@ static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn, err = l2cap_le_credits(conn, cmd, cmd_len, data); break; + case L2CAP_ECRED_CONN_REQ: + err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_ECRED_CONN_RSP: + err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data); + break; + + case L2CAP_ECRED_RECONF_REQ: + err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_ECRED_RECONF_RSP: + err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data); + break; + case L2CAP_DISCONN_REQ: err = l2cap_disconnect_req(conn, cmd, cmd_len, data); break; @@ -5831,9 +6391,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) { struct hci_conn *hcon = conn->hcon; - u8 *data = skb->data; - int len = skb->len; - struct l2cap_cmd_hdr cmd; + struct l2cap_cmd_hdr *cmd; int err; l2cap_raw_recv(conn, skb); @@ -5841,35 +6399,34 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, if (hcon->type != ACL_LINK) goto drop; - while (len >= L2CAP_CMD_HDR_SIZE) { - u16 cmd_len; - memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE); - data += L2CAP_CMD_HDR_SIZE; - len -= L2CAP_CMD_HDR_SIZE; + while (skb->len >= L2CAP_CMD_HDR_SIZE) { + u16 len; + + cmd = (void *) skb->data; + skb_pull(skb, L2CAP_CMD_HDR_SIZE); - cmd_len = le16_to_cpu(cmd.len); + len = le16_to_cpu(cmd->len); - BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, - cmd.ident); + BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, + cmd->ident); - if (cmd_len > len || !cmd.ident) { + if (len > skb->len || !cmd->ident) { BT_DBG("corrupted command"); break; } - err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data); + err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data); if (err) { struct l2cap_cmd_rej_unk rej; BT_ERR("Wrong link type (%d)", err); rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); - l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, + l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); } - data += cmd_len; - len -= cmd_len; + skb_pull(skb, len); } drop: @@ -6814,11 +7371,13 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan) struct l2cap_le_credits pkt; u16 return_credits; - return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits; + return_credits = (chan->imtu / chan->mps) + 1; - if (!return_credits) + if (chan->rx_credits >= return_credits) return; + return_credits -= chan->rx_credits; + BT_DBG("chan %p returning %u credits to sender", chan, return_credits); chan->rx_credits += return_credits; @@ -6831,7 
+7390,7 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan) l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt); } -static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb) +static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb) { int err; @@ -6846,7 +7405,7 @@ static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb) return err; } -static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) +static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) { int err; @@ -6894,7 +7453,7 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) } if (skb->len == sdu_len) - return l2cap_le_recv(chan, skb); + return l2cap_ecred_recv(chan, skb); chan->sdu = skb; chan->sdu_len = sdu_len; @@ -6926,7 +7485,7 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) skb = NULL; if (chan->sdu->len == chan->sdu_len) { - err = l2cap_le_recv(chan, chan->sdu); + err = l2cap_ecred_recv(chan, chan->sdu); if (!err) { chan->sdu = NULL; chan->sdu_last_frag = NULL; @@ -6987,7 +7546,8 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, switch (chan->mode) { case L2CAP_MODE_LE_FLOWCTL: - if (l2cap_le_data_rcv(chan, skb) < 0) + case L2CAP_MODE_EXT_FLOWCTL: + if (l2cap_ecred_data_rcv(chan, skb) < 0) goto drop; goto done; @@ -7206,6 +7766,33 @@ static bool is_valid_psm(u16 psm, u8 dst_type) { return ((psm & 0x0101) == 0x0001); } +struct l2cap_chan_data { + struct l2cap_chan *chan; + struct pid *pid; + int count; +}; + +static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data) +{ + struct l2cap_chan_data *d = data; + struct pid *pid; + + if (chan == d->chan) + return; + + if (!test_bit(FLAG_DEFER_SETUP, &chan->flags)) + return; + + pid = chan->ops->get_peer_pid(chan); + + /* Only count deferred channels with the same PID/PSM */ + if (d->pid != pid || chan->psm != d->chan->psm || chan->ident || + chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT) + return; + + d->count++; +} + int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst, u8 dst_type) { @@ -7214,8 +7801,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, struct hci_dev *hdev; int err; - BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst, - dst_type, __le16_to_cpu(psm)); + BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src, + dst, dst_type, __le16_to_cpu(psm), chan->mode); hdev = hci_get_route(dst, &chan->src, chan->src_type); if (!hdev) @@ -7244,6 +7831,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, break; case L2CAP_MODE_LE_FLOWCTL: break; + case L2CAP_MODE_EXT_FLOWCTL: + if (!enable_ecred) { + err = -EOPNOTSUPP; + goto done; + } + break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: if (!disable_ertm) @@ -7319,6 +7912,23 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, goto done; } + if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) { + struct l2cap_chan_data data; + + data.chan = chan; + data.pid = chan->ops->get_peer_pid(chan); + data.count = 1; + + l2cap_chan_list(conn, l2cap_chan_by_pid, &data); + + /* Check if there isn't too many channels being connected */ + if (data.count > L2CAP_ECRED_CONN_SCID_MAX) { + hci_conn_drop(hcon); + err = -EPROTO; + goto done; + } + } + mutex_lock(&conn->chan_lock); l2cap_chan_lock(chan); @@ -7368,6 +7978,38 @@ done: } EXPORT_SYMBOL_GPL(l2cap_chan_connect); +static void l2cap_ecred_reconfigure(struct l2cap_chan 
*chan) +{ + struct l2cap_conn *conn = chan->conn; + struct { + struct l2cap_ecred_reconf_req req; + __le16 scid; + } pdu; + + pdu.req.mtu = cpu_to_le16(chan->imtu); + pdu.req.mps = cpu_to_le16(chan->mps); + pdu.scid = cpu_to_le16(chan->scid); + + chan->ident = l2cap_get_ident(conn); + + l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ, + sizeof(pdu), &pdu); +} + +int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu) +{ + if (chan->imtu > mtu) + return -EINVAL; + + BT_DBG("chan %p mtu 0x%4.4x", chan, mtu); + + chan->imtu = mtu; + + l2cap_ecred_reconfigure(chan); + + return 0; +} + /* ---- L2CAP interface with lower layer (HCI) ---- */ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) @@ -7579,7 +8221,8 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); } else if (chan->state == BT_CONNECT2 && - chan->mode != L2CAP_MODE_LE_FLOWCTL) { + !(chan->mode == L2CAP_MODE_EXT_FLOWCTL || + chan->mode == L2CAP_MODE_LE_FLOWCTL)) { struct l2cap_conn_rsp rsp; __u16 res, stat; @@ -7787,3 +8430,6 @@ void l2cap_exit(void) module_param(disable_ertm, bool, 0644); MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); + +module_param(enable_ecred, bool, 0644); +MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode"); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a7be8b59b3c2..117ba20ea194 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -232,7 +232,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, return -EINVAL; } - if (chan->psm && bdaddr_type_is_le(chan->src_type)) + if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode) chan->mode = L2CAP_MODE_LE_FLOWCTL; err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), @@ -274,6 +274,12 @@ static int l2cap_sock_listen(struct socket *sock, int backlog) case L2CAP_MODE_BASIC: case L2CAP_MODE_LE_FLOWCTL: break; + case L2CAP_MODE_EXT_FLOWCTL: + if (!enable_ecred) { + err = -EOPNOTSUPP; + goto done; + } + break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: if (!disable_ertm) @@ -427,6 +433,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, opts.max_tx = chan->max_tx; opts.txwin_size = chan->tx_win; + BT_DBG("mode 0x%2.2x", chan->mode); + len = min_t(unsigned int, len, sizeof(opts)); if (copy_to_user(optval, (char *) &opts, len)) err = -EFAULT; @@ -499,6 +507,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, struct l2cap_chan *chan = l2cap_pi(sk)->chan; struct bt_security sec; struct bt_power pwr; + u32 phys; int len, err = 0; BT_DBG("sk %p", sk); @@ -603,6 +612,18 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, err = -EFAULT; break; + case BT_PHY: + if (sk->sk_state != BT_CONNECTED) { + err = -ENOTCONN; + break; + } + + phys = hci_conn_get_phy(chan->conn->hcon); + + if (put_user(phys, (u32 __user *) optval)) + err = -EFAULT; + break; + default: err = -ENOPROTOOPT; break; @@ -694,6 +715,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, break; } + BT_DBG("mode 0x%2.2x", chan->mode); + chan->imtu = opts.imtu; chan->omtu = opts.omtu; chan->fcs = opts.fcs; @@ -926,7 +949,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (sk->sk_state == BT_CONNECTED) { + if (chan->mode == L2CAP_MODE_LE_FLOWCTL && + sk->sk_state == BT_CONNECTED) { err = -EISCONN; break; } @@ -936,7 
+960,12 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - chan->imtu = opt; + if (chan->mode == L2CAP_MODE_EXT_FLOWCTL && + sk->sk_state == BT_CONNECTED) + err = l2cap_chan_reconfigure(chan, opt); + else + chan->imtu = opt; + break; default: @@ -991,7 +1020,11 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg, if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { - if (bdaddr_type_is_le(pi->chan->src_type)) { + if (pi->chan->mode == L2CAP_MODE_EXT_FLOWCTL) { + sk->sk_state = BT_CONNECTED; + pi->chan->state = BT_CONNECTED; + __l2cap_ecred_conn_rsp_defer(pi->chan); + } else if (bdaddr_type_is_le(pi->chan->src_type)) { sk->sk_state = BT_CONNECTED; pi->chan->state = BT_CONNECTED; __l2cap_le_connect_rsp_defer(pi->chan); @@ -1042,7 +1075,7 @@ done: } /* Kill socket (only if zapped and orphan) - * Must be called on unlocked socket. + * Must be called on unlocked socket, with l2cap channel lock. */ static void l2cap_sock_kill(struct sock *sk) { @@ -1193,6 +1226,7 @@ static int l2cap_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err; + struct l2cap_chan *chan; BT_DBG("sock %p, sk %p", sock, sk); @@ -1202,9 +1236,17 @@ static int l2cap_sock_release(struct socket *sock) bt_sock_unlink(&l2cap_sk_list, sk); err = l2cap_sock_shutdown(sock, 2); + chan = l2cap_pi(sk)->chan; + + l2cap_chan_hold(chan); + l2cap_chan_lock(chan); sock_orphan(sk); l2cap_sock_kill(sk); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + return err; } @@ -1222,12 +1264,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent) BT_DBG("child chan %p state %s", chan, state_to_string(chan->state)); + l2cap_chan_hold(chan); l2cap_chan_lock(chan); + __clear_chan_timer(chan); l2cap_chan_close(chan, ECONNRESET); - l2cap_chan_unlock(chan); - l2cap_sock_kill(sk); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } } @@ -1459,6 +1504,13 @@ static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan) return sk->sk_sndtimeo; } +static struct pid *l2cap_sock_get_peer_pid_cb(struct l2cap_chan *chan) +{ + struct sock *sk = chan->data; + + return sk->sk_peer_pid; +} + static void l2cap_sock_suspend_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; @@ -1480,6 +1532,7 @@ static const struct l2cap_ops l2cap_chan_ops = { .suspend = l2cap_sock_suspend_cb, .set_shutdown = l2cap_sock_set_shutdown_cb, .get_sndtimeo = l2cap_sock_get_sndtimeo_cb, + .get_peer_pid = l2cap_sock_get_peer_pid_cb, .alloc_skb = l2cap_sock_alloc_skb_cb, }; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 3074363c68df..6552003a170e 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -38,7 +38,7 @@ #include "mgmt_util.h" #define MGMT_VERSION 1 -#define MGMT_REVISION 15 +#define MGMT_REVISION 16 static const u16 mgmt_commands[] = { MGMT_OP_READ_INDEX_LIST, @@ -107,6 +107,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_READ_EXT_INFO, MGMT_OP_SET_APPEARANCE, MGMT_OP_SET_BLOCKED_KEYS, + MGMT_OP_SET_WIDEBAND_SPEECH, }; static const u16 mgmt_events[] = { @@ -762,6 +763,10 @@ static u32 get_supported_settings(struct hci_dev *hdev) if (lmp_sc_capable(hdev)) settings |= MGMT_SETTING_SECURE_CONN; + + if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, + &hdev->quirks)) + settings |= MGMT_SETTING_WIDEBAND_SPEECH; } if (lmp_le_capable(hdev)) { @@ -846,6 +851,9 @@ static u32 get_current_settings(struct hci_dev *hdev) settings |= MGMT_SETTING_STATIC_ADDRESS; } + if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED)) + settings 
|= MGMT_SETTING_WIDEBAND_SPEECH; + return settings; } @@ -1382,6 +1390,12 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } + if (hdev->advertising_paused) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_BUSY); + goto failed; + } + if (!hdev_is_powered(hdev)) { bool changed = false; @@ -3589,6 +3603,62 @@ static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data, err, NULL, 0); } +static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_mode *cp = data; + int err; + bool changed = false; + + BT_DBG("request for %s", hdev->name); + + if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks)) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_WIDEBAND_SPEECH, + MGMT_STATUS_NOT_SUPPORTED); + + if (cp->val != 0x00 && cp->val != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_WIDEBAND_SPEECH, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_WIDEBAND_SPEECH, + MGMT_STATUS_BUSY); + goto unlock; + } + + if (hdev_is_powered(hdev) && + !!cp->val != hci_dev_test_flag(hdev, + HCI_WIDEBAND_SPEECH_ENABLED)) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_WIDEBAND_SPEECH, + MGMT_STATUS_REJECTED); + goto unlock; + } + + if (cp->val) + changed = !hci_dev_test_and_set_flag(hdev, + HCI_WIDEBAND_SPEECH_ENABLED); + else + changed = hci_dev_test_and_clear_flag(hdev, + HCI_WIDEBAND_SPEECH_ENABLED); + + err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev); + if (err < 0) + goto unlock; + + if (changed) + err = new_settings(hdev, sk); + +unlock: + hci_dev_unlock(hdev); + return err; +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -3865,6 +3935,13 @@ void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status) } hci_dev_unlock(hdev); + + /* Handle suspend notifier */ + if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY, + hdev->suspend_tasks)) { + bt_dev_dbg(hdev, "Unpaused discovery"); + wake_up(&hdev->suspend_wait_q); + } } static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type, @@ -3926,6 +4003,13 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev, goto failed; } + /* Can't start discovery when it is paused */ + if (hdev->discovery_paused) { + err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY, + &cp->type, sizeof(cp->type)); + goto failed; + } + /* Clear the discovery filter first to free any previously * allocated memory for the UUID list. 
*/ @@ -4093,6 +4177,12 @@ void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status) } hci_dev_unlock(hdev); + + /* Handle suspend notifier */ + if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) { + bt_dev_dbg(hdev, "Paused discovery"); + wake_up(&hdev->suspend_wait_q); + } } static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, @@ -4324,6 +4414,17 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status, if (match.sk) sock_put(match.sk); + /* Handle suspend notifier */ + if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING, + hdev->suspend_tasks)) { + bt_dev_dbg(hdev, "Paused advertising"); + wake_up(&hdev->suspend_wait_q); + } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING, + hdev->suspend_tasks)) { + bt_dev_dbg(hdev, "Unpaused advertising"); + wake_up(&hdev->suspend_wait_q); + } + /* If "Set Advertising" was just disabled and instance advertising was * set up earlier, then re-enable multi-instance advertising. */ @@ -4375,6 +4476,10 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); + if (hdev->advertising_paused) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_BUSY); + hci_dev_lock(hdev); val = !!cp->val; @@ -6743,8 +6848,11 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, if (!err) err = hci_req_run(&req, add_advertising_complete); - if (err < 0) + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + MGMT_STATUS_FAILED); mgmt_pending_remove(cmd); + } unlock: hci_dev_unlock(hdev); @@ -6990,6 +7098,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE }, { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE, HCI_MGMT_VAR_LEN }, + { set_wideband_speech, MGMT_SETTING_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 3a9e9d9670be..2e20af317cea 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -40,7 +40,6 @@ static bool disable_cfc; static bool l2cap_ertm; static int channel_mtu = -1; -static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; static struct task_struct *rfcomm_thread; @@ -73,8 +72,6 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s); /* ---- RFCOMM frame parsing macros ---- */ #define __get_dlci(b) ((b & 0xfc) >> 2) -#define __get_channel(b) ((b & 0xf8) >> 3) -#define __get_dir(b) ((b & 0x04) >> 2) #define __get_type(b) ((b & 0xef)) #define __test_ea(b) ((b & 0x01)) @@ -87,7 +84,6 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s); #define __ctrl(type, pf) (((type & 0xef) | (pf << 4))) #define __dlci(dir, chn) (((chn & 0x1f) << 1) | dir) #define __srv_channel(dlci) (dlci >> 1) -#define __dir(dlci) (dlci & 0x01) #define __len8(len) (((len) << 1) | 1) #define __len16(len) ((len) << 1) @@ -752,7 +748,8 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, /* Set L2CAP options */ sk = sock->sk; lock_sock(sk); - l2cap_pi(sk)->chan->imtu = l2cap_mtu; + /* Set MTU to 0 so L2CAP can auto select the MTU */ + l2cap_pi(sk)->chan->imtu = 0; l2cap_pi(sk)->chan->sec_level = sec_level; if (l2cap_ertm) l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM; @@ -2039,7 +2036,8 @@ static int rfcomm_add_listener(bdaddr_t *ba) /* Set L2CAP options */ sk = sock->sk; lock_sock(sk); - l2cap_pi(sk)->chan->imtu = 
l2cap_mtu; + /* Set MTU to 0 so L2CAP can auto select the MTU */ + l2cap_pi(sk)->chan->imtu = 0; release_sock(sk); /* Start listening on the socket */ @@ -2237,9 +2235,6 @@ MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control"); module_param(channel_mtu, int, 0644); MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel"); -module_param(l2cap_mtu, uint, 0644); -MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); - module_param(l2cap_ertm, bool, 0644); MODULE_PARM_DESC(l2cap_ertm, "Use L2CAP ERTM mode for connection"); diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 0c7d31c6c18c..a58584949a95 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct sock *sk, void __user *arg) dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel); if (IS_ERR(dlc)) return PTR_ERR(dlc); - else if (dlc) { - rfcomm_dlc_put(dlc); + if (dlc) return -EBUSY; - } dlc = rfcomm_dlc_alloc(GFP_KERNEL); if (!dlc) return -ENOMEM; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index b91d6b440fdf..c8c3d38cdc7b 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -922,6 +922,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, struct sock *sk = sock->sk; int len, err = 0; struct bt_voice voice; + u32 phys; BT_DBG("sk %p", sk); @@ -956,6 +957,18 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, break; + case BT_PHY: + if (sk->sk_state != BT_CONNECTED) { + err = -ENOTCONN; + break; + } + + phys = hci_conn_get_phy(sco_pi(sk)->conn->hcon); + + if (put_user(phys, (u32 __user *) optval)) + err = -EFAULT; + break; + default: err = -ENOPROTOOPT; break; diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 204f14f8b507..1476a91ce935 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -1145,7 +1145,7 @@ static void sc_generate_link_key(struct smp_chan *smp) return; if (test_bit(SMP_FLAG_CT2, &smp->flags)) { - /* SALT = 0x00000000000000000000000000000000746D7031 */ + /* SALT = 0x000000000000000000000000746D7031 */ const u8 salt[16] = { 0x31, 0x70, 0x6d, 0x74 }; if (smp_h7(smp->tfm_cmac, smp->tk, salt, smp->link_key)) { @@ -1203,7 +1203,7 @@ static void sc_generate_ltk(struct smp_chan *smp) set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); if (test_bit(SMP_FLAG_CT2, &smp->flags)) { - /* SALT = 0x00000000000000000000000000000000746D7032 */ + /* SALT = 0x000000000000000000000000746D7032 */ const u8 salt[16] = { 0x32, 0x70, 0x6d, 0x74 }; if (smp_h7(smp->tfm_cmac, key->val, salt, smp->tk)) @@ -2115,7 +2115,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; struct hci_conn *hcon = conn->hcon; - u8 *pkax, *pkbx, *na, *nb; + u8 *pkax, *pkbx, *na, *nb, confirm_hint; u32 passkey; int err; @@ -2168,6 +2168,24 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + + /* Only Just-Works pairing requires extra checks */ + if (smp->method != JUST_WORKS) + goto mackey_and_ltk; + + /* If there already exists long term key in local host, leave + * the decision to user space since the remote device could + * be legitimate or malicious. + */ + if (hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, + hcon->role)) { + /* Set passkey to 0. 
The value can be any number since + * it'll be ignored anyway. */ + passkey = 0; + confirm_hint = 1; + goto confirm; + } } mackey_and_ltk: @@ -2188,8 +2206,11 @@ mackey_and_ltk: if (err) return SMP_UNSPECIFIED; + confirm_hint = 0; + +confirm: err = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type, - hcon->dst_type, passkey, 0); + hcon->dst_type, passkey, confirm_hint); if (err) return SMP_UNSPECIFIED;
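
The L2CAP_MODE_EXT_FLOWCTL changes above only take effect when the new enable_ecred module parameter is set, and a userspace client then has to pick the mode before connect(), since l2cap_sock_connect() now only falls back to LE_FLOWCTL when no mode was chosen. A minimal client sketch, assuming the BT_MODE/BT_MODE_EXT_FLOWCTL socket option names come from the uapi headers that accompany this series (they are not part of this diff):

/* Hypothetical client: connect an L2CAP channel in enhanced credit
 * based flow control mode (needs enable_ecred=1 on the l2cap module).
 * BT_MODE and BT_MODE_EXT_FLOWCTL are assumed from matching headers.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

int ecred_connect(const bdaddr_t *dst, uint16_t psm)
{
	struct sockaddr_l2 addr;
	uint8_t mode = BT_MODE_EXT_FLOWCTL;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (sk < 0)
		return -1;

	/* Must be set before connect(); l2cap_sock_connect() only
	 * defaults to LE_FLOWCTL when no mode was chosen.
	 */
	if (setsockopt(sk, SOL_BLUETOOTH, BT_MODE, &mode, sizeof(mode)) < 0)
		goto fail;

	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	addr.l2_psm = htobs(psm);
	addr.l2_bdaddr_type = BDADDR_LE_PUBLIC;
	bacpy(&addr.l2_bdaddr, dst);

	if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto fail;

	return sk;

fail:
	close(sk);
	return -1;
}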
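
Once such a channel is connected, its receive MTU can be renegotiated in place: the l2cap_sock_setsockopt() change above routes the request through l2cap_chan_reconfigure(), which issues an L2CAP_ECRED_RECONF_REQ, and any attempt to shrink the MTU is rejected with -EINVAL. A sketch, assuming this path is reached through the existing BT_RCVMTU option:

#include <stdint.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>

/* Grow the receive MTU of a connected EXT_FLOWCTL channel.  In the
 * kernel this lands in l2cap_chan_reconfigure(); shrinking the MTU
 * fails with EINVAL.
 */
static int ecred_grow_mtu(int sk, uint16_t new_mtu)
{
	return setsockopt(sk, SOL_BLUETOOTH, BT_RCVMTU,
			  &new_mtu, sizeof(new_mtu));
}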
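
The BT_PHY option added to the L2CAP and SCO getsockopt handlers is read-only, reports the hci_conn_get_phy() bitmask, and fails with ENOTCONN on an unconnected socket. A small sketch of reading it:

#include <stdint.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>

/* Read the PHY bitmask of a connected L2CAP or SCO socket; returns 0
 * (no PHY bits) if the socket is not connected or the option fails.
 */
static uint32_t get_conn_phy(int sk)
{
	uint32_t phys = 0;
	socklen_t len = sizeof(phys);

	if (getsockopt(sk, SOL_BLUETOOTH, BT_PHY, &phys, &len) < 0)
		return 0;

	return phys;
}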
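
Set Wideband Speech is exposed as an ordinary mgmt mode command with a one-byte parameter (MGMT_REVISION is bumped to 16 above), so toggling it follows the usual control-channel pattern and needs CAP_NET_ADMIN. A sketch that only issues the command and skips reading the reply; the <bluetooth/mgmt.h> include path is an assumption (the definitions live in BlueZ's lib/mgmt.h):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/mgmt.h>

/* Enable or disable wideband speech on controller hci<index> through
 * the mgmt interface.  struct mgmt_hdr, struct mgmt_mode and
 * MGMT_OP_SET_WIDEBAND_SPEECH are assumed from a mgmt.h matching
 * this revision; the command is rejected while the adapter is powered.
 */
static int set_wideband_speech(uint16_t index, uint8_t enable)
{
	struct sockaddr_hci addr;
	struct {
		struct mgmt_hdr hdr;
		struct mgmt_mode cp;
	} __attribute__((packed)) req;
	int sk;

	sk = socket(PF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = HCI_DEV_NONE;
	addr.hci_channel = HCI_CHANNEL_CONTROL;
	if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto fail;

	memset(&req, 0, sizeof(req));
	req.hdr.opcode = htobs(MGMT_OP_SET_WIDEBAND_SPEECH);
	req.hdr.index = htobs(index);
	req.hdr.len = htobs(sizeof(req.cp));
	req.cp.val = enable ? 0x01 : 0x00;

	/* A real client would read back the command complete event here */
	if (write(sk, &req, sizeof(req)) < 0)
		goto fail;

	close(sk);
	return 0;

fail:
	close(sk);
	return -1;
}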