Diffstat (limited to 'net/bluetooth/hci_conn.c')
-rw-r--r--  net/bluetooth/hci_conn.c | 527
1 file changed, 353 insertions(+), 174 deletions(-)
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3ad74f76983b..4f379184df5b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -27,6 +27,7 @@
#include <linux/export.h>
#include <linux/debugfs.h>
+#include <linux/errqueue.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -34,7 +35,6 @@
#include <net/bluetooth/iso.h>
#include <net/bluetooth/mgmt.h>
-#include "hci_request.h"
#include "smp.h"
#include "eir.h"
@@ -107,8 +107,7 @@ void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
* where a timeout + cancel does indicate an actual failure.
*/
if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
- mgmt_connect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, status);
+ mgmt_connect_failed(hdev, conn, status);
/* The connection attempt was doing scan for new RPA, and is
* in scan phase. If params are not associated with any other
@@ -241,13 +240,13 @@ static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
__u8 vnd_len, *vnd_data = NULL;
struct hci_op_configure_data_path *cmd = NULL;
+ /* Do not treat the two checks below as errors: the first means the
+ * user does not want to use HFP offload mode, and the second means the
+ * vendor controller does not need to send the HCI command below for
+ * offload mode.
+ */
if (!codec->data_path || !hdev->get_codec_config_data)
return 0;
- /* Do not take me as error */
- if (!hdev->get_codec_config_data)
- return 0;
-
err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
&vnd_data);
if (err < 0)
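The callback consulted above is the per-driver offload hook. For context, a minimal sketch of a vendor driver wiring it up; only the hook signature is taken from the call above, while the body, vnd_cfg_blob and the probe-path assignment are hypothetical:

	/* Hypothetical vendor hook: hand back the vendor-specific blob for
	 * the offloaded (e)SCO data path, or fail if the link type is not
	 * supported. The caller frees *vnd_data when done.
	 */
	static int vnd_get_codec_config_data(struct hci_dev *hdev, __u8 type,
					     struct bt_codec *codec,
					     __u8 *vnd_len, __u8 **vnd_data)
	{
		u8 *buf;

		if (type != ESCO_LINK)
			return -EINVAL;

		buf = kmemdup(vnd_cfg_blob, sizeof(vnd_cfg_blob), GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		*vnd_len = sizeof(vnd_cfg_blob);
		*vnd_data = buf;
		return 0;
	}

	/* in the driver's probe path */
	hdev->get_codec_config_data = vnd_get_codec_config_data;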
@@ -291,6 +290,9 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
kfree(conn_handle);
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
bt_dev_dbg(hdev, "hcon %p", conn);
configure_datapath_sync(hdev, &conn->codec);
@@ -664,11 +666,6 @@ static void le_conn_timeout(struct work_struct *work)
hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
-struct iso_cig_params {
- struct hci_cp_le_set_cig_params cp;
- struct hci_cis_params cis[0x1f];
-};
-
struct iso_list_data {
union {
u8 cig;
@@ -784,12 +781,11 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *c
if (!d)
return -ENOMEM;
- memset(d, 0, sizeof(*d));
d->big = big;
d->sync_handle = conn->sync_handle;
if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) {
- hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
+ hci_conn_hash_list_flag(hdev, find_bis, BIS_LINK,
HCI_CONN_PA_SYNC, d);
if (!d->count)
@@ -799,7 +795,7 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *c
}
if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
- hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
+ hci_conn_hash_list_flag(hdev, find_bis, BIS_LINK,
HCI_CONN_BIG_SYNC, d);
if (!d->count)
@@ -889,9 +885,11 @@ static void cis_cleanup(struct hci_conn *conn)
/* Check if ISO connection is a CIS and remove CIG if there are
* no other connections using it.
*/
- hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
- hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
- hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
+ hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_BOUND, &d);
+ hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECT,
+ &d);
+ hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECTED,
+ &d);
if (d.count)
return;
@@ -904,16 +902,43 @@ static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
U16_MAX, GFP_ATOMIC);
}
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
- u8 role, u16 handle)
+static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ u8 role, u16 handle)
{
struct hci_conn *conn;
+ switch (type) {
+ case ACL_LINK:
+ if (!hdev->acl_mtu)
+ return ERR_PTR(-ECONNREFUSED);
+ break;
+ case CIS_LINK:
+ case BIS_LINK:
+ if (hdev->iso_mtu)
+ /* Dedicated ISO Buffer exists */
+ break;
+ fallthrough;
+ case LE_LINK:
+ if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
+ return ERR_PTR(-ECONNREFUSED);
+ if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
+ return ERR_PTR(-ECONNREFUSED);
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ if (!hdev->sco_pkts)
+ /* Controller does not support SCO or eSCO over HCI */
+ return ERR_PTR(-ECONNREFUSED);
+ break;
+ default:
+ return ERR_PTR(-ECONNREFUSED);
+ }
+
bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
conn = kzalloc(sizeof(*conn), GFP_KERNEL);
if (!conn)
- return NULL;
+ return ERR_PTR(-ENOMEM);
bacpy(&conn->dst, dst);
bacpy(&conn->src, &hdev->bdaddr);
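With __hci_conn_add() returning ERR_PTR() codes instead of NULL, every caller later in this diff switches from NULL checks to IS_ERR() and propagates the encoded error. A representative sketch of the new calling convention (caller context elided):

	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (IS_ERR(conn))
		return conn;	/* e.g. -ECONNREFUSED or -ENOMEM, no longer a bare NULL */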
@@ -931,6 +956,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
conn->tx_power = HCI_TX_POWER_INVALID;
conn->max_tx_power = HCI_TX_POWER_INVALID;
conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
+ conn->sid = HCI_SID_INVALID;
set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -944,12 +970,15 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
switch (type) {
case ACL_LINK:
conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
+ conn->mtu = hdev->acl_mtu;
break;
case LE_LINK:
/* conn->src should reflect the local identity address */
hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
+ conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
break;
- case ISO_LINK:
+ case CIS_LINK:
+ case BIS_LINK:
/* conn->src should reflect the local identity address */
hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
@@ -959,6 +988,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
else if (conn->role == HCI_ROLE_MASTER)
conn->cleanup = cis_cleanup;
+ conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
+ hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
break;
case SCO_LINK:
if (lmp_esco_capable(hdev))
@@ -966,13 +997,17 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
(hdev->esco_type & EDR_ESCO_MASK);
else
conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
+
+ conn->mtu = hdev->sco_mtu;
break;
case ESCO_LINK:
conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
+ conn->mtu = hdev->sco_mtu;
break;
}
skb_queue_head_init(&conn->data_q);
+ skb_queue_head_init(&conn->tx_q.queue);
INIT_LIST_HEAD(&conn->chan_list);
INIT_LIST_HEAD(&conn->link_list);
@@ -1011,9 +1046,18 @@ struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
handle = hci_conn_hash_alloc_unset(hdev);
if (unlikely(handle < 0))
- return NULL;
+ return ERR_PTR(-ECONNREFUSED);
- return hci_conn_add(hdev, type, dst, role, handle);
+ return __hci_conn_add(hdev, type, dst, role, handle);
+}
+
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ u8 role, u16 handle)
+{
+ if (handle > HCI_CONN_HANDLE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ return __hci_conn_add(hdev, type, dst, role, handle);
}
static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
@@ -1031,7 +1075,8 @@ static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
if (HCI_CONN_HANDLE_UNSET(conn->handle))
hci_conn_failed(conn, reason);
break;
- case ISO_LINK:
+ case CIS_LINK:
+ case BIS_LINK:
if ((conn->state != BT_CONNECTED &&
!test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) ||
test_bit(HCI_CONN_BIG_CREATED, &conn->flags))
@@ -1090,9 +1135,9 @@ void hci_conn_del(struct hci_conn *conn)
hci_conn_unlink(conn);
- cancel_delayed_work_sync(&conn->disc_work);
- cancel_delayed_work_sync(&conn->auto_accept_work);
- cancel_delayed_work_sync(&conn->idle_work);
+ disable_delayed_work_sync(&conn->disc_work);
+ disable_delayed_work_sync(&conn->auto_accept_work);
+ disable_delayed_work_sync(&conn->idle_work);
if (conn->type == ACL_LINK) {
/* Unacked frames */
@@ -1106,7 +1151,8 @@ void hci_conn_del(struct hci_conn *conn)
hdev->acl_cnt += conn->sent;
} else {
/* Unacked ISO frames */
- if (conn->type == ISO_LINK) {
+ if (conn->type == CIS_LINK ||
+ conn->type == BIS_LINK) {
if (hdev->iso_pkts)
hdev->iso_cnt += conn->sent;
else if (hdev->le_pkts)
@@ -1117,6 +1163,7 @@ void hci_conn_del(struct hci_conn *conn)
}
skb_queue_purge(&conn->data_q);
+ skb_queue_purge(&conn->tx_q.queue);
/* Remove the connection from the list and cleanup its remaining
* state. This is a separate function since for some cases like
@@ -1140,8 +1187,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
list_for_each_entry(d, &hci_dev_list, list) {
if (!test_bit(HCI_UP, &d->flags) ||
- hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
- d->dev_type != HCI_PRIMARY)
+ hci_dev_test_flag(d, HCI_USER_CHANNEL))
continue;
/* Simple routing:
@@ -1215,8 +1261,7 @@ void hci_conn_failed(struct hci_conn *conn, u8 status)
hci_le_conn_failed(conn, status);
break;
case ACL_LINK:
- mgmt_connect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, status);
+ mgmt_connect_failed(hdev, conn, status);
break;
}
@@ -1263,7 +1308,7 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
- u16 conn_timeout, u8 role)
+ u16 conn_timeout, u8 role, u8 phy, u8 sec_phy)
{
struct hci_conn *conn;
struct smp_irk *irk;
@@ -1317,8 +1362,8 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
bacpy(&conn->dst, dst);
} else {
conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
- if (!conn)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(conn))
+ return conn;
hci_conn_hold(conn);
conn->pending_sec_level = sec_level;
}
@@ -1326,6 +1371,8 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
conn->dst_type = dst_type;
conn->sec_level = BT_SECURITY_LOW;
conn->conn_timeout = conn_timeout;
+ conn->le_adv_phy = phy;
+ conn->le_adv_sec_phy = sec_phy;
err = hci_connect_le_sync(hdev, conn);
if (err) {
@@ -1454,8 +1501,8 @@ static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
/* This function requires the caller holds hdev->lock */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
- struct bt_iso_qos *qos, __u8 base_len,
- __u8 *base)
+ __u8 sid, struct bt_iso_qos *qos,
+ __u8 base_len, __u8 *base)
{
struct hci_conn *conn;
int err;
@@ -1491,11 +1538,12 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
memcmp(conn->le_per_adv_data, base, base_len)))
return ERR_PTR(-EADDRINUSE);
- conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
- if (!conn)
- return ERR_PTR(-ENOMEM);
+ conn = hci_conn_add_unset(hdev, BIS_LINK, dst, HCI_ROLE_MASTER);
+ if (IS_ERR(conn))
+ return conn;
conn->state = BT_CONNECT;
+ conn->sid = sid;
hci_conn_hold(conn);
return conn;
@@ -1536,8 +1584,8 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
BT_DBG("requesting refresh of dst_addr");
conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
- if (!conn)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(conn))
+ return conn;
if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
hci_conn_del(conn);
@@ -1584,8 +1632,8 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
- if (!acl)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(acl))
+ return acl;
}
hci_conn_hold(acl);
@@ -1653,9 +1701,9 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
sco = hci_conn_hash_lookup_ba(hdev, type, dst);
if (!sco) {
sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
- if (!sco) {
+ if (IS_ERR(sco)) {
hci_conn_drop(acl);
- return ERR_PTR(-ENOMEM);
+ return sco;
}
}
@@ -1699,7 +1747,7 @@ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
data.count = 0;
/* Create a BIS for each bound connection */
- hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
+ hci_conn_hash_list_state(hdev, bis_list, BIS_LINK,
BT_BOUND, &data);
cp.handle = qos->bcast.big;
@@ -1720,34 +1768,33 @@ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
static int set_cig_params_sync(struct hci_dev *hdev, void *data)
{
+ DEFINE_FLEX(struct hci_cp_le_set_cig_params, pdu, cis, num_cis, 0x1f);
u8 cig_id = PTR_UINT(data);
struct hci_conn *conn;
struct bt_iso_qos *qos;
- struct iso_cig_params pdu;
+ u8 aux_num_cis = 0;
u8 cis_id;
conn = hci_conn_hash_lookup_cig(hdev, cig_id);
if (!conn)
return 0;
- memset(&pdu, 0, sizeof(pdu));
-
qos = &conn->iso_qos;
- pdu.cp.cig_id = cig_id;
- hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
- hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
- pdu.cp.sca = qos->ucast.sca;
- pdu.cp.packing = qos->ucast.packing;
- pdu.cp.framing = qos->ucast.framing;
- pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
- pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
+ pdu->cig_id = cig_id;
+ hci_cpu_to_le24(qos->ucast.out.interval, pdu->c_interval);
+ hci_cpu_to_le24(qos->ucast.in.interval, pdu->p_interval);
+ pdu->sca = qos->ucast.sca;
+ pdu->packing = qos->ucast.packing;
+ pdu->framing = qos->ucast.framing;
+ pdu->c_latency = cpu_to_le16(qos->ucast.out.latency);
+ pdu->p_latency = cpu_to_le16(qos->ucast.in.latency);
/* Reprogram all CIS(s) with the same CIG, valid range are:
* num_cis: 0x00 to 0x1F
* cis_id: 0x00 to 0xEF
*/
for (cis_id = 0x00; cis_id < 0xf0 &&
- pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
+ aux_num_cis < pdu->num_cis; cis_id++) {
struct hci_cis_params *cis;
conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
@@ -1756,7 +1803,7 @@ static int set_cig_params_sync(struct hci_dev *hdev, void *data)
qos = &conn->iso_qos;
- cis = &pdu.cis[pdu.cp.num_cis++];
+ cis = &pdu->cis[aux_num_cis++];
cis->cis_id = cis_id;
cis->c_sdu = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
cis->p_sdu = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
@@ -1767,14 +1814,14 @@ static int set_cig_params_sync(struct hci_dev *hdev, void *data)
cis->c_rtn = qos->ucast.out.rtn;
cis->p_rtn = qos->ucast.in.rtn;
}
+ pdu->num_cis = aux_num_cis;
- if (!pdu.cp.num_cis)
+ if (!pdu->num_cis)
return 0;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
- sizeof(pdu.cp) +
- pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
- HCI_CMD_TIMEOUT);
+ struct_size(pdu, cis, pdu->num_cis),
+ pdu, HCI_CMD_TIMEOUT);
}
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
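set_cig_params_sync() now builds the command with DEFINE_FLEX() from <linux/overflow.h> instead of the removed iso_cig_params wrapper. A minimal sketch of the pattern, assuming the same layout (a num_cis counter followed by a cis[] flexible array); the helper name is illustrative only:

	static int send_cig_params_sketch(struct hci_dev *hdev, u8 cig_id)
	{
		/* Zeroed on-stack object with room for 0x1f trailing elements;
		 * DEFINE_FLEX() presets pdu->num_cis to that capacity, which is
		 * why the loop above uses it as the upper bound.
		 */
		DEFINE_FLEX(struct hci_cp_le_set_cig_params, pdu, cis, num_cis, 0x1f);
		u8 used = 0;

		pdu->cig_id = cig_id;
		/* ... fill pdu->cis[used++] per CIS, bounded by pdu->num_cis ... */

		pdu->num_cis = used;	/* shrink to what was actually filled */

		/* struct_size() accounts only for the cis[] entries in use */
		return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
					     struct_size(pdu, cis, pdu->num_cis),
					     pdu, HCI_CMD_TIMEOUT);
	}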
@@ -1789,12 +1836,12 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
data.count = 0;
- hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
+ hci_conn_hash_list_state(hdev, find_cis, CIS_LINK,
BT_CONNECT, &data);
if (data.count)
continue;
- hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
+ hci_conn_hash_list_state(hdev, find_cis, CIS_LINK,
BT_CONNECTED, &data);
if (!data.count)
break;
@@ -1844,9 +1891,10 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
qos->ucast.cis);
if (!cis) {
- cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
- if (!cis)
- return ERR_PTR(-ENOMEM);
+ cis = hci_conn_add_unset(hdev, CIS_LINK, dst,
+ HCI_ROLE_MASTER);
+ if (IS_ERR(cis))
+ return cis;
cis->cleanup = cis_cleanup;
cis->dst_type = dst_type;
cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
@@ -1936,7 +1984,7 @@ bool hci_iso_setup_path(struct hci_conn *conn)
int hci_conn_check_create_cis(struct hci_conn *conn)
{
- if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
+ if (conn->type != CIS_LINK)
return -EINVAL;
if (!conn->parent || conn->parent->state != BT_CONNECTED ||
@@ -1981,14 +2029,8 @@ static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
struct bt_iso_io_qos *qos, __u8 phy)
{
/* Only set MTU if PHY is enabled */
- if (!qos->sdu && qos->phy) {
- if (hdev->iso_mtu > 0)
- qos->sdu = hdev->iso_mtu;
- else if (hdev->le_mtu > 0)
- qos->sdu = hdev->le_mtu;
- else
- qos->sdu = hdev->acl_mtu;
- }
+ if (!qos->sdu && qos->phy)
+ qos->sdu = conn->mtu;
/* Use the same PHY as ACL if set to any */
if (qos->phy == BT_ISO_PHY_ANY)
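The SDU fallback chain is now computed once per connection: __hci_conn_add() above picks conn->mtu per link type (dedicated ISO buffers first, then LE, then ACL), so hci_iso_qos_setup() only reads it back. A sketch of that precedence using the field names from this diff; the helper is illustrative only:

	static u16 iso_default_mtu(struct hci_dev *hdev)
	{
		if (hdev->iso_mtu)	/* dedicated ISO buffers */
			return hdev->iso_mtu;
		if (hdev->le_mtu)	/* shared LE buffers */
			return hdev->le_mtu;
		return hdev->acl_mtu;	/* shared ACL buffers */
	}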
@@ -2021,7 +2063,8 @@ static int create_big_sync(struct hci_dev *hdev, void *data)
if (qos->bcast.bis)
sync_interval = interval * 4;
- err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
+ err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->sid,
+ conn->le_per_adv_data_len,
conn->le_per_adv_data, flags, interval,
interval, sync_interval);
if (err)
@@ -2030,111 +2073,54 @@ static int create_big_sync(struct hci_dev *hdev, void *data)
return hci_le_create_big(conn, &conn->iso_qos);
}
-static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
-{
- struct hci_cp_le_pa_create_sync *cp = data;
-
- bt_dev_dbg(hdev, "");
-
- if (err)
- bt_dev_err(hdev, "Unable to create PA: %d", err);
-
- kfree(cp);
-}
-
-static int create_pa_sync(struct hci_dev *hdev, void *data)
-{
- struct hci_cp_le_pa_create_sync *cp = data;
- int err;
-
- err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
- sizeof(*cp), cp, HCI_CMD_TIMEOUT);
- if (err) {
- hci_dev_clear_flag(hdev, HCI_PA_SYNC);
- return err;
- }
-
- return hci_update_passive_scan_sync(hdev);
-}
-
struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid,
struct bt_iso_qos *qos)
{
- struct hci_cp_le_pa_create_sync *cp;
struct hci_conn *conn;
- int err;
- if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
- return ERR_PTR(-EBUSY);
+ bt_dev_dbg(hdev, "dst %pMR type %d sid %d", dst, dst_type, sid);
- conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE);
- if (!conn)
- return ERR_PTR(-ENOMEM);
+ conn = hci_conn_add_unset(hdev, BIS_LINK, dst, HCI_ROLE_SLAVE);
+ if (IS_ERR(conn))
+ return conn;
conn->iso_qos = *qos;
+ conn->dst_type = dst_type;
+ conn->sid = sid;
conn->state = BT_LISTEN;
+ conn->conn_timeout = msecs_to_jiffies(qos->bcast.sync_timeout * 10);
hci_conn_hold(conn);
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
- if (!cp) {
- hci_dev_clear_flag(hdev, HCI_PA_SYNC);
- hci_conn_drop(conn);
- return ERR_PTR(-ENOMEM);
- }
-
- cp->options = qos->bcast.options;
- cp->sid = sid;
- cp->addr_type = dst_type;
- bacpy(&cp->addr, dst);
- cp->skip = cpu_to_le16(qos->bcast.skip);
- cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
- cp->sync_cte_type = qos->bcast.sync_cte_type;
-
- /* Queue start pa_create_sync and scan */
- err = hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
- if (err < 0) {
- hci_conn_drop(conn);
- kfree(cp);
- return ERR_PTR(err);
- }
+ hci_connect_pa_sync(hdev, conn);
return conn;
}
-int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
- struct bt_iso_qos *qos,
- __u16 sync_handle, __u8 num_bis, __u8 bis[])
+int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+ struct bt_iso_qos *qos, __u16 sync_handle,
+ __u8 num_bis, __u8 bis[])
{
- struct _packed {
- struct hci_cp_le_big_create_sync cp;
- __u8 bis[0x11];
- } pdu;
int err;
- if (num_bis < 0x01 || num_bis > sizeof(pdu.bis))
+ if (num_bis < 0x01 || num_bis > ISO_MAX_NUM_BIS)
return -EINVAL;
err = qos_set_big(hdev, qos);
if (err)
return err;
- if (hcon)
- hcon->iso_qos.bcast.big = qos->bcast.big;
+ if (hcon) {
+ /* Update hcon QoS */
+ hcon->iso_qos = *qos;
- memset(&pdu, 0, sizeof(pdu));
- pdu.cp.handle = qos->bcast.big;
- pdu.cp.sync_handle = cpu_to_le16(sync_handle);
- pdu.cp.encryption = qos->bcast.encryption;
- memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
- pdu.cp.mse = qos->bcast.mse;
- pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
- pdu.cp.num_bis = num_bis;
- memcpy(pdu.bis, bis, num_bis);
+ hcon->num_bis = num_bis;
+ memcpy(hcon->bis, bis, num_bis);
+ hcon->conn_timeout = msecs_to_jiffies(qos->bcast.timeout * 10);
+ }
- return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
- sizeof(pdu.cp) + num_bis, &pdu);
+ return hci_connect_big_sync(hdev, hcon);
}
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
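Both bcast.sync_timeout and bcast.timeout are expressed in units of 10 ms (the HCI Sync_Timeout / BIG_Sync_Timeout convention), which is why the assignments above multiply by 10 before msecs_to_jiffies(). A minimal sketch of that conversion; the helper name is illustrative only:

	static unsigned long bcast_timeout_to_jiffies(u16 timeout)
	{
		/* e.g. timeout = 0x0c80 (3200) => 32000 ms => 32 s */
		return msecs_to_jiffies(timeout * 10);
	}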
@@ -2150,7 +2136,7 @@ static void create_big_complete(struct hci_dev *hdev, void *data, int err)
}
}
-struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
+struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 sid,
struct bt_iso_qos *qos,
__u8 base_len, __u8 *base)
{
@@ -2172,7 +2158,7 @@ struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
base, base_len);
/* We need hci_conn object using the BDADDR_ANY as dst */
- conn = hci_add_bis(hdev, dst, qos, base_len, eir);
+ conn = hci_add_bis(hdev, dst, sid, qos, base_len, eir);
if (IS_ERR(conn))
return conn;
@@ -2198,13 +2184,9 @@ struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
conn->iso_qos.bcast.big);
if (parent && parent != conn) {
link = hci_conn_link(parent, conn);
- if (!link) {
- hci_conn_drop(conn);
- return ERR_PTR(-ENOLINK);
- }
-
- /* Link takes the refcount */
hci_conn_drop(conn);
+ if (!link)
+ return ERR_PTR(-ENOLINK);
}
return conn;
@@ -2227,20 +2209,35 @@ static void bis_mark_per_adv(struct hci_conn *conn, void *data)
}
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
- __u8 dst_type, struct bt_iso_qos *qos,
+ __u8 dst_type, __u8 sid,
+ struct bt_iso_qos *qos,
__u8 base_len, __u8 *base)
{
struct hci_conn *conn;
int err;
struct iso_list_data data;
- conn = hci_bind_bis(hdev, dst, qos, base_len, base);
+ conn = hci_bind_bis(hdev, dst, sid, qos, base_len, base);
if (IS_ERR(conn))
return conn;
if (conn->state == BT_CONNECTED)
return conn;
+ /* Check if SID needs to be allocated then search for the first
+ * available.
+ */
+ if (conn->sid == HCI_SID_INVALID) {
+ u8 sid;
+
+ for (sid = 0; sid <= 0x0f; sid++) {
+ if (!hci_find_adv_sid(hdev, sid)) {
+ conn->sid = sid;
+ break;
+ }
+ }
+ }
+
data.big = qos->bcast.big;
data.bis = qos->bcast.bis;
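Advertising SIDs are 4 bits wide, so the allocation above only needs to scan the 16 possible values for one not used by another advertising instance. The loop is equivalent to this small helper, shown here purely as an illustration of the search:

	static u8 bis_pick_free_sid(struct hci_dev *hdev)
	{
		u8 sid;

		for (sid = 0; sid <= 0x0f; sid++)
			if (!hci_find_adv_sid(hdev, sid))
				return sid;

		return HCI_SID_INVALID;	/* all 16 SIDs in use */
	}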
@@ -2248,7 +2245,7 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
* the start periodic advertising and create BIG commands have
* been queued
*/
- hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
+ hci_conn_hash_list_state(hdev, bis_mark_per_adv, BIS_LINK,
BT_BOUND, &data);
/* Queue start periodic advertising and create BIG */
@@ -2273,7 +2270,7 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
le = hci_connect_le(hdev, dst, dst_type, false,
BT_SECURITY_LOW,
HCI_LE_CONN_TIMEOUT,
- HCI_ROLE_SLAVE);
+ HCI_ROLE_SLAVE, 0, 0);
else
le = hci_connect_le_scan(hdev, dst, dst_type,
BT_SECURITY_LOW,
@@ -2294,15 +2291,12 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
}
link = hci_conn_link(le, cis);
+ hci_conn_drop(cis);
if (!link) {
hci_conn_drop(le);
- hci_conn_drop(cis);
return ERR_PTR(-ENOLINK);
}
- /* Link takes the refcount */
- hci_conn_drop(cis);
-
cis->state = BT_CONNECT;
hci_le_create_cis_pending(hdev);
@@ -2926,5 +2920,190 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
return 0;
}
- return hci_cmd_sync_queue_once(hdev, abort_conn_sync, conn, NULL);
+ /* Run immediately if on cmd_sync_work since this may be called
+ * as a result to MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR which does
+ * already queue its callback on cmd_sync_work.
+ */
+ return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
+}
+
+void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset,
+ const struct sockcm_cookie *sockc)
+{
+ struct sock *sk = skb ? skb->sk : NULL;
+ int key;
+
+ /* This shall be called on a single skb of those generated by user
+ * sendmsg(), and only when the sendmsg() does not return error to
+ * user. This is required for keeping the tskey that increments here in
+ * sync with possible sendmsg() counting by user.
+ *
+ * Stream sockets shall set key_offset to sendmsg() length in bytes
+ * and call with the last fragment, others to 1 and first fragment.
+ */
+
+ if (!skb || !sockc || !sk || !key_offset)
+ return;
+
+ sock_tx_timestamp(sk, sockc, &skb_shinfo(skb)->tx_flags);
+
+ if (sk->sk_type == SOCK_STREAM)
+ key = atomic_add_return(key_offset, &sk->sk_tskey);
+
+ if (sockc->tsflags & SOF_TIMESTAMPING_OPT_ID &&
+ sockc->tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) {
+ if (sockc->tsflags & SOCKCM_FLAG_TS_OPT_ID) {
+ skb_shinfo(skb)->tskey = sockc->ts_opt_id;
+ } else {
+ if (sk->sk_type != SOCK_STREAM)
+ key = atomic_inc_return(&sk->sk_tskey);
+ skb_shinfo(skb)->tskey = key - 1;
+ }
+ }
+}
+
+void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb)
+{
+ struct tx_queue *comp = &conn->tx_q;
+ bool track = false;
+
+ /* Emit SND now, ie. just before sending to driver */
+ if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
+ __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SND);
+
+ /* COMPLETION tstamp is emitted for tracked skb later in Number of
+ * Completed Packets event. Available only for flow controlled cases.
+ *
+ * TODO: SCO support without flowctl (needs to be done in drivers)
+ */
+ switch (conn->type) {
+ case CIS_LINK:
+ case BIS_LINK:
+ case ACL_LINK:
+ case LE_LINK:
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ if (!hci_dev_test_flag(conn->hdev, HCI_SCO_FLOWCTL))
+ return;
+ break;
+ default:
+ return;
+ }
+
+ if (skb->sk && (skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP))
+ track = true;
+
+ /* If nothing is tracked, just count extra skbs at the queue head */
+ if (!track && !comp->tracked) {
+ comp->extra++;
+ return;
+ }
+
+ if (track) {
+ skb = skb_clone_sk(skb);
+ if (!skb)
+ goto count_only;
+
+ comp->tracked++;
+ } else {
+ skb = skb_clone(skb, GFP_KERNEL);
+ if (!skb)
+ goto count_only;
+ }
+
+ skb_queue_tail(&comp->queue, skb);
+ return;
+
+count_only:
+ /* Stop tracking skbs, and only count. This will not emit timestamps for
+ * the packets, but if we get here something is more seriously wrong.
+ */
+ comp->tracked = 0;
+ comp->extra += skb_queue_len(&comp->queue) + 1;
+ skb_queue_purge(&comp->queue);
+}
+
+void hci_conn_tx_dequeue(struct hci_conn *conn)
+{
+ struct tx_queue *comp = &conn->tx_q;
+ struct sk_buff *skb;
+
+ /* If there are tracked skbs, the counted extra go before dequeuing real
+ * skbs, to keep ordering. When nothing is tracked, the ordering doesn't
+ * matter so dequeue real skbs first to get rid of them ASAP.
+ */
+ if (comp->extra && (comp->tracked || skb_queue_empty(&comp->queue))) {
+ comp->extra--;
+ return;
+ }
+
+ skb = skb_dequeue(&comp->queue);
+ if (!skb)
+ return;
+
+ if (skb->sk) {
+ comp->tracked--;
+ __skb_tstamp_tx(skb, NULL, NULL, skb->sk,
+ SCM_TSTAMP_COMPLETION);
+ }
+
+ kfree_skb(skb);
+}
+
+u8 *hci_conn_key_enc_size(struct hci_conn *conn)
+{
+ if (conn->type == ACL_LINK) {
+ struct link_key *key;
+
+ key = hci_find_link_key(conn->hdev, &conn->dst);
+ if (!key)
+ return NULL;
+
+ return &key->pin_len;
+ } else if (conn->type == LE_LINK) {
+ struct smp_ltk *ltk;
+
+ ltk = hci_find_ltk(conn->hdev, &conn->dst, conn->dst_type,
+ conn->role);
+ if (!ltk)
+ return NULL;
+
+ return &ltk->enc_size;
+ }
+
+ return NULL;
+}
+
+int hci_ethtool_ts_info(unsigned int index, int sk_proto,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct hci_dev *hdev;
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return -ENODEV;
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
+
+ switch (sk_proto) {
+ case BTPROTO_ISO:
+ case BTPROTO_L2CAP:
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_COMPLETION;
+ break;
+ case BTPROTO_SCO:
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
+ if (hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_COMPLETION;
+ break;
+ }
+
+ hci_dev_put(hdev);
+ return 0;
}
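The TX timestamping plumbing added above backs SO_TIMESTAMPING on ISO, L2CAP and SCO sockets: SCM_TSTAMP_SND is emitted just before the skb is handed to the driver, and SCM_TSTAMP_COMPLETION when the controller reports it in Number of Completed Packets (for SCO only with flow control enabled). A minimal userspace sketch of requesting and reading those reports, using only standard Linux socket UAPI; socket creation and error handling are elided:

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/net_tstamp.h>
	#include <linux/errqueue.h>

	static int enable_tx_tstamps(int sk)
	{
		unsigned int flags = SOF_TIMESTAMPING_SOFTWARE |
				     SOF_TIMESTAMPING_TX_SOFTWARE |
				     SOF_TIMESTAMPING_TX_COMPLETION |
				     SOF_TIMESTAMPING_OPT_ID;

		return setsockopt(sk, SOL_SOCKET, SO_TIMESTAMPING,
				  &flags, sizeof(flags));
	}

	/* After sendmsg(), poll the error queue; each report arrives as an
	 * SCM_TIMESTAMPING control message (the accompanying sock_extended_err,
	 * not parsed here, distinguishes SND from COMPLETION via ee_info and
	 * carries the OPT_ID key in ee_data).
	 */
	static int read_tx_tstamp(int sk, struct scm_timestamping *tss)
	{
		char control[256];
		struct msghdr msg = {
			.msg_control = control,
			.msg_controllen = sizeof(control),
		};
		struct cmsghdr *cm;

		if (recvmsg(sk, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
			return -1;

		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
			if (cm->cmsg_level == SOL_SOCKET &&
			    cm->cmsg_type == SCM_TIMESTAMPING)
				memcpy(tss, CMSG_DATA(cm), sizeof(*tss));

		return 0;
	}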