Diffstat (limited to 'net/bluetooth')
48 files changed, 14839 insertions, 10130 deletions
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 133d7ea063fb..2c21ae8abadc 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -13,6 +13,7 @@ #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/addrconf.h> +#include <net/netdev_lock.h> #include <net/pkt_sched.h> #include <net/bluetooth/bluetooth.h> @@ -52,6 +53,11 @@ static bool enable_6lowpan; static struct l2cap_chan *listen_chan; static DEFINE_MUTEX(set_lock); +enum { + LOWPAN_PEER_CLOSING, + LOWPAN_PEER_MAXBITS +}; + struct lowpan_peer { struct list_head list; struct rcu_head rcu; @@ -60,6 +66,8 @@ struct lowpan_peer { /* peer addresses in various formats */ unsigned char lladdr[ETH_ALEN]; struct in6_addr peer_addr; + + DECLARE_BITMAP(flags, LOWPAN_PEER_MAXBITS); }; struct lowpan_btle_dev { @@ -133,7 +141,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev, struct in6_addr *daddr, struct sk_buff *skb) { - struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); + struct rt6_info *rt = dst_rt6_info(skb_dst(skb)); int count = atomic_read(&dev->peer_count); const struct in6_addr *nexthop; struct lowpan_peer *peer; @@ -240,7 +248,7 @@ static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev) if (!skb_cp) return NET_RX_DROP; - return netif_rx_ni(skb_cp); + return netif_rx(skb_cp); } static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev, @@ -288,6 +296,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, local_skb->pkt_type = PACKET_HOST; local_skb->dev = dev; + skb_reset_mac_header(local_skb); skb_set_transport_header(local_skb, sizeof(struct ipv6hdr)); if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) { @@ -441,9 +450,9 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, iv.iov_len = skb->len; memset(&msg, 0, sizeof(msg)); - iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len); + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, skb->len); - err = l2cap_chan_send(chan, &msg, skb->len); + err = l2cap_chan_send(chan, &msg, skb->len, NULL); if (err > 0) { netdev->stats.tx_bytes += err; netdev->stats.tx_packets++; @@ -572,7 +581,7 @@ static void netdev_setup(struct net_device *dev) dev->needs_free_netdev = true; } -static struct device_type bt_type = { +static const struct device_type bt_type = { .name = "bluetooth", }; @@ -641,7 +650,6 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, return NULL; peer->chan = chan; - memset(&peer->peer_addr, 0, sizeof(struct in6_addr)); baswap((void *)peer->lladdr, &chan->dst); @@ -826,11 +834,16 @@ static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb) { + struct sk_buff *skb; + /* Note that we must allocate using GFP_ATOMIC here as * this function is called originally from netdev hard xmit * function in atomic context. 
*/ - return bt_skb_alloc(hdr_len + len, GFP_ATOMIC); + skb = bt_skb_alloc(hdr_len + len, GFP_ATOMIC); + if (!skb) + return ERR_PTR(-ENOMEM); + return skb; } static void chan_suspend_cb(struct l2cap_chan *chan) @@ -893,7 +906,7 @@ static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type) chan->ops = &bt_6lowpan_chan_ops; err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0, - addr, dst_type); + addr, dst_type, L2CAP_CONN_TIMEOUT); BT_DBG("chan %p err %d", chan, err); if (err < 0) @@ -914,7 +927,9 @@ static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type) BT_DBG("peer %p chan %p", peer, peer->chan); + l2cap_chan_lock(peer->chan); l2cap_chan_close(peer->chan, ENOENT); + l2cap_chan_unlock(peer->chan); return 0; } @@ -951,10 +966,11 @@ static struct l2cap_chan *bt_6lowpan_listen(void) } static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, - struct l2cap_conn **conn) + struct l2cap_conn **conn, bool disconnect) { struct hci_conn *hcon; struct hci_dev *hdev; + int le_addr_type; int n; n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", @@ -965,14 +981,34 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, if (n < 7) return -EINVAL; + if (disconnect) { + /* The "disconnect" debugfs command has used different address + * type constants than "connect" since 2015. Let's retain that + * for now even though it's obviously buggy... + */ + *addr_type += 1; + } + + switch (*addr_type) { + case BDADDR_LE_PUBLIC: + le_addr_type = ADDR_LE_DEV_PUBLIC; + break; + case BDADDR_LE_RANDOM: + le_addr_type = ADDR_LE_DEV_RANDOM; + break; + default: + return -EINVAL; + } + /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */ hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC); if (!hdev) return -ENOENT; hci_dev_lock(hdev); - hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type); + hcon = hci_conn_hash_lookup_le(hdev, addr, le_addr_type); hci_dev_unlock(hdev); + hci_dev_put(hdev); if (!hcon) return -ENOENT; @@ -987,41 +1023,52 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, static void disconnect_all_peers(void) { struct lowpan_btle_dev *entry; - struct lowpan_peer *peer, *tmp_peer, *new_peer; - struct list_head peers; - - INIT_LIST_HEAD(&peers); + struct lowpan_peer *peer; + int nchans; - /* We make a separate list of peers as the close_cb() will - * modify the device peers list so it is better not to mess - * with the same list at the same time. + /* l2cap_chan_close() cannot be called from RCU, and lock ordering + * chan->lock > devices_lock prevents taking write side lock, so copy + * then close. 
*/ rcu_read_lock(); + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) + list_for_each_entry_rcu(peer, &entry->peers, list) + clear_bit(LOWPAN_PEER_CLOSING, peer->flags); + rcu_read_unlock(); - list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { - list_for_each_entry_rcu(peer, &entry->peers, list) { - new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC); - if (!new_peer) - break; + do { + struct l2cap_chan *chans[32]; + int i; - new_peer->chan = peer->chan; - INIT_LIST_HEAD(&new_peer->list); + nchans = 0; - list_add(&new_peer->list, &peers); - } - } + spin_lock(&devices_lock); - rcu_read_unlock(); + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + list_for_each_entry_rcu(peer, &entry->peers, list) { + if (test_and_set_bit(LOWPAN_PEER_CLOSING, + peer->flags)) + continue; - spin_lock(&devices_lock); - list_for_each_entry_safe(peer, tmp_peer, &peers, list) { - l2cap_chan_close(peer->chan, ENOENT); + l2cap_chan_hold(peer->chan); + chans[nchans++] = peer->chan; - list_del_rcu(&peer->list); - kfree_rcu(peer, rcu); - } - spin_unlock(&devices_lock); + if (nchans >= ARRAY_SIZE(chans)) + goto done; + } + } + +done: + spin_unlock(&devices_lock); + + for (i = 0; i < nchans; ++i) { + l2cap_chan_lock(chans[i]); + l2cap_chan_close(chans[i], ENOENT); + l2cap_chan_unlock(chans[i]); + l2cap_chan_put(chans[i]); + } + } while (nchans); } struct set_enable { @@ -1044,7 +1091,9 @@ static void do_enable_set(struct work_struct *work) mutex_lock(&set_lock); if (listen_chan) { + l2cap_chan_lock(listen_chan); l2cap_chan_close(listen_chan, 0); + l2cap_chan_unlock(listen_chan); l2cap_chan_put(listen_chan); } @@ -1097,13 +1146,15 @@ static ssize_t lowpan_control_write(struct file *fp, buf[buf_size] = '\0'; if (memcmp(buf, "connect ", 8) == 0) { - ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn); + ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn, false); if (ret == -EINVAL) return ret; mutex_lock(&set_lock); if (listen_chan) { + l2cap_chan_lock(listen_chan); l2cap_chan_close(listen_chan, 0); + l2cap_chan_unlock(listen_chan); l2cap_chan_put(listen_chan); listen_chan = NULL; } @@ -1134,7 +1185,7 @@ static ssize_t lowpan_control_write(struct file *fp, } if (memcmp(buf, "disconnect ", 11) == 0) { - ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn); + ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn, true); if (ret < 0) return ret; @@ -1265,7 +1316,9 @@ static void __exit bt_6lowpan_exit(void) debugfs_remove(lowpan_control_debugfs); if (listen_chan) { + l2cap_chan_lock(listen_chan); l2cap_chan_close(listen_chan, 0); + l2cap_chan_unlock(listen_chan); l2cap_chan_put(listen_chan); } diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig index e0ab4cd7afc3..6b2b65a66700 100644 --- a/net/bluetooth/Kconfig +++ b/net/bluetooth/Kconfig @@ -29,6 +29,7 @@ menuconfig BT SCO audio links L2CAP (Logical Link Control and Adaptation Protocol) SMP (Security Manager Protocol) on LE (Low Energy) links + ISO isochronous links HCI Device drivers (Interface to the hardware) RFCOMM Module (RFCOMM Protocol) BNEP Module (Bluetooth Network Encapsulation Protocol) @@ -61,14 +62,6 @@ source "net/bluetooth/cmtp/Kconfig" source "net/bluetooth/hidp/Kconfig" -config BT_HS - bool "Bluetooth High Speed (HS) features" - depends on BT_BREDR - help - Bluetooth High Speed includes support for off-loading - Bluetooth connections via 802.11 (wifi) physical layer - available with Bluetooth version 3.0 or later. 
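The disconnect_all_peers() rewrite in the 6lowpan.c hunk above replaces the old kmalloc'd shadow list with a batched copy-then-close loop: because the stated lock ordering (chan->lock above devices_lock) forbids calling l2cap_chan_close() while devices_lock is held, peers are marked with LOWPAN_PEER_CLOSING, collected into a fixed-size array with a reference held, and closed only after the lock is dropped. Below is a minimal userspace C sketch of that pattern, with a pthread mutex standing in for the spinlock and plain counters standing in for the kref and bit operations; all names (peer, close_peer, disconnect_all) are hypothetical analogues, not the kernel API.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-ins for struct lowpan_peer, the
	 * LOWPAN_PEER_CLOSING bit and devices_lock from the patch above.
	 */
	struct peer {
		struct peer *next;
		int closing;	/* LOWPAN_PEER_CLOSING analogue */
		int refcnt;	/* l2cap_chan_hold()/l2cap_chan_put() analogue */
		int id;
	};

	static struct peer *peers;
	static pthread_mutex_t devices_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Must not run under devices_lock, like l2cap_chan_close(). */
	static void close_peer(struct peer *p)
	{
		printf("closing peer %d\n", p->id);
	}

	static void disconnect_all(void)
	{
		struct peer *batch[32];
		size_t n, i;

		do {
			struct peer *p;

			n = 0;
			pthread_mutex_lock(&devices_lock);
			for (p = peers; p && n < 32; p = p->next) {
				if (p->closing)
					continue;	/* test_and_set_bit() analogue */
				p->closing = 1;
				p->refcnt++;	/* keep peer alive past unlock */
				batch[n++] = p;
			}
			pthread_mutex_unlock(&devices_lock);

			for (i = 0; i < n; i++) {
				close_peer(batch[i]);	/* lock is not held here */
				batch[i]->refcnt--;
			}
		} while (n);	/* re-scan until a pass collects nothing */
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++) {
			struct peer *p = calloc(1, sizeof(*p));

			if (!p)
				return 1;
			p->id = i;
			p->next = peers;
			peers = p;
		}
		disconnect_all();
		return 0;
	}

Looping until a pass collects nothing is what lets lists longer than one batch drain safely, which is exactly the role of the do/while around the chans[32] array in the patch.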
- config BT_LE bool "Bluetooth Low Energy (LE) features" depends on BT @@ -77,6 +70,17 @@ config BT_LE Bluetooth Low Energy includes support low-energy physical layer available with Bluetooth version 4.0 or later. +config BT_LE_L2CAP_ECRED + bool "Bluetooth L2CAP Enhanced Credit Flow Control" + depends on BT_LE + default y + help + Bluetooth Low Energy L2CAP Enhanced Credit Flow Control available with + Bluetooth version 5.2 or later. + + This can be overridden by passing bluetooth.enable_ecred=[1|0] + on the kernel commandline. + config BT_6LOWPAN tristate "Bluetooth 6LoWPAN support" depends on BT_LE && 6LOWPAN diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index a52bba8500e1..a7eede7616d8 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -14,11 +14,13 @@ bluetooth_6lowpan-y := 6lowpan.o bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ - ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \ - eir.o hci_sync.o + ecdh_helper.o mgmt_util.o mgmt_config.o hci_codec.o eir.o hci_sync.o \ + hci_drv.o + +bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o bluetooth-$(CONFIG_BT_BREDR) += sco.o -bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o +bluetooth-$(CONFIG_BT_LE) += iso.o bluetooth-$(CONFIG_BT_LEDS) += leds.o bluetooth-$(CONFIG_BT_MSFTEXT) += msft.o bluetooth-$(CONFIG_BT_AOSPEXT) += aosp.o diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c deleted file mode 100644 index 1fcc482397c3..000000000000 --- a/net/bluetooth/a2mp.c +++ /dev/null @@ -1,1054 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved. - Copyright (c) 2011,2012 Intel Corp. - -*/ - -#include <net/bluetooth/bluetooth.h> -#include <net/bluetooth/hci_core.h> -#include <net/bluetooth/l2cap.h> - -#include "hci_request.h" -#include "a2mp.h" -#include "amp.h" - -#define A2MP_FEAT_EXT 0x8000 - -/* Global AMP Manager list */ -static LIST_HEAD(amp_mgr_list); -static DEFINE_MUTEX(amp_mgr_list_lock); - -/* A2MP build & send command helper functions */ -static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data) -{ - struct a2mp_cmd *cmd; - int plen; - - plen = sizeof(*cmd) + len; - cmd = kzalloc(plen, GFP_KERNEL); - if (!cmd) - return NULL; - - cmd->code = code; - cmd->ident = ident; - cmd->len = cpu_to_le16(len); - - memcpy(cmd->data, data, len); - - return cmd; -} - -static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data) -{ - struct l2cap_chan *chan = mgr->a2mp_chan; - struct a2mp_cmd *cmd; - u16 total_len = len + sizeof(*cmd); - struct kvec iv; - struct msghdr msg; - - cmd = __a2mp_build(code, ident, len, data); - if (!cmd) - return; - - iv.iov_base = cmd; - iv.iov_len = total_len; - - memset(&msg, 0, sizeof(msg)); - - iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, total_len); - - l2cap_chan_send(chan, &msg, total_len); - - kfree(cmd); -} - -static u8 __next_ident(struct amp_mgr *mgr) -{ - if (++mgr->ident == 0) - mgr->ident = 1; - - return mgr->ident; -} - -static struct amp_mgr *amp_mgr_lookup_by_state(u8 state) -{ - struct amp_mgr *mgr; - - mutex_lock(&amp_mgr_list_lock); - list_for_each_entry(mgr, &amp_mgr_list, list) { - if (test_and_clear_bit(state, &mgr->state)) { - amp_mgr_get(mgr); - mutex_unlock(&amp_mgr_list_lock); - return mgr; - } - } - mutex_unlock(&amp_mgr_list_lock); - - return NULL; - } - -/* hci_dev_list shall be locked */ -static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl) -{ - struct
hci_dev *hdev; - int i = 1; - - cl[0].id = AMP_ID_BREDR; - cl[0].type = AMP_TYPE_BREDR; - cl[0].status = AMP_STATUS_BLUETOOTH_ONLY; - - list_for_each_entry(hdev, &hci_dev_list, list) { - if (hdev->dev_type == HCI_AMP) { - cl[i].id = hdev->id; - cl[i].type = hdev->amp_type; - if (test_bit(HCI_UP, &hdev->flags)) - cl[i].status = hdev->amp_status; - else - cl[i].status = AMP_STATUS_POWERED_DOWN; - i++; - } - } -} - -/* Processing A2MP messages */ -static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_cmd_rej *rej = (void *) skb->data; - - if (le16_to_cpu(hdr->len) < sizeof(*rej)) - return -EINVAL; - - BT_DBG("ident %u reason %d", hdr->ident, le16_to_cpu(rej->reason)); - - skb_pull(skb, sizeof(*rej)); - - return 0; -} - -static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_discov_req *req = (void *) skb->data; - u16 len = le16_to_cpu(hdr->len); - struct a2mp_discov_rsp *rsp; - u16 ext_feat; - u8 num_ctrl; - struct hci_dev *hdev; - - if (len < sizeof(*req)) - return -EINVAL; - - skb_pull(skb, sizeof(*req)); - - ext_feat = le16_to_cpu(req->ext_feat); - - BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat); - - /* check that packet is not broken for now */ - while (ext_feat & A2MP_FEAT_EXT) { - if (len < sizeof(ext_feat)) - return -EINVAL; - - ext_feat = get_unaligned_le16(skb->data); - BT_DBG("efm 0x%4.4x", ext_feat); - len -= sizeof(ext_feat); - skb_pull(skb, sizeof(ext_feat)); - } - - read_lock(&hci_dev_list_lock); - - /* at minimum the BR/EDR needs to be listed */ - num_ctrl = 1; - - list_for_each_entry(hdev, &hci_dev_list, list) { - if (hdev->dev_type == HCI_AMP) - num_ctrl++; - } - - len = struct_size(rsp, cl, num_ctrl); - rsp = kmalloc(len, GFP_ATOMIC); - if (!rsp) { - read_unlock(&hci_dev_list_lock); - return -ENOMEM; - } - - rsp->mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); - rsp->ext_feat = 0; - - __a2mp_add_cl(mgr, rsp->cl); - - read_unlock(&hci_dev_list_lock); - - a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp); - - kfree(rsp); - return 0; -} - -static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_discov_rsp *rsp = (void *) skb->data; - u16 len = le16_to_cpu(hdr->len); - struct a2mp_cl *cl; - u16 ext_feat; - bool found = false; - - if (len < sizeof(*rsp)) - return -EINVAL; - - len -= sizeof(*rsp); - skb_pull(skb, sizeof(*rsp)); - - ext_feat = le16_to_cpu(rsp->ext_feat); - - BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat); - - /* check that packet is not broken for now */ - while (ext_feat & A2MP_FEAT_EXT) { - if (len < sizeof(ext_feat)) - return -EINVAL; - - ext_feat = get_unaligned_le16(skb->data); - BT_DBG("efm 0x%4.4x", ext_feat); - len -= sizeof(ext_feat); - skb_pull(skb, sizeof(ext_feat)); - } - - cl = (void *) skb->data; - while (len >= sizeof(*cl)) { - BT_DBG("Remote AMP id %u type %u status %u", cl->id, cl->type, - cl->status); - - if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) { - struct a2mp_info_req req; - - found = true; - - memset(&req, 0, sizeof(req)); - - req.id = cl->id; - a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr), - sizeof(req), &req); - } - - len -= sizeof(*cl); - cl = skb_pull(skb, sizeof(*cl)); - } - - /* Fall back to L2CAP init sequence */ - if (!found) { - struct l2cap_conn *conn = mgr->l2cap_conn; - struct l2cap_chan *chan; - - mutex_lock(&conn->chan_lock); - - list_for_each_entry(chan, &conn->chan_l, list) { - - BT_DBG("chan %p state %s", 
chan, - state_to_string(chan->state)); - - if (chan->scid == L2CAP_CID_A2MP) - continue; - - l2cap_chan_lock(chan); - - if (chan->state == BT_CONNECT) - l2cap_send_conn_req(chan); - - l2cap_chan_unlock(chan); - } - - mutex_unlock(&conn->chan_lock); - } - - return 0; -} - -static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_cl *cl = (void *) skb->data; - - while (skb->len >= sizeof(*cl)) { - BT_DBG("Controller id %u type %u status %u", cl->id, cl->type, - cl->status); - cl = skb_pull(skb, sizeof(*cl)); - } - - /* TODO send A2MP_CHANGE_RSP */ - - return 0; -} - -static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status, - u16 opcode) -{ - BT_DBG("%s status 0x%2.2x", hdev->name, status); - - a2mp_send_getinfo_rsp(hdev); -} - -static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_info_req *req = (void *) skb->data; - struct hci_dev *hdev; - struct hci_request hreq; - int err = 0; - - if (le16_to_cpu(hdr->len) < sizeof(*req)) - return -EINVAL; - - BT_DBG("id %u", req->id); - - hdev = hci_dev_get(req->id); - if (!hdev || hdev->dev_type != HCI_AMP) { - struct a2mp_info_rsp rsp; - - memset(&rsp, 0, sizeof(rsp)); - - rsp.id = req->id; - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - - a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), - &rsp); - - goto done; - } - - set_bit(READ_LOC_AMP_INFO, &mgr->state); - hci_req_init(&hreq, hdev); - hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); - err = hci_req_run(&hreq, read_local_amp_info_complete); - if (err < 0) - a2mp_send_getinfo_rsp(hdev); - -done: - if (hdev) - hci_dev_put(hdev); - - skb_pull(skb, sizeof(*req)); - return 0; -} - -static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data; - struct a2mp_amp_assoc_req req; - struct amp_ctrl *ctrl; - - if (le16_to_cpu(hdr->len) < sizeof(*rsp)) - return -EINVAL; - - BT_DBG("id %u status 0x%2.2x", rsp->id, rsp->status); - - if (rsp->status) - return -EINVAL; - - ctrl = amp_ctrl_add(mgr, rsp->id); - if (!ctrl) - return -ENOMEM; - - memset(&req, 0, sizeof(req)); - - req.id = rsp->id; - a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req), - &req); - - skb_pull(skb, sizeof(*rsp)); - return 0; -} - -static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_amp_assoc_req *req = (void *) skb->data; - struct hci_dev *hdev; - struct amp_mgr *tmp; - - if (le16_to_cpu(hdr->len) < sizeof(*req)) - return -EINVAL; - - BT_DBG("id %u", req->id); - - /* Make sure that other request is not processed */ - tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC); - - hdev = hci_dev_get(req->id); - if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) { - struct a2mp_amp_assoc_rsp rsp; - - memset(&rsp, 0, sizeof(rsp)); - rsp.id = req->id; - - if (tmp) { - rsp.status = A2MP_STATUS_COLLISION_OCCURED; - amp_mgr_put(tmp); - } else { - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - } - - a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp), - &rsp); - - goto done; - } - - amp_read_loc_assoc(hdev, mgr); - -done: - if (hdev) - hci_dev_put(hdev); - - skb_pull(skb, sizeof(*req)); - return 0; -} - -static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data; - u16 len = le16_to_cpu(hdr->len); - struct hci_dev *hdev; - struct 
amp_ctrl *ctrl; - struct hci_conn *hcon; - size_t assoc_len; - - if (len < sizeof(*rsp)) - return -EINVAL; - - assoc_len = len - sizeof(*rsp); - - BT_DBG("id %u status 0x%2.2x assoc len %zu", rsp->id, rsp->status, - assoc_len); - - if (rsp->status) - return -EINVAL; - - /* Save remote ASSOC data */ - ctrl = amp_ctrl_lookup(mgr, rsp->id); - if (ctrl) { - u8 *assoc; - - assoc = kmemdup(rsp->amp_assoc, assoc_len, GFP_KERNEL); - if (!assoc) { - amp_ctrl_put(ctrl); - return -ENOMEM; - } - - ctrl->assoc = assoc; - ctrl->assoc_len = assoc_len; - ctrl->assoc_rem_len = assoc_len; - ctrl->assoc_len_so_far = 0; - - amp_ctrl_put(ctrl); - } - - /* Create Phys Link */ - hdev = hci_dev_get(rsp->id); - if (!hdev) - return -EINVAL; - - hcon = phylink_add(hdev, mgr, rsp->id, true); - if (!hcon) - goto done; - - BT_DBG("Created hcon %p: loc:%u -> rem:%u", hcon, hdev->id, rsp->id); - - mgr->bredr_chan->remote_amp_id = rsp->id; - - amp_create_phylink(hdev, mgr, hcon); - -done: - hci_dev_put(hdev); - skb_pull(skb, len); - return 0; -} - -static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_physlink_req *req = (void *) skb->data; - struct a2mp_physlink_rsp rsp; - struct hci_dev *hdev; - struct hci_conn *hcon; - struct amp_ctrl *ctrl; - - if (le16_to_cpu(hdr->len) < sizeof(*req)) - return -EINVAL; - - BT_DBG("local_id %u, remote_id %u", req->local_id, req->remote_id); - - memset(&rsp, 0, sizeof(rsp)); - - rsp.local_id = req->remote_id; - rsp.remote_id = req->local_id; - - hdev = hci_dev_get(req->remote_id); - if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) { - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - goto send_rsp; - } - - ctrl = amp_ctrl_lookup(mgr, rsp.remote_id); - if (!ctrl) { - ctrl = amp_ctrl_add(mgr, rsp.remote_id); - if (ctrl) { - amp_ctrl_get(ctrl); - } else { - rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; - goto send_rsp; - } - } - - if (ctrl) { - size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req); - u8 *assoc; - - assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL); - if (!assoc) { - amp_ctrl_put(ctrl); - hci_dev_put(hdev); - return -ENOMEM; - } - - ctrl->assoc = assoc; - ctrl->assoc_len = assoc_len; - ctrl->assoc_rem_len = assoc_len; - ctrl->assoc_len_so_far = 0; - - amp_ctrl_put(ctrl); - } - - hcon = phylink_add(hdev, mgr, req->local_id, false); - if (hcon) { - amp_accept_phylink(hdev, mgr, hcon); - rsp.status = A2MP_STATUS_SUCCESS; - } else { - rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; - } - -send_rsp: - if (hdev) - hci_dev_put(hdev); - - /* Reply error now and success after HCI Write Remote AMP Assoc - command complete with success status - */ - if (rsp.status != A2MP_STATUS_SUCCESS) { - a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, - sizeof(rsp), &rsp); - } else { - set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state); - mgr->ident = hdr->ident; - } - - skb_pull(skb, le16_to_cpu(hdr->len)); - return 0; -} - -static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_physlink_req *req = (void *) skb->data; - struct a2mp_physlink_rsp rsp; - struct hci_dev *hdev; - struct hci_conn *hcon; - - if (le16_to_cpu(hdr->len) < sizeof(*req)) - return -EINVAL; - - BT_DBG("local_id %u remote_id %u", req->local_id, req->remote_id); - - memset(&rsp, 0, sizeof(rsp)); - - rsp.local_id = req->remote_id; - rsp.remote_id = req->local_id; - rsp.status = A2MP_STATUS_SUCCESS; - - hdev = hci_dev_get(req->remote_id); - if (!hdev) { - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - 
goto send_rsp; - } - - hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, - &mgr->l2cap_conn->hcon->dst); - if (!hcon) { - bt_dev_err(hdev, "no phys link exist"); - rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS; - goto clean; - } - - /* TODO Disconnect Phys Link here */ - -clean: - hci_dev_put(hdev); - -send_rsp: - a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp); - - skb_pull(skb, sizeof(*req)); - return 0; -} - -static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - BT_DBG("ident %u code 0x%2.2x", hdr->ident, hdr->code); - - skb_pull(skb, le16_to_cpu(hdr->len)); - return 0; -} - -/* Handle A2MP signalling */ -static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) -{ - struct a2mp_cmd *hdr; - struct amp_mgr *mgr = chan->data; - int err = 0; - - amp_mgr_get(mgr); - - while (skb->len >= sizeof(*hdr)) { - u16 len; - - hdr = (void *) skb->data; - len = le16_to_cpu(hdr->len); - - BT_DBG("code 0x%2.2x id %u len %u", hdr->code, hdr->ident, len); - - skb_pull(skb, sizeof(*hdr)); - - if (len > skb->len || !hdr->ident) { - err = -EINVAL; - break; - } - - mgr->ident = hdr->ident; - - switch (hdr->code) { - case A2MP_COMMAND_REJ: - a2mp_command_rej(mgr, skb, hdr); - break; - - case A2MP_DISCOVER_REQ: - err = a2mp_discover_req(mgr, skb, hdr); - break; - - case A2MP_CHANGE_NOTIFY: - err = a2mp_change_notify(mgr, skb, hdr); - break; - - case A2MP_GETINFO_REQ: - err = a2mp_getinfo_req(mgr, skb, hdr); - break; - - case A2MP_GETAMPASSOC_REQ: - err = a2mp_getampassoc_req(mgr, skb, hdr); - break; - - case A2MP_CREATEPHYSLINK_REQ: - err = a2mp_createphyslink_req(mgr, skb, hdr); - break; - - case A2MP_DISCONNPHYSLINK_REQ: - err = a2mp_discphyslink_req(mgr, skb, hdr); - break; - - case A2MP_DISCOVER_RSP: - err = a2mp_discover_rsp(mgr, skb, hdr); - break; - - case A2MP_GETINFO_RSP: - err = a2mp_getinfo_rsp(mgr, skb, hdr); - break; - - case A2MP_GETAMPASSOC_RSP: - err = a2mp_getampassoc_rsp(mgr, skb, hdr); - break; - - case A2MP_CHANGE_RSP: - case A2MP_CREATEPHYSLINK_RSP: - case A2MP_DISCONNPHYSLINK_RSP: - err = a2mp_cmd_rsp(mgr, skb, hdr); - break; - - default: - BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code); - err = -EINVAL; - break; - } - } - - if (err) { - struct a2mp_cmd_rej rej; - - memset(&rej, 0, sizeof(rej)); - - rej.reason = cpu_to_le16(0); - hdr = (void *) skb->data; - - BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err); - - a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej), - &rej); - } - - /* Always free skb and return success error code to prevent - from sending L2CAP Disconnect over A2MP channel */ - kfree_skb(skb); - - amp_mgr_put(mgr); - - return 0; -} - -static void a2mp_chan_close_cb(struct l2cap_chan *chan) -{ - l2cap_chan_put(chan); -} - -static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state, - int err) -{ - struct amp_mgr *mgr = chan->data; - - if (!mgr) - return; - - BT_DBG("chan %p state %s", chan, state_to_string(state)); - - chan->state = state; - - switch (state) { - case BT_CLOSED: - if (mgr) - amp_mgr_put(mgr); - break; - } -} - -static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan, - unsigned long hdr_len, - unsigned long len, int nb) -{ - struct sk_buff *skb; - - skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL); - if (!skb) - return ERR_PTR(-ENOMEM); - - return skb; -} - -static const struct l2cap_ops a2mp_chan_ops = { - .name = "L2CAP A2MP channel", - .recv = a2mp_chan_recv_cb, - .close = a2mp_chan_close_cb, - .state_change = 
a2mp_chan_state_change_cb, - .alloc_skb = a2mp_chan_alloc_skb_cb, - - /* Not implemented for A2MP */ - .new_connection = l2cap_chan_no_new_connection, - .teardown = l2cap_chan_no_teardown, - .ready = l2cap_chan_no_ready, - .defer = l2cap_chan_no_defer, - .resume = l2cap_chan_no_resume, - .set_shutdown = l2cap_chan_no_set_shutdown, - .get_sndtimeo = l2cap_chan_no_get_sndtimeo, -}; - -static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked) -{ - struct l2cap_chan *chan; - int err; - - chan = l2cap_chan_create(); - if (!chan) - return NULL; - - BT_DBG("chan %p", chan); - - chan->chan_type = L2CAP_CHAN_FIXED; - chan->scid = L2CAP_CID_A2MP; - chan->dcid = L2CAP_CID_A2MP; - chan->omtu = L2CAP_A2MP_DEFAULT_MTU; - chan->imtu = L2CAP_A2MP_DEFAULT_MTU; - chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; - - chan->ops = &a2mp_chan_ops; - - l2cap_chan_set_defaults(chan); - chan->remote_max_tx = chan->max_tx; - chan->remote_tx_win = chan->tx_win; - - chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; - chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; - - skb_queue_head_init(&chan->tx_q); - - chan->mode = L2CAP_MODE_ERTM; - - err = l2cap_ertm_init(chan); - if (err < 0) { - l2cap_chan_del(chan, 0); - return NULL; - } - - chan->conf_state = 0; - - if (locked) - __l2cap_chan_add(conn, chan); - else - l2cap_chan_add(conn, chan); - - chan->remote_mps = chan->omtu; - chan->mps = chan->omtu; - - chan->state = BT_CONNECTED; - - return chan; -} - -/* AMP Manager functions */ -struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr) -{ - BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref)); - - kref_get(&mgr->kref); - - return mgr; -} - -static void amp_mgr_destroy(struct kref *kref) -{ - struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref); - - BT_DBG("mgr %p", mgr); - - mutex_lock(&amp_mgr_list_lock); - list_del(&mgr->list); - mutex_unlock(&amp_mgr_list_lock); - - amp_ctrl_list_flush(mgr); - kfree(mgr); -} - -int amp_mgr_put(struct amp_mgr *mgr) -{ - BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref)); - - return kref_put(&mgr->kref, &amp_mgr_destroy); -} - -static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked) -{ - struct amp_mgr *mgr; - struct l2cap_chan *chan; - - mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); - if (!mgr) - return NULL; - - BT_DBG("conn %p mgr %p", conn, mgr); - - mgr->l2cap_conn = conn; - - chan = a2mp_chan_open(conn, locked); - if (!chan) { - kfree(mgr); - return NULL; - } - - mgr->a2mp_chan = chan; - chan->data = mgr; - - conn->hcon->amp_mgr = mgr; - - kref_init(&mgr->kref); - - /* Remote AMP ctrl list initialization */ - INIT_LIST_HEAD(&mgr->amp_ctrls); - mutex_init(&mgr->amp_ctrls_lock); - - mutex_lock(&amp_mgr_list_lock); - list_add(&mgr->list, &amp_mgr_list); - mutex_unlock(&amp_mgr_list_lock); - - return mgr; -} - -struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, - struct sk_buff *skb) -{ - struct amp_mgr *mgr; - - if (conn->hcon->type != ACL_LINK) - return NULL; - - mgr = amp_mgr_create(conn, false); - if (!mgr) { - BT_ERR("Could not create AMP manager"); - return NULL; - } - - BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan); - - return mgr->a2mp_chan; -} - -void a2mp_send_getinfo_rsp(struct hci_dev *hdev) -{ - struct amp_mgr *mgr; - struct a2mp_info_rsp rsp; - - mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_INFO); - if (!mgr) - return; - - BT_DBG("%s mgr %p", hdev->name, mgr); - - memset(&rsp, 0, sizeof(rsp)); - - rsp.id = hdev->id; - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - - if (hdev->amp_type != AMP_TYPE_BREDR) { - rsp.status = 0; -
rsp.total_bw = cpu_to_le32(hdev->amp_total_bw); - rsp.max_bw = cpu_to_le32(hdev->amp_max_bw); - rsp.min_latency = cpu_to_le32(hdev->amp_min_latency); - rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap); - rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size); - } - - a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp); - amp_mgr_put(mgr); -} - -void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status) -{ - struct amp_mgr *mgr; - struct amp_assoc *loc_assoc = &hdev->loc_assoc; - struct a2mp_amp_assoc_rsp *rsp; - size_t len; - - mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC); - if (!mgr) - return; - - BT_DBG("%s mgr %p", hdev->name, mgr); - - len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len; - rsp = kzalloc(len, GFP_KERNEL); - if (!rsp) { - amp_mgr_put(mgr); - return; - } - - rsp->id = hdev->id; - - if (status) { - rsp->status = A2MP_STATUS_INVALID_CTRL_ID; - } else { - rsp->status = A2MP_STATUS_SUCCESS; - memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len); - } - - a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp); - amp_mgr_put(mgr); - kfree(rsp); -} - -void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status) -{ - struct amp_mgr *mgr; - struct amp_assoc *loc_assoc = &hdev->loc_assoc; - struct a2mp_physlink_req *req; - struct l2cap_chan *bredr_chan; - size_t len; - - mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL); - if (!mgr) - return; - - len = sizeof(*req) + loc_assoc->len; - - BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len); - - req = kzalloc(len, GFP_KERNEL); - if (!req) { - amp_mgr_put(mgr); - return; - } - - bredr_chan = mgr->bredr_chan; - if (!bredr_chan) - goto clean; - - req->local_id = hdev->id; - req->remote_id = bredr_chan->remote_amp_id; - memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len); - - a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req); - -clean: - amp_mgr_put(mgr); - kfree(req); -} - -void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status) -{ - struct amp_mgr *mgr; - struct a2mp_physlink_rsp rsp; - struct hci_conn *hs_hcon; - - mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC); - if (!mgr) - return; - - memset(&rsp, 0, sizeof(rsp)); - - hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT); - if (!hs_hcon) { - rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; - } else { - rsp.remote_id = hs_hcon->remote_id; - rsp.status = A2MP_STATUS_SUCCESS; - } - - BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon, - status); - - rsp.local_id = hdev->id; - a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp); - amp_mgr_put(mgr); -} - -void a2mp_discover_amp(struct l2cap_chan *chan) -{ - struct l2cap_conn *conn = chan->conn; - struct amp_mgr *mgr = conn->hcon->amp_mgr; - struct a2mp_discov_req req; - - BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr); - - if (!mgr) { - mgr = amp_mgr_create(conn, true); - if (!mgr) - return; - } - - mgr->bredr_chan = chan; - - memset(&req, 0, sizeof(req)); - - req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); - req.ext_feat = 0; - a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req); -} diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h deleted file mode 100644 index 2fd253a61a2a..000000000000 --- a/net/bluetooth/a2mp.h +++ /dev/null @@ -1,154 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved. - Copyright (c) 2011,2012 Intel Corp. 
- -*/ - -#ifndef __A2MP_H -#define __A2MP_H - -#include <net/bluetooth/l2cap.h> - -enum amp_mgr_state { - READ_LOC_AMP_INFO, - READ_LOC_AMP_ASSOC, - READ_LOC_AMP_ASSOC_FINAL, - WRITE_REMOTE_AMP_ASSOC, -}; - -struct amp_mgr { - struct list_head list; - struct l2cap_conn *l2cap_conn; - struct l2cap_chan *a2mp_chan; - struct l2cap_chan *bredr_chan; - struct kref kref; - __u8 ident; - __u8 handle; - unsigned long state; - unsigned long flags; - - struct list_head amp_ctrls; - struct mutex amp_ctrls_lock; -}; - -struct a2mp_cmd { - __u8 code; - __u8 ident; - __le16 len; - __u8 data[]; -} __packed; - -/* A2MP command codes */ -#define A2MP_COMMAND_REJ 0x01 -struct a2mp_cmd_rej { - __le16 reason; - __u8 data[]; -} __packed; - -#define A2MP_DISCOVER_REQ 0x02 -struct a2mp_discov_req { - __le16 mtu; - __le16 ext_feat; -} __packed; - -struct a2mp_cl { - __u8 id; - __u8 type; - __u8 status; -} __packed; - -#define A2MP_DISCOVER_RSP 0x03 -struct a2mp_discov_rsp { - __le16 mtu; - __le16 ext_feat; - struct a2mp_cl cl[]; -} __packed; - -#define A2MP_CHANGE_NOTIFY 0x04 -#define A2MP_CHANGE_RSP 0x05 - -#define A2MP_GETINFO_REQ 0x06 -struct a2mp_info_req { - __u8 id; -} __packed; - -#define A2MP_GETINFO_RSP 0x07 -struct a2mp_info_rsp { - __u8 id; - __u8 status; - __le32 total_bw; - __le32 max_bw; - __le32 min_latency; - __le16 pal_cap; - __le16 assoc_size; -} __packed; - -#define A2MP_GETAMPASSOC_REQ 0x08 -struct a2mp_amp_assoc_req { - __u8 id; -} __packed; - -#define A2MP_GETAMPASSOC_RSP 0x09 -struct a2mp_amp_assoc_rsp { - __u8 id; - __u8 status; - __u8 amp_assoc[]; -} __packed; - -#define A2MP_CREATEPHYSLINK_REQ 0x0A -#define A2MP_DISCONNPHYSLINK_REQ 0x0C -struct a2mp_physlink_req { - __u8 local_id; - __u8 remote_id; - __u8 amp_assoc[]; -} __packed; - -#define A2MP_CREATEPHYSLINK_RSP 0x0B -#define A2MP_DISCONNPHYSLINK_RSP 0x0D -struct a2mp_physlink_rsp { - __u8 local_id; - __u8 remote_id; - __u8 status; -} __packed; - -/* A2MP response status */ -#define A2MP_STATUS_SUCCESS 0x00 -#define A2MP_STATUS_INVALID_CTRL_ID 0x01 -#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02 -#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02 -#define A2MP_STATUS_COLLISION_OCCURED 0x03 -#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04 -#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05 -#define A2MP_STATUS_SECURITY_VIOLATION 0x06 - -struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr); - -#if IS_ENABLED(CONFIG_BT_HS) -int amp_mgr_put(struct amp_mgr *mgr); -struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, - struct sk_buff *skb); -void a2mp_discover_amp(struct l2cap_chan *chan); -#else -static inline int amp_mgr_put(struct amp_mgr *mgr) -{ - return 0; -} - -static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, - struct sk_buff *skb) -{ - return NULL; -} - -static inline void a2mp_discover_amp(struct l2cap_chan *chan) -{ -} -#endif - -void a2mp_send_getinfo_rsp(struct hci_dev *hdev); -void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status); -void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status); -void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status); - -#endif /* __A2MP_H */ diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index ee319779781e..2b94e2077203 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -34,11 +34,14 @@ #include <net/bluetooth/bluetooth.h> #include <linux/proc_fs.h> +#include <linux/ethtool.h> +#include <linux/sockios.h> + #include "leds.h" #include "selftest.h" /* Bluetooth sockets */ -#define 
BT_MAX_PROTO 8 +#define BT_MAX_PROTO (BTPROTO_LAST + 1) static const struct net_proto_family *bt_proto[BT_MAX_PROTO]; static DEFINE_RWLOCK(bt_proto_lock); @@ -52,6 +55,7 @@ static const char *const bt_key_strings[BT_MAX_PROTO] = { "sk_lock-AF_BLUETOOTH-BTPROTO_CMTP", "sk_lock-AF_BLUETOOTH-BTPROTO_HIDP", "sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP", + "sk_lock-AF_BLUETOOTH-BTPROTO_ISO", }; static struct lock_class_key bt_slock_key[BT_MAX_PROTO]; @@ -64,6 +68,7 @@ static const char *const bt_slock_key_strings[BT_MAX_PROTO] = { "slock-AF_BLUETOOTH-BTPROTO_CMTP", "slock-AF_BLUETOOTH-BTPROTO_HIDP", "slock-AF_BLUETOOTH-BTPROTO_AVDTP", + "slock-AF_BLUETOOTH-BTPROTO_ISO", }; void bt_sock_reclassify_lock(struct sock *sk, int proto) @@ -138,6 +143,35 @@ static int bt_sock_create(struct net *net, struct socket *sock, int proto, return err; } +struct sock *bt_sock_alloc(struct net *net, struct socket *sock, + struct proto *prot, int proto, gfp_t prio, int kern) +{ + struct sock *sk; + + sk = sk_alloc(net, PF_BLUETOOTH, prio, prot, kern); + if (!sk) + return NULL; + + sock_init_data(sock, sk); + INIT_LIST_HEAD(&bt_sk(sk)->accept_q); + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = proto; + sk->sk_state = BT_OPEN; + + /* Init peer information so it can be properly monitored */ + if (!kern) { + spin_lock(&sk->sk_peer_lock); + sk->sk_peer_pid = get_pid(task_tgid(current)); + sk->sk_peer_cred = get_current_cred(); + spin_unlock(&sk->sk_peer_lock); + } + + return sk; +} +EXPORT_SYMBOL(bt_sock_alloc); + void bt_sock_link(struct bt_sock_list *l, struct sock *sk) { write_lock(&l->lock); @@ -154,8 +188,33 @@ void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk) } EXPORT_SYMBOL(bt_sock_unlink); +bool bt_sock_linked(struct bt_sock_list *l, struct sock *s) +{ + struct sock *sk; + + if (!l || !s) + return false; + + read_lock(&l->lock); + + sk_for_each(sk, &l->head) { + if (s == sk) { + read_unlock(&l->lock); + return true; + } + } + + read_unlock(&l->lock); + + return false; +} +EXPORT_SYMBOL(bt_sock_linked); + void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh) { + const struct cred *old_cred; + struct pid *old_pid; + BT_DBG("parent %p, sk %p", parent, sk); sock_hold(sk); @@ -168,6 +227,19 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh) list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q); bt_sk(sk)->parent = parent; + /* Copy credentials from parent since for incoming connections the + * socket is allocated by the kernel. 
+ */ + spin_lock(&sk->sk_peer_lock); + old_pid = sk->sk_peer_pid; + old_cred = sk->sk_peer_cred; + sk->sk_peer_pid = get_pid(parent->sk_peer_pid); + sk->sk_peer_cred = get_cred(parent->sk_peer_cred); + spin_unlock(&sk->sk_peer_lock); + + put_pid(old_pid); + put_cred(old_cred); + if (bh) bh_unlock_sock(sk); else @@ -251,7 +323,6 @@ EXPORT_SYMBOL(bt_accept_dequeue); int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { - int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; size_t copied; @@ -263,10 +334,10 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (flags & MSG_OOB) return -EOPNOTSUPP; - skb = skb_recv_datagram(sk, flags, noblock, &err); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) - return 0; + err = 0; return err; } @@ -281,14 +352,25 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, skb_reset_transport_header(skb); err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err == 0) { - sock_recv_ts_and_drops(msg, sk, skb); + sock_recv_cmsgs(msg, sk, skb); if (msg->msg_name && bt_sk(sk)->skb_msg_name) bt_sk(sk)->skb_msg_name(skb, msg->msg_name, &msg->msg_namelen); - if (bt_sk(sk)->skb_put_cmsg) - bt_sk(sk)->skb_put_cmsg(skb, msg, sk); + if (test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags)) { + u8 pkt_status = hci_skb_pkt_status(skb); + + put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS, + sizeof(pkt_status), &pkt_status); + } + + if (test_bit(BT_SK_PKT_SEQNUM, &bt_sk(sk)->flags)) { + u16 pkt_seqnum = hci_skb_pkt_seqnum(skb); + + put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_SEQNUM, + sizeof(pkt_seqnum), &pkt_seqnum); + } } skb_free_datagram(sk, skb); @@ -385,7 +467,7 @@ int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, copied += chunk; size -= chunk; - sock_recv_ts_and_drops(msg, sk, skb); + sock_recv_cmsgs(msg, sk, skb); if (!(flags & MSG_PEEK)) { int skb_len = skb_headlen(skb); @@ -491,6 +573,86 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock, } EXPORT_SYMBOL(bt_sock_poll); +static int bt_ethtool_get_ts_info(struct sock *sk, unsigned int index, + void __user *useraddr) +{ + struct ethtool_ts_info info; + struct kernel_ethtool_ts_info ts_info = {}; + int ret; + + ret = hci_ethtool_ts_info(index, sk->sk_protocol, &ts_info); + if (ret == -ENODEV) + return ret; + else if (ret < 0) + return -EIO; + + memset(&info, 0, sizeof(info)); + + info.cmd = ETHTOOL_GET_TS_INFO; + info.so_timestamping = ts_info.so_timestamping; + info.phc_index = ts_info.phc_index; + info.tx_types = ts_info.tx_types; + info.rx_filters = ts_info.rx_filters; + + if (copy_to_user(useraddr, &info, sizeof(info))) + return -EFAULT; + + return 0; +} + +static int bt_ethtool(struct sock *sk, const struct ifreq *ifr, + void __user *useraddr) +{ + unsigned int index; + u32 ethcmd; + int n; + + if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) + return -EFAULT; + + if (sscanf(ifr->ifr_name, "hci%u%n", &index, &n) != 1 || + n != strlen(ifr->ifr_name)) + return -ENODEV; + + switch (ethcmd) { + case ETHTOOL_GET_TS_INFO: + return bt_ethtool_get_ts_info(sk, index, useraddr); + } + + return -EOPNOTSUPP; +} + +static int bt_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg) +{ + struct sock *sk = sock->sk; + struct ifreq ifr = {}; + void __user *data; + char *colon; + int ret = -ENOIOCTLCMD; + + if (get_user_ifreq(&ifr, &data, arg)) + return -EFAULT; + + ifr.ifr_name[IFNAMSIZ - 1] = 0; + colon = strchr(ifr.ifr_name, ':'); + if
(colon) + *colon = 0; + + switch (cmd) { + case SIOCETHTOOL: + ret = bt_ethtool(sk, &ifr, data); + break; + } + + if (colon) + *colon = ':'; + + if (put_user_ifreq(&ifr, arg)) + return -EFAULT; + + return ret; +} + int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; @@ -515,13 +677,18 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) if (sk->sk_state == BT_LISTEN) return -EINVAL; - lock_sock(sk); + spin_lock(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); amount = skb ? skb->len : 0; - release_sock(sk); + spin_unlock(&sk->sk_receive_queue.lock); + err = put_user(amount, (int __user *)arg); break; + case SIOCETHTOOL: + err = bt_dev_ioctl(sock, cmd, (void __user *)arg); + break; + default: err = -ENOIOCTLCMD; break; @@ -568,7 +735,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) EXPORT_SYMBOL(bt_sock_wait_state); /* This function expects the sk lock to be held when called */ -int bt_sock_wait_ready(struct sock *sk, unsigned long flags) +int bt_sock_wait_ready(struct sock *sk, unsigned int msg_flags) { DECLARE_WAITQUEUE(wait, current); unsigned long timeo; @@ -576,7 +743,7 @@ int bt_sock_wait_ready(struct sock *sk, unsigned long flags) BT_DBG("sk %p", sk); - timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); + timeo = sock_sndtimeo(sk, !!(msg_flags & MSG_DONTWAIT)); add_wait_queue(sk_sleep(sk), &wait); set_current_state(TASK_INTERRUPTIBLE); @@ -655,7 +822,7 @@ static int bt_seq_show(struct seq_file *seq, void *v) refcount_read(&sk->sk_refcnt), sk_rmem_alloc_get(sk), sk_wmem_alloc_get(sk), - from_kuid(seq_user_ns(seq), sock_i_uid(sk)), + from_kuid(seq_user_ns(seq), sk_uid(sk)), sock_i_ino(sk), bt->parent ? sock_i_ino(bt->parent) : 0LU); @@ -736,7 +903,7 @@ static int __init bt_init(void) err = bt_sysfs_init(); if (err < 0) - return err; + goto cleanup_led; err = sock_register(&bt_sock_family_ops); if (err) @@ -772,11 +939,16 @@ unregister_socket: sock_unregister(PF_BLUETOOTH); cleanup_sysfs: bt_sysfs_cleanup(); +cleanup_led: + bt_leds_cleanup(); + debugfs_remove_recursive(bt_debugfs); return err; } static void __exit bt_exit(void) { + iso_exit(); + mgmt_exit(); sco_exit(); diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c deleted file mode 100644 index 2134f92bd7ac..000000000000 --- a/net/bluetooth/amp.c +++ /dev/null @@ -1,591 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - Copyright (c) 2011,2012 Intel Corp. 
- -*/ - -#include <net/bluetooth/bluetooth.h> -#include <net/bluetooth/hci.h> -#include <net/bluetooth/hci_core.h> -#include <crypto/hash.h> - -#include "hci_request.h" -#include "a2mp.h" -#include "amp.h" - -/* Remote AMP Controllers interface */ -void amp_ctrl_get(struct amp_ctrl *ctrl) -{ - BT_DBG("ctrl %p orig refcnt %d", ctrl, - kref_read(&ctrl->kref)); - - kref_get(&ctrl->kref); -} - -static void amp_ctrl_destroy(struct kref *kref) -{ - struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref); - - BT_DBG("ctrl %p", ctrl); - - kfree(ctrl->assoc); - kfree(ctrl); -} - -int amp_ctrl_put(struct amp_ctrl *ctrl) -{ - BT_DBG("ctrl %p orig refcnt %d", ctrl, - kref_read(&ctrl->kref)); - - return kref_put(&ctrl->kref, &amp_ctrl_destroy); -} - -struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id) -{ - struct amp_ctrl *ctrl; - - ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); - if (!ctrl) - return NULL; - - kref_init(&ctrl->kref); - ctrl->id = id; - - mutex_lock(&mgr->amp_ctrls_lock); - list_add(&ctrl->list, &mgr->amp_ctrls); - mutex_unlock(&mgr->amp_ctrls_lock); - - BT_DBG("mgr %p ctrl %p", mgr, ctrl); - - return ctrl; -} - -void amp_ctrl_list_flush(struct amp_mgr *mgr) -{ - struct amp_ctrl *ctrl, *n; - - BT_DBG("mgr %p", mgr); - - mutex_lock(&mgr->amp_ctrls_lock); - list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) { - list_del(&ctrl->list); - amp_ctrl_put(ctrl); - } - mutex_unlock(&mgr->amp_ctrls_lock); -} - -struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id) -{ - struct amp_ctrl *ctrl; - - BT_DBG("mgr %p id %u", mgr, id); - - mutex_lock(&mgr->amp_ctrls_lock); - list_for_each_entry(ctrl, &mgr->amp_ctrls, list) { - if (ctrl->id == id) { - amp_ctrl_get(ctrl); - mutex_unlock(&mgr->amp_ctrls_lock); - return ctrl; - } - } - mutex_unlock(&mgr->amp_ctrls_lock); - - return NULL; -} - -/* Physical Link interface */ -static u8 __next_handle(struct amp_mgr *mgr) -{ - if (++mgr->handle == 0) - mgr->handle = 1; - - return mgr->handle; -} - -struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr, - u8 remote_id, bool out) -{ - bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst; - struct hci_conn *hcon; - u8 role = out ?
HCI_ROLE_MASTER : HCI_ROLE_SLAVE; - - hcon = hci_conn_add(hdev, AMP_LINK, dst, role); - if (!hcon) - return NULL; - - BT_DBG("hcon %p dst %pMR", hcon, dst); - - hcon->state = BT_CONNECT; - hcon->attempt++; - hcon->handle = __next_handle(mgr); - hcon->remote_id = remote_id; - hcon->amp_mgr = amp_mgr_get(mgr); - - return hcon; -} - -/* AMP crypto key generation interface */ -static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output) -{ - struct crypto_shash *tfm; - struct shash_desc *shash; - int ret; - - if (!ksize) - return -EINVAL; - - tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); - if (IS_ERR(tfm)) { - BT_DBG("crypto_alloc_ahash failed: err %ld", PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - - ret = crypto_shash_setkey(tfm, key, ksize); - if (ret) { - BT_DBG("crypto_ahash_setkey failed: err %d", ret); - goto failed; - } - - shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm), - GFP_KERNEL); - if (!shash) { - ret = -ENOMEM; - goto failed; - } - - shash->tfm = tfm; - - ret = crypto_shash_digest(shash, plaintext, psize, output); - - kfree(shash); - -failed: - crypto_free_shash(tfm); - return ret; -} - -int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type) -{ - struct hci_dev *hdev = conn->hdev; - struct link_key *key; - u8 keybuf[HCI_AMP_LINK_KEY_SIZE]; - u8 gamp_key[HCI_AMP_LINK_KEY_SIZE]; - int err; - - if (!hci_conn_check_link_mode(conn)) - return -EACCES; - - BT_DBG("conn %p key_type %d", conn, conn->key_type); - - /* Legacy key */ - if (conn->key_type < 3) { - bt_dev_err(hdev, "legacy key type %u", conn->key_type); - return -EACCES; - } - - *type = conn->key_type; - *len = HCI_AMP_LINK_KEY_SIZE; - - key = hci_find_link_key(hdev, &conn->dst); - if (!key) { - BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst); - return -EACCES; - } - - /* BR/EDR Link Key concatenated together with itself */ - memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE); - memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE); - - /* Derive Generic AMP Link Key (gamp) */ - err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key); - if (err) { - bt_dev_err(hdev, "could not derive Generic AMP Key: err %d", err); - return err; - } - - if (conn->key_type == HCI_LK_DEBUG_COMBINATION) { - BT_DBG("Use Generic AMP Key (gamp)"); - memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE); - return err; - } - - /* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */ - return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data); -} - -static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status, - u16 opcode, struct sk_buff *skb) -{ - struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data; - struct amp_assoc *assoc = &hdev->loc_assoc; - size_t rem_len, frag_len; - - BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); - - if (rp->status) - goto send_rsp; - - frag_len = skb->len - sizeof(*rp); - rem_len = __le16_to_cpu(rp->rem_len); - - if (rem_len > frag_len) { - BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len); - - memcpy(assoc->data + assoc->offset, rp->frag, frag_len); - assoc->offset += frag_len; - - /* Read other fragments */ - amp_read_loc_assoc_frag(hdev, rp->phy_handle); - - return; - } - - memcpy(assoc->data + assoc->offset, rp->frag, rem_len); - assoc->len = assoc->offset + rem_len; - assoc->offset = 0; - -send_rsp: - /* Send A2MP Rsp when all fragments are received */ - a2mp_send_getampassoc_rsp(hdev, rp->status); - a2mp_send_create_phy_link_req(hdev, rp->status); -} - -void amp_read_loc_assoc_frag(struct 
hci_dev *hdev, u8 phy_handle) -{ - struct hci_cp_read_local_amp_assoc cp; - struct amp_assoc *loc_assoc = &hdev->loc_assoc; - struct hci_request req; - int err; - - BT_DBG("%s handle %u", hdev->name, phy_handle); - - cp.phy_handle = phy_handle; - cp.max_len = cpu_to_le16(hdev->amp_assoc_size); - cp.len_so_far = cpu_to_le16(loc_assoc->offset); - - hci_req_init(&req, hdev); - hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); - err = hci_req_run_skb(&req, read_local_amp_assoc_complete); - if (err < 0) - a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID); -} - -void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr) -{ - struct hci_cp_read_local_amp_assoc cp; - struct hci_request req; - int err; - - memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc)); - memset(&cp, 0, sizeof(cp)); - - cp.max_len = cpu_to_le16(hdev->amp_assoc_size); - - set_bit(READ_LOC_AMP_ASSOC, &mgr->state); - hci_req_init(&req, hdev); - hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); - err = hci_req_run_skb(&req, read_local_amp_assoc_complete); - if (err < 0) - a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID); -} - -void amp_read_loc_assoc_final_data(struct hci_dev *hdev, - struct hci_conn *hcon) -{ - struct hci_cp_read_local_amp_assoc cp; - struct amp_mgr *mgr = hcon->amp_mgr; - struct hci_request req; - int err; - - if (!mgr) - return; - - cp.phy_handle = hcon->handle; - cp.len_so_far = cpu_to_le16(0); - cp.max_len = cpu_to_le16(hdev->amp_assoc_size); - - set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state); - - /* Read Local AMP Assoc final link information data */ - hci_req_init(&req, hdev); - hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); - err = hci_req_run_skb(&req, read_local_amp_assoc_complete); - if (err < 0) - a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID); -} - -static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status, - u16 opcode, struct sk_buff *skb) -{ - struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data; - - BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x", - hdev->name, rp->status, rp->phy_handle); - - if (rp->status) - return; - - amp_write_rem_assoc_continue(hdev, rp->phy_handle); -} - -/* Write AMP Assoc data fragments, returns true with last fragment written*/ -static bool amp_write_rem_assoc_frag(struct hci_dev *hdev, - struct hci_conn *hcon) -{ - struct hci_cp_write_remote_amp_assoc *cp; - struct amp_mgr *mgr = hcon->amp_mgr; - struct amp_ctrl *ctrl; - struct hci_request req; - u16 frag_len, len; - - ctrl = amp_ctrl_lookup(mgr, hcon->remote_id); - if (!ctrl) - return false; - - if (!ctrl->assoc_rem_len) { - BT_DBG("all fragments are written"); - ctrl->assoc_rem_len = ctrl->assoc_len; - ctrl->assoc_len_so_far = 0; - - amp_ctrl_put(ctrl); - return true; - } - - frag_len = min_t(u16, 248, ctrl->assoc_rem_len); - len = frag_len + sizeof(*cp); - - cp = kzalloc(len, GFP_KERNEL); - if (!cp) { - amp_ctrl_put(ctrl); - return false; - } - - BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u", - hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len); - - cp->phy_handle = hcon->handle; - cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far); - cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len); - memcpy(cp->frag, ctrl->assoc, frag_len); - - ctrl->assoc_len_so_far += frag_len; - ctrl->assoc_rem_len -= frag_len; - - amp_ctrl_put(ctrl); - - hci_req_init(&req, hdev); - hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp); - hci_req_run_skb(&req, write_remote_amp_assoc_complete); - - 
kfree(cp); - - return false; -} - -void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle) -{ - struct hci_conn *hcon; - - BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle); - - hcon = hci_conn_hash_lookup_handle(hdev, handle); - if (!hcon) - return; - - /* Send A2MP create phylink rsp when all fragments are written */ - if (amp_write_rem_assoc_frag(hdev, hcon)) - a2mp_send_create_phy_link_rsp(hdev, 0); -} - -void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle) -{ - struct hci_conn *hcon; - - BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle); - - hcon = hci_conn_hash_lookup_handle(hdev, handle); - if (!hcon) - return; - - BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon); - - amp_write_rem_assoc_frag(hdev, hcon); -} - -static void create_phylink_complete(struct hci_dev *hdev, u8 status, - u16 opcode) -{ - struct hci_cp_create_phy_link *cp; - - BT_DBG("%s status 0x%2.2x", hdev->name, status); - - cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK); - if (!cp) - return; - - hci_dev_lock(hdev); - - if (status) { - struct hci_conn *hcon; - - hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle); - if (hcon) - hci_conn_del(hcon); - } else { - amp_write_remote_assoc(hdev, cp->phy_handle); - } - - hci_dev_unlock(hdev); -} - -void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, - struct hci_conn *hcon) -{ - struct hci_cp_create_phy_link cp; - struct hci_request req; - - cp.phy_handle = hcon->handle; - - BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon, - hcon->handle); - - if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len, - &cp.key_type)) { - BT_DBG("Cannot create link key"); - return; - } - - hci_req_init(&req, hdev); - hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp); - hci_req_run(&req, create_phylink_complete); -} - -static void accept_phylink_complete(struct hci_dev *hdev, u8 status, - u16 opcode) -{ - struct hci_cp_accept_phy_link *cp; - - BT_DBG("%s status 0x%2.2x", hdev->name, status); - - if (status) - return; - - cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK); - if (!cp) - return; - - amp_write_remote_assoc(hdev, cp->phy_handle); -} - -void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, - struct hci_conn *hcon) -{ - struct hci_cp_accept_phy_link cp; - struct hci_request req; - - cp.phy_handle = hcon->handle; - - BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon, - hcon->handle); - - if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len, - &cp.key_type)) { - BT_DBG("Cannot create link key"); - return; - } - - hci_req_init(&req, hdev); - hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp); - hci_req_run(&req, accept_phylink_complete); -} - -void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon) -{ - struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev); - struct amp_mgr *mgr = hs_hcon->amp_mgr; - struct l2cap_chan *bredr_chan; - - BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr); - - if (!bredr_hdev || !mgr || !mgr->bredr_chan) - return; - - bredr_chan = mgr->bredr_chan; - - l2cap_chan_lock(bredr_chan); - - set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags); - bredr_chan->remote_amp_id = hs_hcon->remote_id; - bredr_chan->local_amp_id = hs_hcon->hdev->id; - bredr_chan->hs_hcon = hs_hcon; - bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu; - - __l2cap_physical_cfm(bredr_chan, 0); - - l2cap_chan_unlock(bredr_chan); - - hci_dev_put(bredr_hdev); -} - -void amp_create_logical_link(struct l2cap_chan *chan) -{ - struct 
hci_conn *hs_hcon = chan->hs_hcon; - struct hci_cp_create_accept_logical_link cp; - struct hci_dev *hdev; - - BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, - &chan->conn->hcon->dst); - - if (!hs_hcon) - return; - - hdev = hci_dev_hold(chan->hs_hcon->hdev); - if (!hdev) - return; - - cp.phy_handle = hs_hcon->handle; - - cp.tx_flow_spec.id = chan->local_id; - cp.tx_flow_spec.stype = chan->local_stype; - cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu); - cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime); - cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat); - cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to); - - cp.rx_flow_spec.id = chan->remote_id; - cp.rx_flow_spec.stype = chan->remote_stype; - cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu); - cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime); - cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat); - cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to); - - if (hs_hcon->out) - hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp), - &cp); - else - hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp), - &cp); - - hci_dev_put(hdev); -} - -void amp_disconnect_logical_link(struct hci_chan *hchan) -{ - struct hci_conn *hcon = hchan->conn; - struct hci_cp_disconn_logical_link cp; - - if (hcon->state != BT_CONNECTED) { - BT_DBG("hchan %p not connected", hchan); - return; - } - - cp.log_handle = cpu_to_le16(hchan->handle); - hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp); -} - -void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason) -{ - BT_DBG("hchan %p", hchan); - - hci_chan_del(hchan); -} diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h deleted file mode 100644 index 832764dfbfb3..000000000000 --- a/net/bluetooth/amp.h +++ /dev/null @@ -1,61 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - Copyright (c) 2011,2012 Intel Corp. 
- -*/ - -#ifndef __AMP_H -#define __AMP_H - -struct amp_ctrl { - struct list_head list; - struct kref kref; - __u8 id; - __u16 assoc_len_so_far; - __u16 assoc_rem_len; - __u16 assoc_len; - __u8 *assoc; -}; - -int amp_ctrl_put(struct amp_ctrl *ctrl); -void amp_ctrl_get(struct amp_ctrl *ctrl); -struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id); -struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id); -void amp_ctrl_list_flush(struct amp_mgr *mgr); - -struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr, - u8 remote_id, bool out); - -int phylink_gen_key(struct hci_conn *hcon, u8 *data, u8 *len, u8 *type); - -void amp_read_loc_info(struct hci_dev *hdev, struct amp_mgr *mgr); -void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle); -void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr); -void amp_read_loc_assoc_final_data(struct hci_dev *hdev, - struct hci_conn *hcon); -void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, - struct hci_conn *hcon); -void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, - struct hci_conn *hcon); - -#if IS_ENABLED(CONFIG_BT_HS) -void amp_create_logical_link(struct l2cap_chan *chan); -void amp_disconnect_logical_link(struct hci_chan *hchan); -#else -static inline void amp_create_logical_link(struct l2cap_chan *chan) -{ -} - -static inline void amp_disconnect_logical_link(struct hci_chan *hchan) -{ -} -#endif - -void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle); -void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle); -void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon); -void amp_create_logical_link(struct l2cap_chan *chan); -void amp_disconnect_logical_link(struct hci_chan *hchan); -void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason); - -#endif /* __AMP_H */ diff --git a/net/bluetooth/aosp.c b/net/bluetooth/aosp.c index 432ae3aac9e3..59025771af53 100644 --- a/net/bluetooth/aosp.c +++ b/net/bluetooth/aosp.c @@ -54,7 +54,10 @@ void aosp_do_open(struct hci_dev *hdev) /* LE Get Vendor Capabilities Command */ skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x153), 0, NULL, HCI_CMD_TIMEOUT); - if (IS_ERR(skb)) { + if (IS_ERR_OR_NULL(skb)) { + if (!skb) + skb = ERR_PTR(-EIO); + bt_dev_err(hdev, "AOSP get vendor capabilities (%ld)", PTR_ERR(skb)); return; @@ -67,7 +70,7 @@ void aosp_do_open(struct hci_dev *hdev) rp = (struct aosp_rp_le_get_vendor_capa *)skb->data; version_supported = le16_to_cpu(rp->version_supported); - /* AOSP displays the verion number like v0.98, v1.00, etc. */ + /* AOSP displays the version number like v0.98, v1.00, etc. 
*/ bt_dev_info(hdev, "AOSP extensions version v%u.%02u", version_supported >> 8, version_supported & 0xff); @@ -152,7 +155,10 @@ static int enable_quality_report(struct hci_dev *hdev) skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); - if (IS_ERR(skb)) { + if (IS_ERR_OR_NULL(skb)) { + if (!skb) + skb = ERR_PTR(-EIO); + bt_dev_err(hdev, "Enabling Android BQR failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); @@ -171,7 +177,10 @@ static int disable_quality_report(struct hci_dev *hdev) skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); - if (IS_ERR(skb)) { + if (IS_ERR_OR_NULL(skb)) { + if (!skb) + skb = ERR_PTR(-EIO); + bt_dev_err(hdev, "Disabling Android BQR failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index 40baa6b7321a..d44987d4515c 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -29,7 +29,7 @@ #include <linux/kthread.h> #include <linux/file.h> #include <linux/etherdevice.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/l2cap.h> @@ -385,7 +385,8 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) case BNEP_COMPRESSED_DST_ONLY: __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN); - __skb_put_data(nskb, s->eh.h_source, ETH_ALEN + 2); + __skb_put_data(nskb, s->eh.h_source, ETH_ALEN); + put_unaligned(s->eh.h_proto, (__be16 *)__skb_put(nskb, 2)); break; case BNEP_GENERAL: @@ -400,7 +401,7 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) dev->stats.rx_packets++; nskb->ip_summed = CHECKSUM_NONE; nskb->protocol = eth_type_trans(nskb, dev); - netif_rx_ni(nskb); + netif_rx(nskb); return 0; badframe: @@ -549,7 +550,7 @@ static struct device *bnep_get_device(struct bnep_session *session) return &conn->hcon->dev; } -static struct device_type bnep_type = { +static const struct device_type bnep_type = { .name = "bluetooth", }; @@ -744,8 +745,7 @@ static int __init bnep_init(void) if (flt[0]) BT_INFO("BNEP filters: %s", flt); - bnep_sock_init(); - return 0; + return bnep_sock_init(); } static void __exit bnep_exit(void) diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index 57d509d77cb4..00d47bcf4d7d 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c @@ -205,21 +205,13 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol, if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; - sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, kern); + sk = bt_sock_alloc(net, sock, &bnep_proto, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; - sock_init_data(sock, sk); - sock->ops = &bnep_sock_ops; - sock->state = SS_UNCONNECTED; - sock_reset_flag(sk, SOCK_ZAPPED); - - sk->sk_protocol = protocol; - sk->sk_state = BT_OPEN; - bt_sock_link(&bnep_sk_list, sk); return 0; } diff --git a/net/bluetooth/cmtp/Kconfig b/net/bluetooth/cmtp/Kconfig index c8337786da6b..34e923466236 100644 --- a/net/bluetooth/cmtp/Kconfig +++ b/net/bluetooth/cmtp/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config BT_CMTP - tristate "CMTP protocol support" - depends on BT_BREDR && ISDN_CAPI + tristate "CMTP protocol support (DEPRECATED)" + depends on BT_BREDR && ISDN_CAPI && DEPRECATED help CMTP (CAPI Message Transport Protocol) is a transport layer for CAPI messages. 
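The aosp.c hunks above all convert a NULL reply from __hci_cmd_sync() into ERR_PTR(-EIO) so that one PTR_ERR()-based error path covers both failure modes. The following self-contained model re-implements just enough of the include/linux/err.h machinery to run in user space; cmd_sync() is a hypothetical stand-in, not a real API.

#include <stdio.h>

#define MAX_ERRNO	4095
#define EIO		5

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR(ptr);
}

/* Hypothetical command helper that can fail either way. */
static void *cmd_sync(int fail_with_null)
{
	return fail_with_null ? NULL : ERR_PTR(-EIO);
}

int main(void)
{
	void *skb = cmd_sync(1);

	if (IS_ERR_OR_NULL(skb)) {
		if (!skb)
			skb = ERR_PTR(-EIO);	/* normalize NULL as above */
		printf("command failed (%ld)\n", PTR_ERR(skb));
		return 1;
	}
	return 0;
}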
CMTP is required for the Bluetooth Common diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c index f3bedc3b613a..884703fda979 100644 --- a/net/bluetooth/cmtp/capi.c +++ b/net/bluetooth/cmtp/capi.c @@ -248,18 +248,10 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s break; case CAPI_FUNCTION_GET_MANUFACTURER: - if (skb->len < CAPI_MSG_BASELEN + 15) - break; - - if (!info && ctrl) { - int len = min_t(uint, CAPI_MANUFACTURER_LEN, - skb->data[CAPI_MSG_BASELEN + 14]); - - memset(ctrl->manu, 0, CAPI_MANUFACTURER_LEN); - strncpy(ctrl->manu, - skb->data + CAPI_MSG_BASELEN + 15, len); - } - + if (!info && ctrl && skb->len > CAPI_MSG_BASELEN + 14) + strscpy_pad(ctrl->manu, + skb->data + CAPI_MSG_BASELEN + 15, + skb->data[CAPI_MSG_BASELEN + 14]); break; case CAPI_FUNCTION_GET_VERSION: @@ -276,18 +268,10 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s break; case CAPI_FUNCTION_GET_SERIAL_NUMBER: - if (skb->len < CAPI_MSG_BASELEN + 17) - break; - - if (!info && ctrl) { - int len = min_t(uint, CAPI_SERIAL_LEN, - skb->data[CAPI_MSG_BASELEN + 16]); - - memset(ctrl->serial, 0, CAPI_SERIAL_LEN); - strncpy(ctrl->serial, - skb->data + CAPI_MSG_BASELEN + 17, len); - } - + if (!info && ctrl && skb->len > CAPI_MSG_BASELEN + 16) + strscpy_pad(ctrl->serial, + skb->data + CAPI_MSG_BASELEN + 17, + skb->data[CAPI_MSG_BASELEN + 16]); break; } diff --git a/net/bluetooth/coredump.c b/net/bluetooth/coredump.c new file mode 100644 index 000000000000..720cb79adf96 --- /dev/null +++ b/net/bluetooth/coredump.c @@ -0,0 +1,553 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023 Google Corporation + */ + +#include <linux/devcoredump.h> + +#include <linux/unaligned.h> +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> + +enum hci_devcoredump_pkt_type { + HCI_DEVCOREDUMP_PKT_INIT, + HCI_DEVCOREDUMP_PKT_SKB, + HCI_DEVCOREDUMP_PKT_PATTERN, + HCI_DEVCOREDUMP_PKT_COMPLETE, + HCI_DEVCOREDUMP_PKT_ABORT, +}; + +struct hci_devcoredump_skb_cb { + u16 pkt_type; +}; + +struct hci_devcoredump_skb_pattern { + u8 pattern; + u32 len; +} __packed; + +#define hci_dmp_cb(skb) ((struct hci_devcoredump_skb_cb *)((skb)->cb)) + +#define DBG_UNEXPECTED_STATE() \ + bt_dev_dbg(hdev, \ + "Unexpected packet (%d) for state (%d). ", \ + hci_dmp_cb(skb)->pkt_type, hdev->dump.state) + +#define MAX_DEVCOREDUMP_HDR_SIZE 512 /* bytes */ + +static int hci_devcd_update_hdr_state(char *buf, size_t size, int state) +{ + int len = 0; + + if (!buf) + return 0; + + len = scnprintf(buf, size, "Bluetooth devcoredump\nState: %d\n", state); + + return len + 1; /* scnprintf adds \0 at the end upon state rewrite */ +} + +/* Call with hci_dev_lock only. */ +static int hci_devcd_update_state(struct hci_dev *hdev, int state) +{ + bt_dev_dbg(hdev, "Updating devcoredump state from %d to %d.", + hdev->dump.state, state); + + hdev->dump.state = state; + + return hci_devcd_update_hdr_state(hdev->dump.head, + hdev->dump.alloc_size, state); +} + +static int hci_devcd_mkheader(struct hci_dev *hdev, struct sk_buff *skb) +{ + char dump_start[] = "--- Start dump ---\n"; + char hdr[80]; + int hdr_len; + + hdr_len = hci_devcd_update_hdr_state(hdr, sizeof(hdr), + HCI_DEVCOREDUMP_IDLE); + skb_put_data(skb, hdr, hdr_len); + + if (hdev->dump.dmp_hdr) + hdev->dump.dmp_hdr(hdev, skb); + + skb_put_data(skb, dump_start, strlen(dump_start)); + + return skb->len; +} + +/* Do not call with hci_dev_lock since this calls driver code. 
*/ +static void hci_devcd_notify(struct hci_dev *hdev, int state) +{ + if (hdev->dump.notify_change) + hdev->dump.notify_change(hdev, state); +} + +/* Call with hci_dev_lock only. */ +void hci_devcd_reset(struct hci_dev *hdev) +{ + hdev->dump.head = NULL; + hdev->dump.tail = NULL; + hdev->dump.alloc_size = 0; + + hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE); + + cancel_delayed_work(&hdev->dump.dump_timeout); + skb_queue_purge(&hdev->dump.dump_q); +} + +/* Call with hci_dev_lock only. */ +static void hci_devcd_free(struct hci_dev *hdev) +{ + vfree(hdev->dump.head); + + hci_devcd_reset(hdev); +} + +/* Call with hci_dev_lock only. */ +static int hci_devcd_alloc(struct hci_dev *hdev, u32 size) +{ + hdev->dump.head = vmalloc(size); + if (!hdev->dump.head) + return -ENOMEM; + + hdev->dump.alloc_size = size; + hdev->dump.tail = hdev->dump.head; + hdev->dump.end = hdev->dump.head + size; + + hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE); + + return 0; +} + +/* Call with hci_dev_lock only. */ +static bool hci_devcd_copy(struct hci_dev *hdev, char *buf, u32 size) +{ + if (hdev->dump.tail + size > hdev->dump.end) + return false; + + memcpy(hdev->dump.tail, buf, size); + hdev->dump.tail += size; + + return true; +} + +/* Call with hci_dev_lock only. */ +static bool hci_devcd_memset(struct hci_dev *hdev, u8 pattern, u32 len) +{ + if (hdev->dump.tail + len > hdev->dump.end) + return false; + + memset(hdev->dump.tail, pattern, len); + hdev->dump.tail += len; + + return true; +} + +/* Call with hci_dev_lock only. */ +static int hci_devcd_prepare(struct hci_dev *hdev, u32 dump_size) +{ + struct sk_buff *skb; + int dump_hdr_size; + int err = 0; + + skb = alloc_skb(MAX_DEVCOREDUMP_HDR_SIZE, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + dump_hdr_size = hci_devcd_mkheader(hdev, skb); + + if (hci_devcd_alloc(hdev, dump_hdr_size + dump_size)) { + err = -ENOMEM; + goto hdr_free; + } + + /* Insert the device header */ + if (!hci_devcd_copy(hdev, skb->data, skb->len)) { + bt_dev_err(hdev, "Failed to insert header"); + hci_devcd_free(hdev); + + err = -ENOMEM; + goto hdr_free; + } + +hdr_free: + kfree_skb(skb); + + return err; +} + +static void hci_devcd_handle_pkt_init(struct hci_dev *hdev, struct sk_buff *skb) +{ + u32 dump_size; + + if (hdev->dump.state != HCI_DEVCOREDUMP_IDLE) { + DBG_UNEXPECTED_STATE(); + return; + } + + if (skb->len != sizeof(dump_size)) { + bt_dev_dbg(hdev, "Invalid dump init pkt"); + return; + } + + dump_size = get_unaligned_le32(skb_pull_data(skb, 4)); + if (!dump_size) { + bt_dev_err(hdev, "Zero size dump init pkt"); + return; + } + + if (hci_devcd_prepare(hdev, dump_size)) { + bt_dev_err(hdev, "Failed to prepare for dump"); + return; + } + + hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ACTIVE); + queue_delayed_work(hdev->workqueue, &hdev->dump.dump_timeout, + hdev->dump.timeout); +} + +static void hci_devcd_handle_pkt_skb(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) { + DBG_UNEXPECTED_STATE(); + return; + } + + if (!hci_devcd_copy(hdev, skb->data, skb->len)) + bt_dev_dbg(hdev, "Failed to insert skb"); +} + +static void hci_devcd_handle_pkt_pattern(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_devcoredump_skb_pattern *pattern; + + if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) { + DBG_UNEXPECTED_STATE(); + return; + } + + if (skb->len != sizeof(*pattern)) { + bt_dev_dbg(hdev, "Invalid pattern skb"); + return; + } + + pattern = skb_pull_data(skb, sizeof(*pattern)); + + if (!hci_devcd_memset(hdev, pattern->pattern, 
pattern->len)) + bt_dev_dbg(hdev, "Failed to set pattern"); +} + +static void hci_devcd_dump(struct hci_dev *hdev) +{ + struct sk_buff *skb; + u32 size; + + bt_dev_dbg(hdev, "state %d", hdev->dump.state); + + size = hdev->dump.tail - hdev->dump.head; + + /* Send a copy to monitor as a diagnostic packet */ + skb = bt_skb_alloc(size, GFP_ATOMIC); + if (skb) { + skb_put_data(skb, hdev->dump.head, size); + hci_recv_diag(hdev, skb); + } + + /* Emit a devcoredump with the available data */ + dev_coredumpv(&hdev->dev, hdev->dump.head, size, GFP_KERNEL); +} + +static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev, + struct sk_buff *skb) +{ + u32 dump_size; + + if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) { + DBG_UNEXPECTED_STATE(); + return; + } + + hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_DONE); + dump_size = hdev->dump.tail - hdev->dump.head; + + bt_dev_dbg(hdev, "complete with size %u (expect %zu)", dump_size, + hdev->dump.alloc_size); + + hci_devcd_dump(hdev); +} + +static void hci_devcd_handle_pkt_abort(struct hci_dev *hdev, + struct sk_buff *skb) +{ + u32 dump_size; + + if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) { + DBG_UNEXPECTED_STATE(); + return; + } + + hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ABORT); + dump_size = hdev->dump.tail - hdev->dump.head; + + bt_dev_dbg(hdev, "aborted with size %u (expect %zu)", dump_size, + hdev->dump.alloc_size); + + hci_devcd_dump(hdev); +} + +/* Bluetooth devcoredump state machine. + * + * Devcoredump states: + * + * HCI_DEVCOREDUMP_IDLE: The default state. + * + * HCI_DEVCOREDUMP_ACTIVE: A devcoredump will be in this state once it has + * been initialized using hci_devcd_init(). Once active, the driver + * can append data using hci_devcd_append() or insert a pattern + * using hci_devcd_append_pattern(). + * + * HCI_DEVCOREDUMP_DONE: Once the dump collection is complete, the driver + * can signal the completion using hci_devcd_complete(). A + * devcoredump is generated indicating the completion event and + * then the state machine is reset to the default state. + * + * HCI_DEVCOREDUMP_ABORT: The driver can cancel ongoing dump collection in + * case of any error using hci_devcd_abort(). A devcoredump is + * still generated with the available data indicating the abort + * event and then the state machine is reset to the default state. + * + * HCI_DEVCOREDUMP_TIMEOUT: A timeout timer for HCI_DEVCOREDUMP_TIMEOUT sec + * is started during devcoredump initialization. Once the timeout + * occurs, the driver is notified, a devcoredump is generated with + * the available data indicating the timeout event and then the + * state machine is reset to the default state. + * + * The driver must register using hci_devcd_register() before using the hci + * devcoredump APIs. + */ +void hci_devcd_rx(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, dump.dump_rx); + struct sk_buff *skb; + int start_state; + + while ((skb = skb_dequeue(&hdev->dump.dump_q))) { + /* Return if timeout occurs. The timeout handler function + * hci_devcd_timeout() will report the available dump data.
+ */ + if (hdev->dump.state == HCI_DEVCOREDUMP_TIMEOUT) { + kfree_skb(skb); + return; + } + + hci_dev_lock(hdev); + start_state = hdev->dump.state; + + switch (hci_dmp_cb(skb)->pkt_type) { + case HCI_DEVCOREDUMP_PKT_INIT: + hci_devcd_handle_pkt_init(hdev, skb); + break; + + case HCI_DEVCOREDUMP_PKT_SKB: + hci_devcd_handle_pkt_skb(hdev, skb); + break; + + case HCI_DEVCOREDUMP_PKT_PATTERN: + hci_devcd_handle_pkt_pattern(hdev, skb); + break; + + case HCI_DEVCOREDUMP_PKT_COMPLETE: + hci_devcd_handle_pkt_complete(hdev, skb); + break; + + case HCI_DEVCOREDUMP_PKT_ABORT: + hci_devcd_handle_pkt_abort(hdev, skb); + break; + + default: + bt_dev_dbg(hdev, "Unknown packet (%d) for state (%d). ", + hci_dmp_cb(skb)->pkt_type, hdev->dump.state); + break; + } + + hci_dev_unlock(hdev); + kfree_skb(skb); + + /* Notify the driver about any state changes before resetting + * the state machine + */ + if (start_state != hdev->dump.state) + hci_devcd_notify(hdev, hdev->dump.state); + + /* Reset the state machine if the devcoredump is complete */ + hci_dev_lock(hdev); + if (hdev->dump.state == HCI_DEVCOREDUMP_DONE || + hdev->dump.state == HCI_DEVCOREDUMP_ABORT) + hci_devcd_reset(hdev); + hci_dev_unlock(hdev); + } +} +EXPORT_SYMBOL(hci_devcd_rx); + +void hci_devcd_timeout(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + dump.dump_timeout.work); + u32 dump_size; + + hci_devcd_notify(hdev, HCI_DEVCOREDUMP_TIMEOUT); + + hci_dev_lock(hdev); + + cancel_work(&hdev->dump.dump_rx); + + hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_TIMEOUT); + + dump_size = hdev->dump.tail - hdev->dump.head; + bt_dev_dbg(hdev, "timeout with size %u (expect %zu)", dump_size, + hdev->dump.alloc_size); + + hci_devcd_dump(hdev); + + hci_devcd_reset(hdev); + + hci_dev_unlock(hdev); +} +EXPORT_SYMBOL(hci_devcd_timeout); + +int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump, + dmp_hdr_t dmp_hdr, notify_change_t notify_change) +{ + /* Driver must implement coredump() and dmp_hdr() functions for + * bluetooth devcoredump. The coredump() should trigger a coredump + * event on the controller when the device's coredump sysfs entry is + * written to. The dmp_hdr() should create a dump header to identify + * the controller/fw/driver info. 
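hci_devcd_rx() above drains dump_q and drives the devcoredump state machine packet by packet, dropping anything that does not match the current state. A compact user-space simulation of that dispatch loop, with hypothetical types and sizes; the real transitions are the HCI_DEVCOREDUMP_* ones documented above.

#include <stdio.h>

enum state { IDLE, ACTIVE, DONE };
enum pkt_type { PKT_INIT, PKT_SKB, PKT_COMPLETE };

struct dump {
	enum state state;
	unsigned int collected;
	unsigned int expected;
};

/* Mirrors the per-packet dispatch: packets that do not fit the current
 * state are rejected, as DBG_UNEXPECTED_STATE() does above. */
static void handle_pkt(struct dump *d, enum pkt_type type, unsigned int len)
{
	switch (type) {
	case PKT_INIT:
		if (d->state != IDLE)
			goto unexpected;
		d->expected = len;
		d->collected = 0;
		d->state = ACTIVE;
		return;
	case PKT_SKB:
		if (d->state != ACTIVE)
			goto unexpected;
		d->collected += len;
		return;
	case PKT_COMPLETE:
		if (d->state != ACTIVE)
			goto unexpected;
		d->state = DONE;
		printf("dump complete: %u of %u bytes\n",
		       d->collected, d->expected);
		d->state = IDLE;	/* the kernel resets via hci_devcd_reset() */
		return;
	}
unexpected:
	printf("unexpected packet %d in state %d\n", type, d->state);
}

int main(void)
{
	struct dump d = { .state = IDLE };

	handle_pkt(&d, PKT_INIT, 256);
	handle_pkt(&d, PKT_SKB, 128);
	handle_pkt(&d, PKT_SKB, 128);
	handle_pkt(&d, PKT_COMPLETE, 0);
	return 0;
}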
+ */ + if (!coredump || !dmp_hdr) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->dump.coredump = coredump; + hdev->dump.dmp_hdr = dmp_hdr; + hdev->dump.notify_change = notify_change; + hdev->dump.supported = true; + hdev->dump.timeout = DEVCOREDUMP_TIMEOUT; + hci_dev_unlock(hdev); + + return 0; +} +EXPORT_SYMBOL(hci_devcd_register); + +static inline bool hci_devcd_enabled(struct hci_dev *hdev) +{ + return hdev->dump.supported; +} + +int hci_devcd_init(struct hci_dev *hdev, u32 dump_size) +{ + struct sk_buff *skb; + + if (!hci_devcd_enabled(hdev)) + return -EOPNOTSUPP; + + skb = alloc_skb(sizeof(dump_size), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_INIT; + put_unaligned_le32(dump_size, skb_put(skb, 4)); + + skb_queue_tail(&hdev->dump.dump_q, skb); + queue_work(hdev->workqueue, &hdev->dump.dump_rx); + + return 0; +} +EXPORT_SYMBOL(hci_devcd_init); + +int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (!skb) + return -ENOMEM; + + if (!hci_devcd_enabled(hdev)) { + kfree_skb(skb); + return -EOPNOTSUPP; + } + + hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_SKB; + + skb_queue_tail(&hdev->dump.dump_q, skb); + queue_work(hdev->workqueue, &hdev->dump.dump_rx); + + return 0; +} +EXPORT_SYMBOL(hci_devcd_append); + +int hci_devcd_append_pattern(struct hci_dev *hdev, u8 pattern, u32 len) +{ + struct hci_devcoredump_skb_pattern p; + struct sk_buff *skb; + + if (!hci_devcd_enabled(hdev)) + return -EOPNOTSUPP; + + skb = alloc_skb(sizeof(p), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + p.pattern = pattern; + p.len = len; + + hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_PATTERN; + skb_put_data(skb, &p, sizeof(p)); + + skb_queue_tail(&hdev->dump.dump_q, skb); + queue_work(hdev->workqueue, &hdev->dump.dump_rx); + + return 0; +} +EXPORT_SYMBOL(hci_devcd_append_pattern); + +int hci_devcd_complete(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + if (!hci_devcd_enabled(hdev)) + return -EOPNOTSUPP; + + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_COMPLETE; + + skb_queue_tail(&hdev->dump.dump_q, skb); + queue_work(hdev->workqueue, &hdev->dump.dump_rx); + + return 0; +} +EXPORT_SYMBOL(hci_devcd_complete); + +int hci_devcd_abort(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + if (!hci_devcd_enabled(hdev)) + return -EOPNOTSUPP; + + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_ABORT; + + skb_queue_tail(&hdev->dump.dump_q, skb); + queue_work(hdev->workqueue, &hdev->dump.dump_rx); + + return 0; +} +EXPORT_SYMBOL(hci_devcd_abort); diff --git a/net/bluetooth/ecdh_helper.c b/net/bluetooth/ecdh_helper.c index 989401f116e9..0efc93fdae8a 100644 --- a/net/bluetooth/ecdh_helper.c +++ b/net/bluetooth/ecdh_helper.c @@ -25,22 +25,6 @@ #include <linux/scatterlist.h> #include <crypto/ecdh.h> -struct ecdh_completion { - struct completion completion; - int err; -}; - -static void ecdh_complete(struct crypto_async_request *req, int err) -{ - struct ecdh_completion *res = req->data; - - if (err == -EINPROGRESS) - return; - - res->err = err; - complete(&res->completion); -} - static inline void swap_digits(u64 *in, u64 *out, unsigned int ndigits) { int i; @@ -60,9 +44,9 @@ static inline void swap_digits(u64 *in, u64 *out, unsigned int ndigits) int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 public_key[64], u8 secret[32]) { + DECLARE_CRYPTO_WAIT(result); struct kpp_request *req; u8 *tmp; - struct 
ecdh_completion result; struct scatterlist src, dst; int err; @@ -76,8 +60,6 @@ int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 public_key[64], goto free_tmp; } - init_completion(&result.completion); - swap_digits((u64 *)public_key, (u64 *)tmp, 4); /* x */ swap_digits((u64 *)&public_key[32], (u64 *)&tmp[32], 4); /* y */ @@ -86,12 +68,9 @@ int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 public_key[64], kpp_request_set_input(req, &src, 64); kpp_request_set_output(req, &dst, 32); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - ecdh_complete, &result); + crypto_req_done, &result); err = crypto_kpp_compute_shared_secret(req); - if (err == -EINPROGRESS) { - wait_for_completion(&result.completion); - err = result.err; - } + err = crypto_wait_req(err, &result); if (err < 0) { pr_err("alg: ecdh: compute shared secret failed. err %d\n", err); @@ -165,9 +144,9 @@ free_tmp: */ int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]) { + DECLARE_CRYPTO_WAIT(result); struct kpp_request *req; u8 *tmp; - struct ecdh_completion result; struct scatterlist dst; int err; @@ -181,18 +160,14 @@ int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]) goto free_tmp; } - init_completion(&result.completion); sg_init_one(&dst, tmp, 64); kpp_request_set_input(req, NULL, 0); kpp_request_set_output(req, &dst, 64); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - ecdh_complete, &result); + crypto_req_done, &result); err = crypto_kpp_generate_public_key(req); - if (err == -EINPROGRESS) { - wait_for_completion(&result.completion); - err = result.err; - } + err = crypto_wait_req(err, &result); if (err < 0) goto free_all; diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c index 7e930f77ecab..3f72111ba651 100644 --- a/net/bluetooth/eir.c +++ b/net/bluetooth/eir.c @@ -18,34 +18,30 @@ u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) size_t short_len; size_t complete_len; - /* no space left for name (+ NULL + type + len) */ - if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) + /* no space left for name (+ type + len) */ + if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 2) return ad_len; /* use complete name if present and fits */ - complete_len = strlen(hdev->dev_name); + complete_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name)); if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH) return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, - hdev->dev_name, complete_len + 1); + hdev->dev_name, complete_len); /* use short name if present */ - short_len = strlen(hdev->short_name); + short_len = strnlen(hdev->short_name, sizeof(hdev->short_name)); if (short_len) return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, - hdev->short_name, short_len + 1); + hdev->short_name, + short_len); /* use shortened full name if present, we already know that name * is longer then HCI_MAX_SHORT_NAME_LENGTH */ - if (complete_len) { - u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; - - memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); - name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; - - return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name, - sizeof(name)); - } + if (complete_len) + return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, + hdev->dev_name, + HCI_MAX_SHORT_NAME_LENGTH); return ad_len; } @@ -55,6 +51,19 @@ u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len) return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance); } +u8 eir_append_service_data(u8 *eir, u16 eir_len, u16 uuid, u8 *data, 
+ u8 data_len) +{ + eir[eir_len++] = sizeof(u8) + sizeof(uuid) + data_len; + eir[eir_len++] = EIR_SERVICE_DATA; + put_unaligned_le16(uuid, &eir[eir_len]); + eir_len += sizeof(uuid); + memcpy(&eir[eir_len], data, data_len); + eir_len += data_len; + + return eir_len; +} + static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) { u8 *ptr = data, *uuids_start = NULL; @@ -168,7 +177,7 @@ void eir_create(struct hci_dev *hdev, u8 *data) u8 *ptr = data; size_t name_len; - name_len = strlen(hdev->dev_name); + name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name)); if (name_len > 0) { /* EIR Data type */ @@ -212,7 +221,28 @@ void eir_create(struct hci_dev *hdev, u8 *data) ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); } -u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) +u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) +{ + struct adv_info *adv = NULL; + u8 ad_len = 0; + + /* Return 0 when the current instance identifier is invalid. */ + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return 0; + } + + if (adv) { + memcpy(ptr, adv->per_adv_data, adv->per_adv_data_len); + ad_len += adv->per_adv_data_len; + ptr += adv->per_adv_data_len; + } + + return ad_len; +} + +u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr, u8 size) { struct adv_info *adv = NULL; u8 ad_len = 0, flags = 0; @@ -256,7 +286,7 @@ u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) /* If flags would still be empty, then there is no need to * include the "Flags" AD field". */ - if (flags) { + if (flags && (ad_len + eir_precalc_len(1) <= size)) { ptr[0] = 0x02; ptr[1] = EIR_FLAGS; ptr[2] = flags; @@ -286,7 +316,8 @@ skip_flags: } /* Provide Tx Power only if we can provide a valid value for it */ - if (adv_tx_power != HCI_TX_POWER_INVALID) { + if (adv_tx_power != HCI_TX_POWER_INVALID && + (ad_len + eir_precalc_len(1) <= size)) { ptr[0] = 0x02; ptr[1] = EIR_TX_POWER; ptr[2] = (u8)adv_tx_power; @@ -333,3 +364,23 @@ u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr) return scan_rsp_len; } + +void *eir_get_service_data(u8 *eir, size_t eir_len, u16 uuid, size_t *len) +{ + size_t dlen; + + while ((eir = eir_get_data(eir, eir_len, EIR_SERVICE_DATA, &dlen))) { + u16 value = get_unaligned_le16(eir); + + if (uuid == value) { + if (len) + *len = dlen - 2; + return &eir[2]; + } + + eir += dlen; + eir_len -= dlen; + } + + return NULL; +} diff --git a/net/bluetooth/eir.h b/net/bluetooth/eir.h index 05e2e917fc25..9372db83f912 100644 --- a/net/bluetooth/eir.h +++ b/net/bluetooth/eir.h @@ -5,15 +5,23 @@ * Copyright (C) 2021 Intel Corporation */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> void eir_create(struct hci_dev *hdev, u8 *data); -u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); +u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr, u8 size); u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr); +u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); u8 eir_append_local_name(struct hci_dev *hdev, u8 *eir, u8 ad_len); u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len); +u8 eir_append_service_data(u8 *eir, u16 eir_len, u16 uuid, u8 *data, + u8 data_len); + +static inline u16 eir_precalc_len(u8 data_len) +{ + return sizeof(u8) * 2 + data_len; +} static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len) @@ -36,6 +44,21 @@ static inline u16 eir_append_le16(u8 *eir, 
u16 eir_len, u8 type, u16 data) return eir_len; } +static inline u16 eir_skb_put_data(struct sk_buff *skb, u8 type, u8 *data, u8 data_len) +{ + u8 *eir; + u16 eir_len; + + eir_len = eir_precalc_len(data_len); + eir = skb_put(skb, eir_len); + WARN_ON(sizeof(type) + data_len > U8_MAX); + eir[0] = sizeof(type) + data_len; + eir[1] = type; + memcpy(&eir[2], data, data_len); + + return eir_len; +} + static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type, size_t *data_len) { @@ -72,3 +95,5 @@ static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type, return NULL; } + +void *eir_get_service_data(u8 *eir, size_t eir_len, u16 uuid, size_t *len); diff --git a/net/bluetooth/hci_codec.c b/net/bluetooth/hci_codec.c index 38201532f58e..3cc135bb1d30 100644 --- a/net/bluetooth/hci_codec.c +++ b/net/bluetooth/hci_codec.c @@ -72,9 +72,8 @@ static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport, continue; } - skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS, - sizeof(*cmd), cmd, - HCI_CMD_TIMEOUT); + skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS, + sizeof(*cmd), cmd, 0, HCI_CMD_TIMEOUT, NULL); if (IS_ERR(skb)) { bt_dev_err(hdev, "Failed to read codec capabilities (%ld)", PTR_ERR(skb)); @@ -127,8 +126,8 @@ void hci_read_supported_codecs(struct hci_dev *hdev) struct hci_op_read_local_codec_caps caps; __u8 i; - skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL, - HCI_CMD_TIMEOUT); + skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL, + 0, HCI_CMD_TIMEOUT, NULL); if (IS_ERR(skb)) { bt_dev_err(hdev, "Failed to read local supported codecs (%ld)", @@ -158,7 +157,8 @@ void hci_read_supported_codecs(struct hci_dev *hdev) for (i = 0; i < std_codecs->num; i++) { caps.id = std_codecs->codec[i]; caps.direction = 0x00; - hci_read_codec_capabilities(hdev, LOCAL_CODEC_ACL_MASK, &caps); + hci_read_codec_capabilities(hdev, + LOCAL_CODEC_ACL_MASK | LOCAL_CODEC_SCO_MASK, &caps); } skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num) @@ -178,7 +178,8 @@ void hci_read_supported_codecs(struct hci_dev *hdev) caps.cid = vnd_codecs->codec[i].cid; caps.vid = vnd_codecs->codec[i].vid; caps.direction = 0x00; - hci_read_codec_capabilities(hdev, LOCAL_CODEC_ACL_MASK, &caps); + hci_read_codec_capabilities(hdev, + LOCAL_CODEC_ACL_MASK | LOCAL_CODEC_SCO_MASK, &caps); } error: @@ -194,8 +195,8 @@ void hci_read_supported_codecs_v2(struct hci_dev *hdev) struct hci_op_read_local_codec_caps caps; __u8 i; - skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODECS_V2, 0, NULL, - HCI_CMD_TIMEOUT); + skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODECS_V2, 0, NULL, + 0, HCI_CMD_TIMEOUT, NULL); if (IS_ERR(skb)) { bt_dev_err(hdev, "Failed to read local supported codecs (%ld)", diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 04ebe901e86f..c3f7828bf9d5 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1,6 +1,7 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 
+ Copyright 2023-2024 NXP Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> @@ -26,14 +27,16 @@ #include <linux/export.h> #include <linux/debugfs.h> +#include <linux/errqueue.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> +#include <net/bluetooth/iso.h> +#include <net/bluetooth/mgmt.h> -#include "hci_request.h" #include "smp.h" -#include "a2mp.h" +#include "eir.h" struct sco_param { u16 pkt_type; @@ -41,6 +44,11 @@ struct sco_param { u8 retrans_effort; }; +struct conn_handle_t { + struct hci_conn *conn; + __u16 handle; +}; + static const struct sco_param esco_param_cvsd[] = { { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */ @@ -60,7 +68,7 @@ static const struct sco_param esco_param_msbc[] = { }; /* This function requires the caller holds hdev->lock */ -static void hci_connect_le_scan_cleanup(struct hci_conn *conn) +void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status) { struct hci_conn_params *params; struct hci_dev *hdev = conn->hdev; @@ -80,9 +88,27 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn) params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr, bdaddr_type); - if (!params || !params->explicit_connect) + if (!params) return; + if (params->conn) { + hci_conn_drop(params->conn); + hci_conn_put(params->conn); + params->conn = NULL; + } + + if (!params->explicit_connect) + return; + + /* If the status indicates successful cancellation of + * the attempt (i.e. Unknown Connection Id) there's no point of + * notifying failure since we'll go back to keep trying to + * connect. The only exception is explicit connect requests + * where a timeout + cancel does indicate an actual failure. + */ + if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) + mgmt_connect_failed(hdev, conn, status); + /* The connection attempt was doing scan for new RPA, and is * in scan phase. If params are not associated with any other * autoconnect action, remove them completely. 
If they are, just unmark @@ -90,7 +116,7 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn) */ params->explicit_connect = false; - list_del_init(¶ms->action); + hci_pend_le_list_del_init(params); switch (params->auto_connect) { case HCI_AUTO_CONN_EXPLICIT: @@ -99,10 +125,10 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn) return; case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: - list_add(¶ms->action, &hdev->pend_le_conns); + hci_pend_le_list_add(params, &hdev->pend_le_conns); break; case HCI_AUTO_CONN_REPORT: - list_add(¶ms->action, &hdev->pend_le_reports); + hci_pend_le_list_add(params, &hdev->pend_le_reports); break; default: break; @@ -118,9 +144,16 @@ static void hci_conn_cleanup(struct hci_conn *conn) if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags)) hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type); + if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + hci_remove_link_key(hdev, &conn->dst); + hci_chan_list_flush(conn); - hci_conn_hash_del(hdev, conn); + if (HCI_CONN_HANDLE_UNSET(conn->handle)) + ida_free(&hdev->unset_handle_ida, conn->handle); + + if (conn->cleanup) + conn->cleanup(conn); if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { switch (conn->setting & SCO_AIRMODE_MASK) { @@ -135,122 +168,11 @@ static void hci_conn_cleanup(struct hci_conn *conn) hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); } - hci_conn_del_sysfs(conn); - debugfs_remove_recursive(conn->debugfs); - hci_dev_put(hdev); - - hci_conn_put(conn); -} - -static void le_scan_cleanup(struct work_struct *work) -{ - struct hci_conn *conn = container_of(work, struct hci_conn, - le_scan_cleanup); - struct hci_dev *hdev = conn->hdev; - struct hci_conn *c = NULL; - - BT_DBG("%s hcon %p", hdev->name, conn); - - hci_dev_lock(hdev); - - /* Check that the hci_conn is still around */ - rcu_read_lock(); - list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) { - if (c == conn) - break; - } - rcu_read_unlock(); - - if (c == conn) { - hci_connect_le_scan_cleanup(conn); - hci_conn_cleanup(conn); - } + hci_conn_del_sysfs(conn); - hci_dev_unlock(hdev); hci_dev_put(hdev); - hci_conn_put(conn); -} - -static void hci_connect_le_scan_remove(struct hci_conn *conn) -{ - BT_DBG("%s hcon %p", conn->hdev->name, conn); - - /* We can't call hci_conn_del/hci_conn_cleanup here since that - * could deadlock with another hci_conn_del() call that's holding - * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work). - * Instead, grab temporary extra references to the hci_dev and - * hci_conn and perform the necessary cleanup in a separate work - * callback. - */ - - hci_dev_hold(conn->hdev); - hci_conn_get(conn); - - /* Even though we hold a reference to the hdev, many other - * things might get cleaned up meanwhile, including the hdev's - * own workqueue, so we can't use that for scheduling. - */ - schedule_work(&conn->le_scan_cleanup); -} - -static void hci_acl_create_connection(struct hci_conn *conn) -{ - struct hci_dev *hdev = conn->hdev; - struct inquiry_entry *ie; - struct hci_cp_create_conn cp; - - BT_DBG("hcon %p", conn); - - /* Many controllers disallow HCI Create Connection while it is doing - * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create - * Connection. This may cause the MGMT discovering state to become false - * without user space's request but it is okay since the MGMT Discovery - * APIs do not promise that discovery should be done forever. 
Instead, - * the user space monitors the status of MGMT discovering and it may - * request for discovery again when this flag becomes false. - */ - if (test_bit(HCI_INQUIRY, &hdev->flags)) { - /* Put this connection to "pending" state so that it will be - * executed after the inquiry cancel command complete event. - */ - conn->state = BT_CONNECT2; - hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); - return; - } - - conn->state = BT_CONNECT; - conn->out = true; - conn->role = HCI_ROLE_MASTER; - - conn->attempt++; - - conn->link_policy = hdev->link_policy; - - memset(&cp, 0, sizeof(cp)); - bacpy(&cp.bdaddr, &conn->dst); - cp.pscan_rep_mode = 0x02; - - ie = hci_inquiry_cache_lookup(hdev, &conn->dst); - if (ie) { - if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { - cp.pscan_rep_mode = ie->data.pscan_rep_mode; - cp.pscan_mode = ie->data.pscan_mode; - cp.clock_offset = ie->data.clock_offset | - cpu_to_le16(0x8000); - } - - memcpy(conn->dev_class, ie->data.dev_class, 3); - } - - cp.pkt_type = cpu_to_le16(conn->pkt_type); - if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) - cp.role_switch = 0x01; - else - cp.role_switch = 0x00; - - hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp); } int hci_disconnect(struct hci_conn *conn, __u8 reason) @@ -296,8 +218,11 @@ static void hci_add_sco(struct hci_conn *conn, __u16 handle) static bool find_next_esco_param(struct hci_conn *conn, const struct sco_param *esco_param, int size) { + if (!conn->parent) + return false; + for (; conn->attempt <= size; conn->attempt++) { - if (lmp_esco_2m_capable(conn->link) || + if (lmp_esco_2m_capable(conn->parent) || (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3)) break; BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported", @@ -307,17 +232,68 @@ static bool find_next_esco_param(struct hci_conn *conn, return conn->attempt <= size; } -static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle) +static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec) { - struct hci_dev *hdev = conn->hdev; + int err; + __u8 vnd_len, *vnd_data = NULL; + struct hci_op_configure_data_path *cmd = NULL; + + /* Do not take below 2 checks as error since the 1st means user do not + * want to use HFP offload mode and the 2nd means the vendor controller + * do not need to send below HCI command for offload mode. 
+ */ + if (!codec->data_path || !hdev->get_codec_config_data) + return 0; + + err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len, + &vnd_data); + if (err < 0) + goto error; + + cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL); + if (!cmd) { + err = -ENOMEM; + goto error; + } + + err = hdev->get_data_path_id(hdev, &cmd->data_path_id); + if (err < 0) + goto error; + + cmd->vnd_len = vnd_len; + memcpy(cmd->vnd_data, vnd_data, vnd_len); + + cmd->direction = 0x00; + __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH, + sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT); + + cmd->direction = 0x01; + err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH, + sizeof(*cmd) + vnd_len, cmd, + HCI_CMD_TIMEOUT); +error: + + kfree(cmd); + kfree(vnd_data); + return err; +} + +static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data) +{ + struct conn_handle_t *conn_handle = data; + struct hci_conn *conn = conn_handle->conn; + __u16 handle = conn_handle->handle; struct hci_cp_enhanced_setup_sync_conn cp; const struct sco_param *param; + kfree(conn_handle); + + if (!hci_conn_valid(hdev, conn)) + return -ECANCELED; + bt_dev_dbg(hdev, "hcon %p", conn); - /* for offload use case, codec needs to configured before opening SCO */ - if (conn->codec.data_path) - hci_req_configure_datapath(hdev, &conn->codec); + configure_datapath_sync(hdev, &conn->codec); conn->state = BT_CONNECT; conn->out = true; @@ -335,7 +311,7 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle) case BT_CODEC_MSBC: if (!find_next_esco_param(conn, esco_param_msbc, ARRAY_SIZE(esco_param_msbc))) - return false; + return -EINVAL; param = &esco_param_msbc[conn->attempt - 1]; cp.tx_coding_format.id = 0x05; @@ -361,7 +337,8 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle) case BT_CODEC_TRANSPARENT: if (!find_next_esco_param(conn, esco_param_msbc, ARRAY_SIZE(esco_param_msbc))) - return false; + return -EINVAL; + param = &esco_param_msbc[conn->attempt - 1]; cp.tx_coding_format.id = 0x03; cp.rx_coding_format.id = 0x03; @@ -384,14 +361,14 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle) break; case BT_CODEC_CVSD: - if (lmp_esco_capable(conn->link)) { + if (conn->parent && lmp_esco_capable(conn->parent)) { if (!find_next_esco_param(conn, esco_param_cvsd, ARRAY_SIZE(esco_param_cvsd))) - return false; + return -EINVAL; param = &esco_param_cvsd[conn->attempt - 1]; } else { if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) - return false; + return -EINVAL; param = &sco_param_cvsd[conn->attempt - 1]; } cp.tx_coding_format.id = 2; @@ -414,7 +391,7 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle) cp.out_transport_unit_size = 16; break; default: - return false; + return -EINVAL; } cp.retrans_effort = param->retrans_effort; @@ -422,9 +399,9 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle) cp.max_latency = __cpu_to_le16(param->max_latency); if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0) - return false; + return -EIO; - return true; + return 0; } static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle) @@ -454,7 +431,7 @@ static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle) param = &esco_param_msbc[conn->attempt - 1]; break; case SCO_AIRMODE_CVSD: - if (lmp_esco_capable(conn->link)) { + if (conn->parent && lmp_esco_capable(conn->parent)) { if (!find_next_esco_param(conn, esco_param_cvsd, ARRAY_SIZE(esco_param_cvsd))) return false; 
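find_next_esco_param() above advances conn->attempt past table entries the peer cannot use: in the eSCO parameter tables a set ESCO_2EV3 bit marks 2-EV3 packets as forbidden, so entries with that bit clear are the ones requiring eSCO 2M support. A standalone sketch of that walk; the table values here are hypothetical, only the skip logic is taken from the code above.

#include <stdbool.h>
#include <stdio.h>

#define ESCO_2EV3 0x0008	/* illustrative "2-EV3 forbidden" bit */

struct sco_param {
	unsigned short pkt_type;
	unsigned short max_latency;
	unsigned char retrans_effort;
};

static const struct sco_param table[] = {
	{ 0x0000, 0x000a, 0x01 },	/* bit clear: needs eSCO 2M */
	{ 0x0000, 0x0007, 0x01 },	/* bit clear: needs eSCO 2M */
	{ ESCO_2EV3, 0x0007, 0x01 },	/* basic-rate fallback */
};

/* Mirrors find_next_esco_param(): skip entries the peer cannot use,
 * return false once the table is exhausted. */
static bool find_next_param(int *attempt, bool peer_esco_2m, int size)
{
	for (; *attempt <= size; (*attempt)++) {
		if (peer_esco_2m ||
		    (table[*attempt - 1].pkt_type & ESCO_2EV3))
			break;
		printf("skipped attempt %d, eSCO 2M not supported\n",
		       *attempt);
	}

	return *attempt <= size;
}

int main(void)
{
	int attempt = 1;

	if (find_next_param(&attempt, false, 3))
		printf("using table entry %d\n", attempt);
	return 0;
}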
@@ -481,8 +458,24 @@ static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle) bool hci_setup_sync(struct hci_conn *conn, __u16 handle) { - if (enhanced_sco_capable(conn->hdev)) - return hci_enhanced_setup_sync_conn(conn, handle); + int result; + struct conn_handle_t *conn_handle; + + if (enhanced_sync_conn_capable(conn->hdev)) { + conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL); + + if (!conn_handle) + return false; + + conn_handle->conn = conn; + conn_handle->handle = handle; + result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync, + conn_handle, NULL); + if (result < 0) + kfree(conn_handle); + + return result == 0; + } return hci_setup_sync_conn(conn, handle); } @@ -544,21 +537,22 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, /* Device _must_ be locked */ void hci_sco_setup(struct hci_conn *conn, __u8 status) { - struct hci_conn *sco = conn->link; + struct hci_link *link; - if (!sco) + link = list_first_entry_or_null(&conn->link_list, struct hci_link, list); + if (!link || !link->conn) return; BT_DBG("hcon %p", conn); if (!status) { if (lmp_esco_capable(conn->hdev)) - hci_setup_sync(sco, conn->handle); + hci_setup_sync(link->conn, conn->handle); else - hci_add_sco(sco, conn->handle); + hci_add_sco(link->conn, conn->handle); } else { - hci_connect_cfm(sco, status); - hci_conn_del(sco); + hci_connect_cfm(link->conn, status); + hci_conn_del(link->conn); } } @@ -582,13 +576,6 @@ static void hci_conn_timeout(struct work_struct *work) if (refcnt > 0) return; - /* LE connections in scanning state need special handling */ - if (conn->state == BT_CONNECT && conn->type == LE_LINK && - test_bit(HCI_CONN_SCANNING, &conn->flags)) { - hci_connect_le_scan_remove(conn); - return; - } - hci_abort_conn(conn, hci_proto_disconn_ind(conn)); } @@ -669,26 +656,325 @@ static void le_conn_timeout(struct work_struct *work) if (conn->role == HCI_ROLE_SLAVE) { /* Disable LE Advertising */ le_disable_advertising(hdev); - hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT); + hci_dev_lock(hdev); + hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT); + hci_dev_unlock(hdev); return; } hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); } -struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, - u8 role) +struct iso_list_data { + union { + u8 cig; + u8 big; + }; + union { + u8 cis; + u8 bis; + u16 sync_handle; + }; + int count; + bool big_term; + bool pa_sync_term; + bool big_sync_term; +}; + +static void bis_list(struct hci_conn *conn, void *data) +{ + struct iso_list_data *d = data; + + /* Skip if not broadcast/ANY address */ + if (bacmp(&conn->dst, BDADDR_ANY)) + return; + + if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET || + d->bis != conn->iso_qos.bcast.bis) + return; + + d->count++; +} + +static int terminate_big_sync(struct hci_dev *hdev, void *data) +{ + struct iso_list_data *d = data; + + bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis); + + hci_disable_per_advertising_sync(hdev, d->bis); + hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL); + + /* Only terminate BIG if it has been created */ + if (!d->big_term) + return 0; + + return hci_le_terminate_big_sync(hdev, d->big, + HCI_ERROR_LOCAL_HOST_TERM); +} + +static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err) +{ + kfree(data); +} + +static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn) +{ + struct iso_list_data *d; + int ret; + + bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", 
conn->iso_qos.bcast.big, + conn->iso_qos.bcast.bis); + + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + d->big = conn->iso_qos.bcast.big; + d->bis = conn->iso_qos.bcast.bis; + d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags); + + ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d, + terminate_big_destroy); + if (ret) + kfree(d); + + return ret; +} + +static int big_terminate_sync(struct hci_dev *hdev, void *data) +{ + struct iso_list_data *d = data; + + bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big, + d->sync_handle); + + if (d->big_sync_term) + hci_le_big_terminate_sync(hdev, d->big); + + if (d->pa_sync_term) + return hci_le_pa_terminate_sync(hdev, d->sync_handle); + + return 0; +} + +static void find_bis(struct hci_conn *conn, void *data) +{ + struct iso_list_data *d = data; + + /* Ignore if BIG doesn't match */ + if (d->big != conn->iso_qos.bcast.big) + return; + + d->count++; +} + +static int hci_le_big_terminate(struct hci_dev *hdev, struct hci_conn *conn) +{ + struct iso_list_data *d; + int ret; + + bt_dev_dbg(hdev, "hcon %p big 0x%2.2x sync_handle 0x%4.4x", conn, + conn->iso_qos.bcast.big, conn->sync_handle); + + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + d->big = conn->iso_qos.bcast.big; + d->sync_handle = conn->sync_handle; + + if (conn->type == PA_LINK && + test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) { + hci_conn_hash_list_flag(hdev, find_bis, PA_LINK, + HCI_CONN_PA_SYNC, d); + + if (!d->count) + d->pa_sync_term = true; + + d->count = 0; + } + + if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) { + hci_conn_hash_list_flag(hdev, find_bis, BIS_LINK, + HCI_CONN_BIG_SYNC, d); + + if (!d->count) + d->big_sync_term = true; + } + + if (!d->pa_sync_term && !d->big_sync_term) + return 0; + + ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d, + terminate_big_destroy); + if (ret) + kfree(d); + + return ret; +} + +/* Cleanup BIS connection + * + * Detects if there any BIS left connected in a BIG + * broadcaster: Remove advertising instance and terminate BIG. + * broadcaster receiver: Terminate BIG sync and terminate PA sync. + */ +static void bis_cleanup(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_conn *bis; + + bt_dev_dbg(hdev, "conn %p", conn); + + if (conn->role == HCI_ROLE_MASTER) { + if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags)) + return; + + /* Check if ISO connection is a BIS and terminate advertising + * set and BIG if there are no other connections using it. 
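hci_le_big_terminate() above only marks PA or BIG sync for termination when find_bis() counts no other connection still using the same BIG. The last-user check, reduced to a standalone model; the data is hypothetical and the connection hash and flag handling are simplified away.

#include <stdio.h>

struct conn {
	unsigned char big;	/* BIG handle this connection belongs to */
};

/* Mirrors find_bis()/d->count: how many remaining connections still
 * reference the given BIG? */
static int count_big_users(const struct conn *conns, int n, unsigned char big)
{
	int count = 0;

	for (int i = 0; i < n; i++)
		if (conns[i].big == big)
			count++;

	return count;
}

int main(void)
{
	/* connections left in the hash after the dying one was removed */
	struct conn remaining[] = { { .big = 2 }, { .big = 2 } };
	unsigned char dying_big = 1;

	if (!count_big_users(remaining, 2, dying_big))
		printf("last user gone: terminate sync for BIG %u\n",
		       dying_big);
	return 0;
}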
+ */ + bis = hci_conn_hash_lookup_big_state(hdev, + conn->iso_qos.bcast.big, + BT_CONNECTED, + HCI_ROLE_MASTER); + if (bis) + return; + + bis = hci_conn_hash_lookup_big_state(hdev, + conn->iso_qos.bcast.big, + BT_CONNECT, + HCI_ROLE_MASTER); + if (bis) + return; + + bis = hci_conn_hash_lookup_big_state(hdev, + conn->iso_qos.bcast.big, + BT_OPEN, + HCI_ROLE_MASTER); + if (bis) + return; + + hci_le_terminate_big(hdev, conn); + } else { + hci_le_big_terminate(hdev, conn); + } +} + +static int remove_cig_sync(struct hci_dev *hdev, void *data) +{ + u8 handle = PTR_UINT(data); + + return hci_le_remove_cig_sync(hdev, handle); +} + +static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle) +{ + bt_dev_dbg(hdev, "handle 0x%2.2x", handle); + + return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle), + NULL); +} + +static void find_cis(struct hci_conn *conn, void *data) +{ + struct iso_list_data *d = data; + + /* Ignore broadcast or if CIG don't match */ + if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig) + return; + + d->count++; +} + +/* Cleanup CIS connection: + * + * Detects if there any CIS left connected in a CIG and remove it. + */ +static void cis_cleanup(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct iso_list_data d; + + if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET) + return; + + memset(&d, 0, sizeof(d)); + d.cig = conn->iso_qos.ucast.cig; + + /* Check if ISO connection is a CIS and remove CIG if there are + * no other connections using it. + */ + hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_BOUND, &d); + hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECT, + &d); + hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECTED, + &d); + if (d.count) + return; + + hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig); +} + +static int hci_conn_hash_alloc_unset(struct hci_dev *hdev) +{ + return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1, + U16_MAX, GFP_ATOMIC); +} + +static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, + bdaddr_t *dst, u8 dst_type, + u8 role, u16 handle) { struct hci_conn *conn; + struct smp_irk *irk = NULL; + + switch (type) { + case ACL_LINK: + if (!hdev->acl_mtu) + return ERR_PTR(-ECONNREFUSED); + break; + case CIS_LINK: + case BIS_LINK: + case PA_LINK: + if (!hdev->iso_mtu) + return ERR_PTR(-ECONNREFUSED); + irk = hci_get_irk(hdev, dst, dst_type); + break; + case LE_LINK: + if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU) + return ERR_PTR(-ECONNREFUSED); + if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU) + return ERR_PTR(-ECONNREFUSED); + irk = hci_get_irk(hdev, dst, dst_type); + break; + case SCO_LINK: + case ESCO_LINK: + if (!hdev->sco_pkts) + /* Controller does not support SCO or eSCO over HCI */ + return ERR_PTR(-ECONNREFUSED); + break; + default: + return ERR_PTR(-ECONNREFUSED); + } - BT_DBG("%s dst %pMR", hdev->name, dst); + bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle); conn = kzalloc(sizeof(*conn), GFP_KERNEL); if (!conn) - return NULL; + return ERR_PTR(-ENOMEM); + + /* If and IRK exists use its identity address */ + if (!irk) { + bacpy(&conn->dst, dst); + conn->dst_type = dst_type; + } else { + bacpy(&conn->dst, &irk->bdaddr); + conn->dst_type = irk->addr_type; + } - bacpy(&conn->dst, dst); bacpy(&conn->src, &hdev->bdaddr); + conn->handle = handle; conn->hdev = hdev; conn->type = type; conn->role = role; @@ -701,6 +987,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, conn->rssi = 
HCI_RSSI_INVALID; conn->tx_power = HCI_TX_POWER_INVALID; conn->max_tx_power = HCI_TX_POWER_INVALID; + conn->sync_handle = HCI_SYNC_HANDLE_INVALID; + conn->sid = HCI_SID_INVALID; set_bit(HCI_CONN_POWER_SAVE, &conn->flags); conn->disc_timeout = HCI_DISCONN_TIMEOUT; @@ -714,10 +1002,28 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, switch (type) { case ACL_LINK: conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK; + conn->mtu = hdev->acl_mtu; break; case LE_LINK: /* conn->src should reflect the local identity address */ hci_copy_identity_address(hdev, &conn->src, &conn->src_type); + conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu; + break; + case CIS_LINK: + /* conn->src should reflect the local identity address */ + hci_copy_identity_address(hdev, &conn->src, &conn->src_type); + + if (conn->role == HCI_ROLE_MASTER) + conn->cleanup = cis_cleanup; + + conn->mtu = hdev->iso_mtu; + break; + case PA_LINK: + case BIS_LINK: + /* conn->src should reflect the local identity address */ + hci_copy_identity_address(hdev, &conn->src, &conn->src_type); + conn->cleanup = bis_cleanup; + conn->mtu = hdev->iso_mtu; break; case SCO_LINK: if (lmp_esco_capable(hdev)) @@ -725,21 +1031,25 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, (hdev->esco_type & EDR_ESCO_MASK); else conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK; + + conn->mtu = hdev->sco_mtu; break; case ESCO_LINK: conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK; + conn->mtu = hdev->sco_mtu; break; } skb_queue_head_init(&conn->data_q); + skb_queue_head_init(&conn->tx_q.queue); INIT_LIST_HEAD(&conn->chan_list); + INIT_LIST_HEAD(&conn->link_list); INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept); INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle); INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout); - INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup); atomic_set(&conn->refcnt, 0); @@ -757,46 +1067,164 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, } hci_conn_init_sysfs(conn); - return conn; } -int hci_conn_del(struct hci_conn *conn) +struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type, + bdaddr_t *dst, u8 dst_type, u8 role) +{ + int handle; + + bt_dev_dbg(hdev, "dst %pMR", dst); + + handle = hci_conn_hash_alloc_unset(hdev); + if (unlikely(handle < 0)) + return ERR_PTR(-ECONNREFUSED); + + return __hci_conn_add(hdev, type, dst, dst_type, role, handle); +} + +struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, + u8 dst_type, u8 role, u16 handle) +{ + if (handle > HCI_CONN_HANDLE_MAX) + return ERR_PTR(-EINVAL); + + return __hci_conn_add(hdev, type, dst, dst_type, role, handle); +} + +static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason) +{ + if (!reason) + reason = HCI_ERROR_REMOTE_USER_TERM; + + /* Due to race, SCO/ISO conn might be not established yet at this point, + * and nothing else will clean it up. In other cases it is done via HCI + * events. 
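hci_conn_add_unset() above parks a new connection on a placeholder handle taken from an IDA range above HCI_CONN_HANDLE_MAX (0x0eff, the highest handle allowed on the wire) until the controller assigns the real one; HCI_CONN_HANDLE_UNSET() then tells the two ranges apart, and the placeholder goes back via ida_free() in hci_conn_cleanup(). A user-space sketch of that split, using a plain bitmap instead of the kernel IDA; the pool size is hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define HCI_CONN_HANDLE_MAX	0x0eff
#define UNSET_POOL		16	/* hypothetical placeholder pool */

static bool handle_unset(unsigned int handle)
{
	return handle > HCI_CONN_HANDLE_MAX;	/* HCI_CONN_HANDLE_UNSET() */
}

/* Models ida_alloc_range(&ida, HCI_CONN_HANDLE_MAX + 1, U16_MAX, ...). */
static int alloc_unset_handle(bool *used)
{
	for (int i = 0; i < UNSET_POOL; i++) {
		if (!used[i]) {
			used[i] = true;
			return HCI_CONN_HANDLE_MAX + 1 + i;
		}
	}

	return -1;	/* pool exhausted: refuse the connection */
}

int main(void)
{
	bool used[UNSET_POOL] = { false };
	int handle = alloc_unset_handle(used);

	if (handle >= 0)
		printf("placeholder 0x%04x, unset=%d\n",
		       handle, handle_unset(handle));
	return 0;
}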
+	switch (conn->type) {
+	case SCO_LINK:
+	case ESCO_LINK:
+		if (HCI_CONN_HANDLE_UNSET(conn->handle))
+			hci_conn_failed(conn, reason);
+		break;
+	case CIS_LINK:
+	case BIS_LINK:
+	case PA_LINK:
+		if ((conn->state != BT_CONNECTED &&
+		     !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) ||
+		    test_bit(HCI_CONN_BIG_CREATED, &conn->flags))
+			hci_conn_failed(conn, reason);
+		break;
+	}
+}
+
+static void hci_conn_unlink(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	bt_dev_dbg(hdev, "hcon %p", conn);
+
+	if (!conn->parent) {
+		struct hci_link *link, *t;
+
+		list_for_each_entry_safe(link, t, &conn->link_list, list) {
+			struct hci_conn *child = link->conn;
+
+			hci_conn_unlink(child);
+
+			/* If hdev is down it means
+			 * hci_dev_close_sync/hci_conn_hash_flush is in
+			 * progress and links don't need to be cleaned up as
+			 * all connections would be cleaned up.
+			 */
+			if (!test_bit(HCI_UP, &hdev->flags))
+				continue;
+
+			hci_conn_cleanup_child(child, conn->abort_reason);
+		}
+
+		return;
+	}
+
+	if (!conn->link)
+		return;
+
+	list_del_rcu(&conn->link->list);
+	synchronize_rcu();
+
+	hci_conn_drop(conn->parent);
+	hci_conn_put(conn->parent);
+	conn->parent = NULL;
+
+	kfree(conn->link);
+	conn->link = NULL;
+}
+
+void hci_conn_del(struct hci_conn *conn)
 {
 	struct hci_dev *hdev = conn->hdev;
 
 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
 
-	cancel_delayed_work_sync(&conn->disc_work);
-	cancel_delayed_work_sync(&conn->auto_accept_work);
-	cancel_delayed_work_sync(&conn->idle_work);
+	hci_conn_unlink(conn);
 
-	if (conn->type == ACL_LINK) {
-		struct hci_conn *sco = conn->link;
-		if (sco)
-			sco->link = NULL;
+	disable_delayed_work_sync(&conn->disc_work);
+	disable_delayed_work_sync(&conn->auto_accept_work);
+	disable_delayed_work_sync(&conn->idle_work);
 
-		/* Unacked frames */
-		hdev->acl_cnt += conn->sent;
-	} else if (conn->type == LE_LINK) {
-		cancel_delayed_work(&conn->le_conn_timeout);
+	/* Remove the connection from the list so unacked logic can detect when
+	 * a certain pool is not being utilized.
+	 */
+	hci_conn_hash_del(hdev, conn);
 
-		if (hdev->le_pkts)
-			hdev->le_cnt += conn->sent;
+	/* Handle unacked frames:
+	 *
+	 * - In case there are no connections, or if restoring the buffers
+	 *   considered in transit would overflow, restore all buffers to the
+	 *   pool.
+ * - Otherwise restore just the buffers considered in transit for the + * hci_conn + */ + switch (conn->type) { + case ACL_LINK: + if (!hci_conn_num(hdev, ACL_LINK) || + hdev->acl_cnt + conn->sent > hdev->acl_pkts) + hdev->acl_cnt = hdev->acl_pkts; else hdev->acl_cnt += conn->sent; - } else { - struct hci_conn *acl = conn->link; - if (acl) { - acl->link = NULL; - hci_conn_drop(acl); + break; + case LE_LINK: + cancel_delayed_work(&conn->le_conn_timeout); + + if (hdev->le_pkts) { + if (!hci_conn_num(hdev, LE_LINK) || + hdev->le_cnt + conn->sent > hdev->le_pkts) + hdev->le_cnt = hdev->le_pkts; + else + hdev->le_cnt += conn->sent; + } else { + if ((!hci_conn_num(hdev, LE_LINK) && + !hci_conn_num(hdev, ACL_LINK)) || + hdev->acl_cnt + conn->sent > hdev->acl_pkts) + hdev->acl_cnt = hdev->acl_pkts; + else + hdev->acl_cnt += conn->sent; } + break; + case CIS_LINK: + case BIS_LINK: + case PA_LINK: + if (!hci_iso_count(hdev) || + hdev->iso_cnt + conn->sent > hdev->iso_pkts) + hdev->iso_cnt = hdev->iso_pkts; + else + hdev->iso_cnt += conn->sent; + break; } - if (conn->amp_mgr) - amp_mgr_put(conn->amp_mgr); - skb_queue_purge(&conn->data_q); + skb_queue_purge(&conn->tx_q.queue); /* Remove the connection from the list and cleanup its remaining * state. This is a separate function since for some cases like @@ -805,7 +1233,8 @@ int hci_conn_del(struct hci_conn *conn) */ hci_conn_cleanup(conn); - return 0; + /* Dequeue callbacks using connection pointer as data */ + hci_cmd_sync_dequeue(hdev, NULL, conn, NULL); } struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type) @@ -819,8 +1248,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type) list_for_each_entry(d, &hci_dev_list, list) { if (!test_bit(HCI_UP, &d->flags) || - hci_dev_test_flag(d, HCI_USER_CHANNEL) || - d->dev_type != HCI_PRIMARY) + hci_dev_test_flag(d, HCI_USER_CHANNEL)) continue; /* Simple routing: @@ -870,40 +1298,11 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type) EXPORT_SYMBOL(hci_get_route); /* This function requires the caller holds hdev->lock */ -void hci_le_conn_failed(struct hci_conn *conn, u8 status) +static void hci_le_conn_failed(struct hci_conn *conn, u8 status) { struct hci_dev *hdev = conn->hdev; - struct hci_conn_params *params; - - params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, - conn->dst_type); - if (params && params->conn) { - hci_conn_drop(params->conn); - hci_conn_put(params->conn); - params->conn = NULL; - } - - conn->state = BT_CLOSED; - - /* If the status indicates successful cancellation of - * the attempt (i.e. Unknown Connection Id) there's no point of - * notifying failure since we'll go back to keep trying to - * connect. The only exception is explicit connect requests - * where a timeout + cancel does indicate an actual failure. - */ - if (status != HCI_ERROR_UNKNOWN_CONN_ID || - (params && params->explicit_connect)) - mgmt_connect_failed(hdev, &conn->dst, conn->type, - conn->dst_type, status); - hci_connect_cfm(conn, status); - - hci_conn_del(conn); - - /* Since we may have temporarily stopped the background scanning in - * favor of connection establishment, we should restart it. - */ - hci_update_passive_scan(hdev); + hci_connect_le_scan_cleanup(conn, status); /* Enable advertising in case this was a failed connection * attempt as a peripheral. 
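The per-type switch above restores controller buffer credits when a connection dies, clamping to the pool size when the counters have drifted or no connection is left. A minimal user-space sketch of that clamping rule; pool/avail/sent are illustrative names, not kernel API:

#include <stdio.h>

static unsigned int restore_credits(unsigned int pool, unsigned int avail,
				    unsigned int sent, int remaining_conns)
{
	/* No connection left in this pool, or the counters drifted so far
	 * that restoring would overflow: reset to the full pool size. */
	if (!remaining_conns || avail + sent > pool)
		return pool;

	/* Otherwise give back exactly what this connection had in flight. */
	return avail + sent;
}

int main(void)
{
	printf("%u\n", restore_credits(10, 7, 2, 3));	/* 9 */
	printf("%u\n", restore_credits(10, 9, 2, 3));	/* clamped: 10 */
	printf("%u\n", restore_credits(10, 7, 2, 0));	/* last conn: 10 */
	return 0;
}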
@@ -911,40 +1310,66 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status) hci_enable_advertising(hdev); } -static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err) +/* This function requires the caller holds hdev->lock */ +void hci_conn_failed(struct hci_conn *conn, u8 status) { - struct hci_conn *conn = data; + struct hci_dev *hdev = conn->hdev; - hci_dev_lock(hdev); + bt_dev_dbg(hdev, "status 0x%2.2x", status); - if (!err) { - hci_connect_le_scan_cleanup(conn); - goto done; + switch (conn->type) { + case LE_LINK: + hci_le_conn_failed(conn, status); + break; + case ACL_LINK: + mgmt_connect_failed(hdev, conn, status); + break; } - bt_dev_err(hdev, "request failed to create LE connection: err %d", err); - - if (!conn) - goto done; - - hci_le_conn_failed(conn, err); + /* In case of BIG/PA sync failed, clear conn flags so that + * the conns will be correctly cleaned up by ISO layer + */ + test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags); + test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags); -done: - hci_dev_unlock(hdev); + conn->state = BT_CLOSED; + hci_connect_cfm(conn, status); + hci_conn_del(conn); } -static int hci_connect_le_sync(struct hci_dev *hdev, void *data) +/* This function requires the caller holds hdev->lock */ +u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle) { - struct hci_conn *conn = data; + struct hci_dev *hdev = conn->hdev; - bt_dev_dbg(hdev, "conn %p", conn); + bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle); - return hci_le_create_conn_sync(hdev, conn); + if (conn->handle == handle) + return 0; + + if (handle > HCI_CONN_HANDLE_MAX) { + bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", + handle, HCI_CONN_HANDLE_MAX); + return HCI_ERROR_INVALID_PARAMETERS; + } + + /* If abort_reason has been sent it means the connection is being + * aborted and the handle shall not be changed. 
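hci_conn_set_handle() here reduces to a short sequence of checks before the placeholder handle is released and replaced. A small user-space model of the same decision order; the constants and error values are illustrative stand-ins:

#include <stdio.h>

#define CONN_HANDLE_MAX    0x0eff	/* largest handle the spec allows */
#define ERR_INVALID_PARAMS 0x12		/* illustrative error value */

struct conn {
	unsigned int handle;
	unsigned char abort_reason;
};

static unsigned char set_handle(struct conn *c, unsigned int handle)
{
	if (c->handle == handle)
		return 0;			/* nothing to do */
	if (handle > CONN_HANDLE_MAX)
		return ERR_INVALID_PARAMS;	/* out of range */
	if (c->abort_reason)
		return c->abort_reason;		/* abort in flight: refuse */
	/* the kernel also ida_free()s the old placeholder at this point */
	c->handle = handle;
	return 0;
}

int main(void)
{
	struct conn c = { .handle = CONN_HANDLE_MAX + 1 };	/* unset */

	printf("%u\n", set_handle(&c, 0x0042));	/* 0: accepted */
	c.abort_reason = 0x16;
	printf("%u\n", set_handle(&c, 0x0043));	/* the abort reason */
	return 0;
}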
+ */ + if (conn->abort_reason) + return conn->abort_reason; + + if (HCI_CONN_HANDLE_UNSET(conn->handle)) + ida_free(&hdev->unset_handle_ida, conn->handle); + + conn->handle = handle; + + return 0; } struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, bool dst_resolved, u8 sec_level, - u16 conn_timeout, u8 role) + u16 conn_timeout, u8 role, u8 phy, u8 sec_phy) { struct hci_conn *conn; struct smp_irk *irk; @@ -997,22 +1422,19 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, if (conn) { bacpy(&conn->dst, dst); } else { - conn = hci_conn_add(hdev, LE_LINK, dst, role); - if (!conn) - return ERR_PTR(-ENOMEM); + conn = hci_conn_add_unset(hdev, LE_LINK, dst, dst_type, role); + if (IS_ERR(conn)) + return conn; hci_conn_hold(conn); conn->pending_sec_level = sec_level; } - conn->dst_type = dst_type; conn->sec_level = BT_SECURITY_LOW; conn->conn_timeout = conn_timeout; + conn->le_adv_phy = phy; + conn->le_adv_sec_phy = sec_phy; - conn->state = BT_CONNECT; - clear_bit(HCI_CONN_SCANNING, &conn->flags); - - err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn, - create_le_conn_complete); + err = hci_connect_le_sync(hdev, conn); if (err) { hci_conn_del(conn); return ERR_PTR(err); @@ -1061,8 +1483,8 @@ static int hci_explicit_conn_params_set(struct hci_dev *hdev, if (params->auto_connect == HCI_AUTO_CONN_DISABLED || params->auto_connect == HCI_AUTO_CONN_REPORT || params->auto_connect == HCI_AUTO_CONN_EXPLICIT) { - list_del_init(¶ms->action); - list_add(¶ms->action, &hdev->pend_le_conns); + hci_pend_le_list_del_init(params); + hci_pend_le_list_add(params, &hdev->pend_le_conns); } params->explicit_connect = true; @@ -1073,6 +1495,121 @@ static int hci_explicit_conn_params_set(struct hci_dev *hdev, return 0; } +static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos) +{ + struct hci_conn *conn; + u8 big; + + /* Allocate a BIG if not set */ + if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) { + for (big = 0x00; big < 0xef; big++) { + + conn = hci_conn_hash_lookup_big(hdev, big); + if (!conn) + break; + } + + if (big == 0xef) + return -EADDRNOTAVAIL; + + /* Update BIG */ + qos->bcast.big = big; + } + + return 0; +} + +static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos) +{ + struct hci_conn *conn; + u8 bis; + + /* Allocate BIS if not set */ + if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) { + if (qos->bcast.big != BT_ISO_QOS_BIG_UNSET) { + conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big); + + if (conn) { + /* If the BIG handle is already matched to an advertising + * handle, do not allocate a new one. + */ + qos->bcast.bis = conn->iso_qos.bcast.bis; + return 0; + } + } + + /* Find an unused adv set to advertise BIS, skip instance 0x00 + * since it is reserved as general purpose set. 
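Both qos_set_big() above and the BIS search that follows allocate an identifier by a linear scan for the first free value in a bounded range. The same pattern in stand-alone C; the in_use predicate is a stand-in for the kernel's connection-hash lookups:

#include <stdbool.h>
#include <stdio.h>

static int find_free_id(bool (*in_use)(unsigned char id),
			unsigned char first, unsigned char limit)
{
	unsigned char id;

	for (id = first; id < limit; id++)
		if (!in_use(id))
			return id;
	return -1;		/* the kernel returns -EADDRNOTAVAIL */
}

static bool big_in_use(unsigned char id)
{
	return id < 2;		/* pretend BIG 0x00 and 0x01 are taken */
}

int main(void)
{
	/* BIG handles range 0x00..0xee, hence the 0xef limit */
	printf("%d\n", find_free_id(big_in_use, 0x00, 0xef));	/* 2 */
	return 0;
}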
+ */ + for (bis = 0x01; bis < hdev->le_num_of_adv_sets; + bis++) { + + conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis); + if (!conn) + break; + } + + if (bis == hdev->le_num_of_adv_sets) + return -EADDRNOTAVAIL; + + /* Update BIS */ + qos->bcast.bis = bis; + } + + return 0; +} + +/* This function requires the caller holds hdev->lock */ +static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 sid, struct bt_iso_qos *qos, + __u8 base_len, __u8 *base, u16 timeout) +{ + struct hci_conn *conn; + int err; + + /* Let's make sure that le is enabled.*/ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { + if (lmp_le_capable(hdev)) + return ERR_PTR(-ECONNREFUSED); + return ERR_PTR(-EOPNOTSUPP); + } + + err = qos_set_big(hdev, qos); + if (err) + return ERR_PTR(err); + + err = qos_set_bis(hdev, qos); + if (err) + return ERR_PTR(err); + + /* Check if the LE Create BIG command has already been sent */ + conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big, + qos->bcast.big); + if (conn) + return ERR_PTR(-EADDRINUSE); + + /* Check BIS settings against other bound BISes, since all + * BISes in a BIG must have the same value for all parameters + */ + conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big); + + if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) || + base_len != conn->le_per_adv_data_len || + memcmp(conn->le_per_adv_data, base, base_len))) + return ERR_PTR(-EADDRINUSE); + + conn = hci_conn_add_unset(hdev, BIS_LINK, dst, 0, HCI_ROLE_MASTER); + if (IS_ERR(conn)) + return conn; + + conn->state = BT_CONNECT; + conn->sid = sid; + conn->conn_timeout = timeout; + + hci_conn_hold(conn); + return conn; +} + /* This function requires the caller holds hdev->lock */ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, @@ -1107,9 +1644,10 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, BT_DBG("requesting refresh of dst_addr"); - conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER); - if (!conn) - return ERR_PTR(-ENOMEM); + conn = hci_conn_add_unset(hdev, LE_LINK, dst, dst_type, + HCI_ROLE_MASTER); + if (IS_ERR(conn)) + return conn; if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) { hci_conn_del(conn); @@ -1118,7 +1656,6 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, conn->state = BT_CONNECT; set_bit(HCI_CONN_SCANNING, &conn->flags); - conn->dst_type = dst_type; conn->sec_level = BT_SECURITY_LOW; conn->pending_sec_level = sec_level; conn->conn_timeout = conn_timeout; @@ -1133,7 +1670,7 @@ done: struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, u8 sec_level, u8 auth_type, - enum conn_reasons conn_reason) + enum conn_reasons conn_reason, u16 timeout) { struct hci_conn *acl; @@ -1144,50 +1681,100 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, return ERR_PTR(-EOPNOTSUPP); } + /* Reject outgoing connection to device with same BD ADDR against + * CVE-2020-26555 + */ + if (!bacmp(&hdev->bdaddr, dst)) { + bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n", + dst); + return ERR_PTR(-ECONNREFUSED); + } + acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); if (!acl) { - acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER); - if (!acl) - return ERR_PTR(-ENOMEM); + acl = hci_conn_add_unset(hdev, ACL_LINK, dst, 0, + HCI_ROLE_MASTER); + if (IS_ERR(acl)) + return acl; } hci_conn_hold(acl); acl->conn_reason = conn_reason; if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { + int err; + 
acl->sec_level = BT_SECURITY_LOW; acl->pending_sec_level = sec_level; acl->auth_type = auth_type; - hci_acl_create_connection(acl); + acl->conn_timeout = timeout; + + err = hci_connect_acl_sync(hdev, acl); + if (err) { + hci_conn_del(acl); + return ERR_PTR(err); + } } return acl; } +static struct hci_link *hci_conn_link(struct hci_conn *parent, + struct hci_conn *conn) +{ + struct hci_dev *hdev = parent->hdev; + struct hci_link *link; + + bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn); + + if (conn->link) + return conn->link; + + if (conn->parent) + return NULL; + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) + return NULL; + + link->conn = hci_conn_hold(conn); + conn->link = link; + conn->parent = hci_conn_get(parent); + + /* Use list_add_tail_rcu append to the list */ + list_add_tail_rcu(&link->list, &parent->link_list); + + return link; +} + struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, - __u16 setting, struct bt_codec *codec) + __u16 setting, struct bt_codec *codec, + u16 timeout) { struct hci_conn *acl; struct hci_conn *sco; + struct hci_link *link; acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING, - CONN_REASON_SCO_CONNECT); + CONN_REASON_SCO_CONNECT, timeout); if (IS_ERR(acl)) return acl; sco = hci_conn_hash_lookup_ba(hdev, type, dst); if (!sco) { - sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER); - if (!sco) { + sco = hci_conn_add_unset(hdev, type, dst, 0, HCI_ROLE_MASTER); + if (IS_ERR(sco)) { hci_conn_drop(acl); - return ERR_PTR(-ENOMEM); + return sco; } } - acl->link = sco; - sco->link = acl; - - hci_conn_hold(sco); + link = hci_conn_link(acl, sco); + if (!link) { + hci_conn_drop(acl); + hci_conn_drop(sco); + return ERR_PTR(-ENOLINK); + } sco->setting = setting; sco->codec = *codec; @@ -1209,6 +1796,591 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, return sco; } +static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_le_create_big cp; + struct iso_list_data data; + + memset(&cp, 0, sizeof(cp)); + + data.big = qos->bcast.big; + data.bis = qos->bcast.bis; + data.count = 0; + + /* Create a BIS for each bound connection */ + hci_conn_hash_list_state(hdev, bis_list, BIS_LINK, + BT_BOUND, &data); + + cp.handle = qos->bcast.big; + cp.adv_handle = qos->bcast.bis; + cp.num_bis = data.count; + hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval); + cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu); + cp.bis.latency = cpu_to_le16(qos->bcast.out.latency); + cp.bis.rtn = qos->bcast.out.rtn; + cp.bis.phy = qos->bcast.out.phy; + cp.bis.packing = qos->bcast.packing; + cp.bis.framing = qos->bcast.framing; + cp.bis.encryption = qos->bcast.encryption; + memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode)); + + return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp); +} + +static int set_cig_params_sync(struct hci_dev *hdev, void *data) +{ + DEFINE_FLEX(struct hci_cp_le_set_cig_params, pdu, cis, num_cis, 0x1f); + u8 cig_id = PTR_UINT(data); + struct hci_conn *conn; + struct bt_iso_qos *qos; + u8 aux_num_cis = 0; + u8 cis_id; + + conn = hci_conn_hash_lookup_cig(hdev, cig_id); + if (!conn) + return 0; + + qos = &conn->iso_qos; + pdu->cig_id = cig_id; + hci_cpu_to_le24(qos->ucast.out.interval, pdu->c_interval); + hci_cpu_to_le24(qos->ucast.in.interval, pdu->p_interval); + pdu->sca = qos->ucast.sca; + pdu->packing = qos->ucast.packing; + pdu->framing = qos->ucast.framing; + pdu->c_latency = 
cpu_to_le16(qos->ucast.out.latency); + pdu->p_latency = cpu_to_le16(qos->ucast.in.latency); + + /* Reprogram all CIS(s) with the same CIG, valid range are: + * num_cis: 0x00 to 0x1F + * cis_id: 0x00 to 0xEF + */ + for (cis_id = 0x00; cis_id < 0xf0 && + aux_num_cis < pdu->num_cis; cis_id++) { + struct hci_cis_params *cis; + + conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id); + if (!conn) + continue; + + qos = &conn->iso_qos; + + cis = &pdu->cis[aux_num_cis++]; + cis->cis_id = cis_id; + cis->c_sdu = cpu_to_le16(conn->iso_qos.ucast.out.sdu); + cis->p_sdu = cpu_to_le16(conn->iso_qos.ucast.in.sdu); + cis->c_phy = qos->ucast.out.phy ? qos->ucast.out.phy : + qos->ucast.in.phy; + cis->p_phy = qos->ucast.in.phy ? qos->ucast.in.phy : + qos->ucast.out.phy; + cis->c_rtn = qos->ucast.out.rtn; + cis->p_rtn = qos->ucast.in.rtn; + } + pdu->num_cis = aux_num_cis; + + if (!pdu->num_cis) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS, + struct_size(pdu, cis, pdu->num_cis), + pdu, HCI_CMD_TIMEOUT); +} + +static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) +{ + struct hci_dev *hdev = conn->hdev; + struct iso_list_data data; + + memset(&data, 0, sizeof(data)); + + /* Allocate first still reconfigurable CIG if not set */ + if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) { + for (data.cig = 0x00; data.cig < 0xf0; data.cig++) { + data.count = 0; + + hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, + BT_CONNECT, &data); + if (data.count) + continue; + + hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, + BT_CONNECTED, &data); + if (!data.count) + break; + } + + if (data.cig == 0xf0) + return false; + + /* Update CIG */ + qos->ucast.cig = data.cig; + } + + if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) { + if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig, + qos->ucast.cis)) + return false; + goto done; + } + + /* Allocate first available CIS if not set */ + for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0; + data.cis++) { + if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig, + data.cis)) { + /* Update CIS */ + qos->ucast.cis = data.cis; + break; + } + } + + if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET) + return false; + +done: + if (hci_cmd_sync_queue(hdev, set_cig_params_sync, + UINT_PTR(qos->ucast.cig), NULL) < 0) + return false; + + return true; +} + +struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, struct bt_iso_qos *qos, + u16 timeout) +{ + struct hci_conn *cis; + + cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig, + qos->ucast.cis); + if (!cis) { + cis = hci_conn_add_unset(hdev, CIS_LINK, dst, dst_type, + HCI_ROLE_MASTER); + if (IS_ERR(cis)) + return cis; + cis->cleanup = cis_cleanup; + cis->dst_type = dst_type; + cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET; + cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET; + cis->conn_timeout = timeout; + } + + if (cis->state == BT_CONNECTED) + return cis; + + /* Check if CIS has been set and the settings matches */ + if (cis->state == BT_BOUND && + !memcmp(&cis->iso_qos, qos, sizeof(*qos))) + return cis; + + /* Update LINK PHYs according to QoS preference */ + cis->le_tx_phy = qos->ucast.out.phy; + cis->le_rx_phy = qos->ucast.in.phy; + + /* If output interval is not set use the input interval as it cannot be + * 0x000000. + */ + if (!qos->ucast.out.interval) + qos->ucast.out.interval = qos->ucast.in.interval; + + /* If input interval is not set use the output interval as it cannot be + * 0x000000. 
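hci_bind_cis() here fills whichever direction of the QoS is unset from its peer direction, since neither interval nor latency may be zero. A self-contained sketch of that symmetric defaulting; the struct and field names are illustrative:

#include <stdio.h>

struct io_qos {
	unsigned int interval;	/* microseconds, must not be 0 */
	unsigned short latency;	/* milliseconds, must not be 0 */
};

static void fill_defaults(struct io_qos *out, struct io_qos *in)
{
	if (!out->interval)
		out->interval = in->interval;
	if (!in->interval)
		in->interval = out->interval;
	if (!out->latency)
		out->latency = in->latency;
	if (!in->latency)
		in->latency = out->latency;
}

int main(void)
{
	struct io_qos out = { .interval = 10000, .latency = 0 };
	struct io_qos in = { .interval = 0, .latency = 10 };

	fill_defaults(&out, &in);
	printf("in %u us, out latency %u ms\n", in.interval, out.latency);
	return 0;
}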
+ */ + if (!qos->ucast.in.interval) + qos->ucast.in.interval = qos->ucast.out.interval; + + /* If output latency is not set use the input latency as it cannot be + * 0x0000. + */ + if (!qos->ucast.out.latency) + qos->ucast.out.latency = qos->ucast.in.latency; + + /* If input latency is not set use the output latency as it cannot be + * 0x0000. + */ + if (!qos->ucast.in.latency) + qos->ucast.in.latency = qos->ucast.out.latency; + + if (!hci_le_set_cig_params(cis, qos)) { + hci_conn_drop(cis); + return ERR_PTR(-EINVAL); + } + + hci_conn_hold(cis); + + cis->iso_qos = *qos; + cis->state = BT_BOUND; + + return cis; +} + +bool hci_iso_setup_path(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_le_setup_iso_path cmd; + + memset(&cmd, 0, sizeof(cmd)); + + if (conn->iso_qos.ucast.out.sdu) { + cmd.handle = cpu_to_le16(conn->handle); + cmd.direction = 0x00; /* Input (Host to Controller) */ + cmd.path = 0x00; /* HCI path if enabled */ + cmd.codec = 0x03; /* Transparent Data */ + + if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd), + &cmd) < 0) + return false; + } + + if (conn->iso_qos.ucast.in.sdu) { + cmd.handle = cpu_to_le16(conn->handle); + cmd.direction = 0x01; /* Output (Controller to Host) */ + cmd.path = 0x00; /* HCI path if enabled */ + cmd.codec = 0x03; /* Transparent Data */ + + if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd), + &cmd) < 0) + return false; + } + + return true; +} + +int hci_conn_check_create_cis(struct hci_conn *conn) +{ + if (conn->type != CIS_LINK) + return -EINVAL; + + if (!conn->parent || conn->parent->state != BT_CONNECTED || + conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle)) + return 1; + + return 0; +} + +static int hci_create_cis_sync(struct hci_dev *hdev, void *data) +{ + return hci_le_create_cis_sync(hdev); +} + +int hci_le_create_cis_pending(struct hci_dev *hdev) +{ + struct hci_conn *conn; + bool pending = false; + + rcu_read_lock(); + + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { + if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) { + rcu_read_unlock(); + return -EBUSY; + } + + if (!hci_conn_check_create_cis(conn)) + pending = true; + } + + rcu_read_unlock(); + + if (!pending) + return 0; + + /* Queue Create CIS */ + return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL); +} + +static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn, + struct bt_iso_io_qos *qos, __u8 phy) +{ + /* Only set MTU if PHY is enabled */ + if (!qos->sdu && qos->phy) + qos->sdu = conn->mtu; + + /* Use the same PHY as ACL if set to any */ + if (qos->phy == BT_ISO_PHY_ANY) + qos->phy = phy; + + /* Use LE ACL connection interval if not set */ + if (!qos->interval) + /* ACL interval unit in 1.25 ms to us */ + qos->interval = conn->le_conn_interval * 1250; + + /* Use LE ACL connection latency if not set */ + if (!qos->latency) + qos->latency = conn->le_conn_latency; +} + +static int create_big_sync(struct hci_dev *hdev, void *data) +{ + struct hci_conn *conn = data; + struct bt_iso_qos *qos = &conn->iso_qos; + u16 interval, sync_interval = 0; + u32 flags = 0; + int err; + + if (qos->bcast.out.phy == 0x02) + flags |= MGMT_ADV_FLAG_SEC_2M; + + /* Align intervals */ + interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor; + + if (qos->bcast.bis) + sync_interval = interval * 4; + + err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->sid, + conn->le_per_adv_data_len, + conn->le_per_adv_data, flags, interval, + interval, sync_interval); + if (err) + return err; + + 
return hci_le_create_big(conn, &conn->iso_qos);
+}
+
+struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+				    __u8 dst_type, __u8 sid,
+				    struct bt_iso_qos *qos)
+{
+	struct hci_conn *conn;
+
+	bt_dev_dbg(hdev, "dst %pMR type %d sid %d", dst, dst_type, sid);
+
+	conn = hci_conn_add_unset(hdev, PA_LINK, dst, dst_type, HCI_ROLE_SLAVE);
+	if (IS_ERR(conn))
+		return conn;
+
+	conn->iso_qos = *qos;
+	conn->sid = sid;
+	conn->state = BT_LISTEN;
+	conn->conn_timeout = msecs_to_jiffies(qos->bcast.sync_timeout * 10);
+
+	hci_conn_hold(conn);
+
+	hci_connect_pa_sync(hdev, conn);
+
+	return conn;
+}
+
+int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+			     struct bt_iso_qos *qos, __u16 sync_handle,
+			     __u8 num_bis, __u8 bis[])
+{
+	int err;
+
+	if (num_bis < 0x01 || num_bis > ISO_MAX_NUM_BIS)
+		return -EINVAL;
+
+	err = qos_set_big(hdev, qos);
+	if (err)
+		return err;
+
+	if (hcon) {
+		/* Update hcon QoS */
+		hcon->iso_qos = *qos;
+
+		hcon->num_bis = num_bis;
+		memcpy(hcon->bis, bis, num_bis);
+		hcon->conn_timeout = msecs_to_jiffies(qos->bcast.timeout * 10);
+	}
+
+	return hci_connect_big_sync(hdev, hcon);
+}
+
+static void create_big_complete(struct hci_dev *hdev, void *data, int err)
+{
+	struct hci_conn *conn = data;
+
+	bt_dev_dbg(hdev, "conn %p", conn);
+
+	if (err) {
+		bt_dev_err(hdev, "Unable to create BIG: %d", err);
+		hci_connect_cfm(conn, err);
+		hci_conn_del(conn);
+	}
+}
+
+struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 sid,
+			      struct bt_iso_qos *qos,
+			      __u8 base_len, __u8 *base, u16 timeout)
+{
+	struct hci_conn *conn;
+	struct hci_conn *parent;
+	__u8 eir[HCI_MAX_PER_AD_LENGTH];
+	struct hci_link *link;
+
+	/* Look for any BIS that is open for rebinding */
+	conn = hci_conn_hash_lookup_big_state(hdev, qos->bcast.big, BT_OPEN,
+					      HCI_ROLE_MASTER);
+	if (conn) {
+		memcpy(qos, &conn->iso_qos, sizeof(*qos));
+		conn->state = BT_CONNECTED;
+		return conn;
+	}
+
+	if (base_len && base)
+		base_len = eir_append_service_data(eir, 0, 0x1851,
+						   base, base_len);
+
+	/* We need a hci_conn object using BDADDR_ANY as dst */
+	conn = hci_add_bis(hdev, dst, sid, qos, base_len, eir, timeout);
+	if (IS_ERR(conn))
+		return conn;
+
+	/* Update LINK PHYs according to QoS preference */
+	conn->le_tx_phy = qos->bcast.out.phy;
+
+	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
+	if (base_len && base) {
+		memcpy(conn->le_per_adv_data, eir, sizeof(eir));
+		conn->le_per_adv_data_len = base_len;
+	}
+
+	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
+			  conn->le_tx_phy ?
conn->le_tx_phy : + hdev->le_tx_def_phys); + + conn->iso_qos = *qos; + conn->state = BT_BOUND; + + /* Link BISes together */ + parent = hci_conn_hash_lookup_big(hdev, + conn->iso_qos.bcast.big); + if (parent && parent != conn) { + link = hci_conn_link(parent, conn); + hci_conn_drop(conn); + if (!link) + return ERR_PTR(-ENOLINK); + } + + return conn; +} + +int hci_past_bis(struct hci_conn *conn, bdaddr_t *dst, __u8 dst_type) +{ + struct hci_conn *le; + + /* Lookup existing LE connection to rebind to */ + le = hci_conn_hash_lookup_le(conn->hdev, dst, dst_type); + if (!le) + return -EINVAL; + + return hci_past_sync(conn, le); +} + +static void bis_mark_per_adv(struct hci_conn *conn, void *data) +{ + struct iso_list_data *d = data; + + /* Skip if not broadcast/ANY address */ + if (bacmp(&conn->dst, BDADDR_ANY)) + return; + + if (d->big != conn->iso_qos.bcast.big || + d->bis == BT_ISO_QOS_BIS_UNSET || + d->bis != conn->iso_qos.bcast.bis) + return; + + set_bit(HCI_CONN_PER_ADV, &conn->flags); +} + +struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, __u8 sid, + struct bt_iso_qos *qos, + __u8 base_len, __u8 *base, u16 timeout) +{ + struct hci_conn *conn; + int err; + struct iso_list_data data; + + conn = hci_bind_bis(hdev, dst, sid, qos, base_len, base, timeout); + if (IS_ERR(conn)) + return conn; + + if (conn->state == BT_CONNECTED) + return conn; + + /* Check if SID needs to be allocated then search for the first + * available. + */ + if (conn->sid == HCI_SID_INVALID) { + u8 sid; + + for (sid = 0; sid <= 0x0f; sid++) { + if (!hci_find_adv_sid(hdev, sid)) { + conn->sid = sid; + break; + } + } + } + + data.big = qos->bcast.big; + data.bis = qos->bcast.bis; + + /* Set HCI_CONN_PER_ADV for all bound connections, to mark that + * the start periodic advertising and create BIG commands have + * been queued + */ + hci_conn_hash_list_state(hdev, bis_mark_per_adv, BIS_LINK, + BT_BOUND, &data); + + /* Queue start periodic advertising and create BIG */ + err = hci_cmd_sync_queue(hdev, create_big_sync, conn, + create_big_complete); + if (err < 0) { + hci_conn_drop(conn); + return ERR_PTR(err); + } + + return conn; +} + +struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, struct bt_iso_qos *qos, + u16 timeout) +{ + struct hci_conn *le; + struct hci_conn *cis; + struct hci_link *link; + + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) + le = hci_connect_le(hdev, dst, dst_type, false, + BT_SECURITY_LOW, + HCI_LE_CONN_TIMEOUT, + HCI_ROLE_SLAVE, 0, 0); + else + le = hci_connect_le_scan(hdev, dst, dst_type, + BT_SECURITY_LOW, + HCI_LE_CONN_TIMEOUT, + CONN_REASON_ISO_CONNECT); + if (IS_ERR(le)) + return le; + + hci_iso_qos_setup(hdev, le, &qos->ucast.out, + le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys); + hci_iso_qos_setup(hdev, le, &qos->ucast.in, + le->le_rx_phy ? 
le->le_rx_phy : hdev->le_rx_def_phys);
+
+	cis = hci_bind_cis(hdev, dst, dst_type, qos, timeout);
+	if (IS_ERR(cis)) {
+		hci_conn_drop(le);
+		return cis;
+	}
+
+	link = hci_conn_link(le, cis);
+	hci_conn_drop(cis);
+	if (!link) {
+		hci_conn_drop(le);
+		return ERR_PTR(-ENOLINK);
+	}
+
+	cis->state = BT_CONNECT;
+
+	hci_le_create_cis_pending(hdev);
+
+	return cis;
+}
+
 /* Check link security requirement */
 int hci_conn_check_link_mode(struct hci_conn *conn)
 {
@@ -1274,12 +2446,10 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
 
-		/* If we're already encrypted set the REAUTH_PEND flag,
-		 * otherwise set the ENCRYPT_PEND.
+		/* Set the ENCRYPT_PEND to trigger encryption after
+		 * authentication.
 		 */
-		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
-			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
-		else
+		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
 			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
 	}
 
@@ -1322,34 +2492,41 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
 	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
 		goto auth;
 
-	/* An authenticated FIPS approved combination key has sufficient
-	 * security for security level 4. */
-	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
-	    sec_level == BT_SECURITY_FIPS)
-		goto encrypt;
-
-	/* An authenticated combination key has sufficient security for
-	   security level 3. */
-	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
-	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
-	    sec_level == BT_SECURITY_HIGH)
-		goto encrypt;
-
-	/* An unauthenticated combination key has sufficient security for
-	   security level 1 and 2. */
-	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
-	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
-	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
-		goto encrypt;
-
-	/* A combination key has always sufficient security for the security
-	   levels 1 or 2. High security level requires the combination key
-	   is generated using maximum PIN code length (16).
-	   For pre 2.1 units. */
-	if (conn->key_type == HCI_LK_COMBINATION &&
-	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
-	     conn->pin_length == 16))
-		goto encrypt;
+	switch (conn->key_type) {
+	case HCI_LK_AUTH_COMBINATION_P256:
+		/* An authenticated FIPS approved combination key has
+		 * sufficient security for security level 4 or lower.
+		 */
+		if (sec_level <= BT_SECURITY_FIPS)
+			goto encrypt;
+		break;
+	case HCI_LK_AUTH_COMBINATION_P192:
+		/* An authenticated combination key has sufficient security for
+		 * security level 3 or lower.
+		 */
+		if (sec_level <= BT_SECURITY_HIGH)
+			goto encrypt;
+		break;
+	case HCI_LK_UNAUTH_COMBINATION_P192:
+	case HCI_LK_UNAUTH_COMBINATION_P256:
+		/* An unauthenticated combination key has sufficient security
+		 * for security level 2 or lower.
+		 */
+		if (sec_level <= BT_SECURITY_MEDIUM)
+			goto encrypt;
+		break;
+	case HCI_LK_COMBINATION:
+		/* A combination key always has sufficient security for
+		 * security level 2 or lower. High security level requires
+		 * that the combination key is generated using the maximum
+		 * PIN code length (16). For pre-2.1 units.
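The key-type switch above effectively caps the security level each stored BR/EDR link key can satisfy before falling through to fresh authentication. A user-space table of those caps; the enum values and names are illustrative, not the kernel's:

#include <stdio.h>

enum sec_level { SEC_LOW = 1, SEC_MEDIUM, SEC_HIGH, SEC_FIPS };
enum key_type { KEY_COMBINATION, KEY_UNAUTH_P192, KEY_UNAUTH_P256,
		KEY_AUTH_P192, KEY_AUTH_P256 };

static enum sec_level max_sec_for_key(enum key_type key, int pin_len)
{
	switch (key) {
	case KEY_AUTH_P256:
		return SEC_FIPS;
	case KEY_AUTH_P192:
		return SEC_HIGH;
	case KEY_UNAUTH_P192:
	case KEY_UNAUTH_P256:
		return SEC_MEDIUM;
	case KEY_COMBINATION:
		/* pre-2.1 keys: a 16-digit PIN lifts the cap */
		return pin_len == 16 ? SEC_HIGH : SEC_MEDIUM;
	}
	return SEC_LOW;
}

int main(void)
{
	printf("combination key, 16-digit PIN, high ok: %d\n",
	       max_sec_for_key(KEY_COMBINATION, 16) >= SEC_HIGH);
	return 0;
}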
+ */ + if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16) + goto encrypt; + break; + default: + break; + } auth: if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) @@ -1444,33 +2621,22 @@ timer: /* Drop all connection on the device */ void hci_conn_hash_flush(struct hci_dev *hdev) { - struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *c, *n; - - BT_DBG("hdev %s", hdev->name); - - list_for_each_entry_safe(c, n, &h->list, list) { - c->state = BT_CLOSED; - - hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); - hci_conn_del(c); - } -} - -/* Check pending connect attempts */ -void hci_conn_check_pending(struct hci_dev *hdev) -{ + struct list_head *head = &hdev->conn_hash.list; struct hci_conn *conn; BT_DBG("hdev %s", hdev->name); - hci_dev_lock(hdev); - - conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); - if (conn) - hci_acl_create_connection(conn); - - hci_dev_unlock(hdev); + /* We should not traverse the list here, because hci_conn_del + * can remove extra links, which may cause the list traversal + * to hit items that have already been released. + */ + while ((conn = list_first_entry_or_null(head, + struct hci_conn, + list)) != NULL) { + conn->state = BT_CLOSED; + hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM); + hci_conn_del(conn); + } } static u32 get_link_mode(struct hci_conn *conn) @@ -1785,3 +2951,237 @@ u32 hci_conn_get_phy(struct hci_conn *conn) return phys; } + +static int abort_conn_sync(struct hci_dev *hdev, void *data) +{ + struct hci_conn *conn = data; + + if (!hci_conn_valid(hdev, conn)) + return -ECANCELED; + + return hci_abort_conn_sync(hdev, conn, conn->abort_reason); +} + +int hci_abort_conn(struct hci_conn *conn, u8 reason) +{ + struct hci_dev *hdev = conn->hdev; + + /* If abort_reason has already been set it means the connection is + * already being aborted so don't attempt to overwrite it. + */ + if (conn->abort_reason) + return 0; + + bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason); + + conn->abort_reason = reason; + + /* If the connection is pending check the command opcode since that + * might be blocking on hci_cmd_sync_work while waiting its respective + * event so we need to hci_cmd_sync_cancel to cancel it. + * + * hci_connect_le serializes the connection attempts so only one + * connection can be in BT_CONNECT at time. + */ + if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) { + switch (hci_skb_event(hdev->sent_cmd)) { + case HCI_EV_CONN_COMPLETE: + case HCI_EV_LE_CONN_COMPLETE: + case HCI_EV_LE_ENHANCED_CONN_COMPLETE: + case HCI_EVT_LE_CIS_ESTABLISHED: + hci_cmd_sync_cancel(hdev, ECANCELED); + break; + } + /* Cancel connect attempt if still queued/pending */ + } else if (!hci_cancel_connect_sync(hdev, conn)) { + return 0; + } + + /* Run immediately if on cmd_sync_work since this may be called + * as a result to MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR which does + * already queue its callback on cmd_sync_work. + */ + return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL); +} + +void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset, + const struct sockcm_cookie *sockc) +{ + struct sock *sk = skb ? skb->sk : NULL; + int key; + + /* This shall be called on a single skb of those generated by user + * sendmsg(), and only when the sendmsg() does not return error to + * user. This is required for keeping the tskey that increments here in + * sync with possible sendmsg() counting by user. 
+ * + * Stream sockets shall set key_offset to sendmsg() length in bytes + * and call with the last fragment, others to 1 and first fragment. + */ + + if (!skb || !sockc || !sk || !key_offset) + return; + + sock_tx_timestamp(sk, sockc, &skb_shinfo(skb)->tx_flags); + + if (sk->sk_type == SOCK_STREAM) + key = atomic_add_return(key_offset, &sk->sk_tskey); + + if (sockc->tsflags & SOF_TIMESTAMPING_OPT_ID && + sockc->tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) { + if (sockc->tsflags & SOCKCM_FLAG_TS_OPT_ID) { + skb_shinfo(skb)->tskey = sockc->ts_opt_id; + } else { + if (sk->sk_type != SOCK_STREAM) + key = atomic_inc_return(&sk->sk_tskey); + skb_shinfo(skb)->tskey = key - 1; + } + } +} + +void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb) +{ + struct tx_queue *comp = &conn->tx_q; + bool track = false; + + /* Emit SND now, ie. just before sending to driver */ + if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP) + __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SND); + + /* COMPLETION tstamp is emitted for tracked skb later in Number of + * Completed Packets event. Available only for flow controlled cases. + * + * TODO: SCO support without flowctl (needs to be done in drivers) + */ + switch (conn->type) { + case CIS_LINK: + case BIS_LINK: + case PA_LINK: + case ACL_LINK: + case LE_LINK: + break; + case SCO_LINK: + case ESCO_LINK: + if (!hci_dev_test_flag(conn->hdev, HCI_SCO_FLOWCTL)) + return; + break; + default: + return; + } + + if (skb->sk && (skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP)) + track = true; + + /* If nothing is tracked, just count extra skbs at the queue head */ + if (!track && !comp->tracked) { + comp->extra++; + return; + } + + if (track) { + skb = skb_clone_sk(skb); + if (!skb) + goto count_only; + + comp->tracked++; + } else { + skb = skb_clone(skb, GFP_KERNEL); + if (!skb) + goto count_only; + } + + skb_queue_tail(&comp->queue, skb); + return; + +count_only: + /* Stop tracking skbs, and only count. This will not emit timestamps for + * the packets, but if we get here something is more seriously wrong. + */ + comp->tracked = 0; + comp->extra += skb_queue_len(&comp->queue) + 1; + skb_queue_purge(&comp->queue); +} + +void hci_conn_tx_dequeue(struct hci_conn *conn) +{ + struct tx_queue *comp = &conn->tx_q; + struct sk_buff *skb; + + /* If there are tracked skbs, the counted extra go before dequeuing real + * skbs, to keep ordering. When nothing is tracked, the ordering doesn't + * matter so dequeue real skbs first to get rid of them ASAP. 
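The queue/dequeue pair around this point keeps two tallies: skbs that need a COMPLETION timestamp are cloned, queued, and counted as tracked, everything else is merely counted as extra, and completions consume extras first whenever anything is tracked so ordering is preserved. A simplified stand-alone model of that accounting; it collapses the kernel's clone queue into a plain counter:

#include <stdio.h>

struct tx_acct {
	unsigned int extra;	/* completions owed to untracked skbs */
	unsigned int tracked;	/* skbs waiting for a COMPLETION tstamp */
};

static const char *complete_one(struct tx_acct *t)
{
	if (t->extra && t->tracked) {	/* keep ordering: extras first */
		t->extra--;
		return "extra";
	}
	if (t->tracked) {
		t->tracked--;
		return "tracked";
	}
	if (t->extra) {
		t->extra--;
		return "extra";
	}
	return "none";
}

int main(void)
{
	struct tx_acct t = { .extra = 1, .tracked = 1 };

	printf("%s then %s\n", complete_one(&t), complete_one(&t));
	return 0;
}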
+ */ + if (comp->extra && (comp->tracked || skb_queue_empty(&comp->queue))) { + comp->extra--; + return; + } + + skb = skb_dequeue(&comp->queue); + if (!skb) + return; + + if (skb->sk) { + comp->tracked--; + __skb_tstamp_tx(skb, NULL, NULL, skb->sk, + SCM_TSTAMP_COMPLETION); + } + + kfree_skb(skb); +} + +u8 *hci_conn_key_enc_size(struct hci_conn *conn) +{ + if (conn->type == ACL_LINK) { + struct link_key *key; + + key = hci_find_link_key(conn->hdev, &conn->dst); + if (!key) + return NULL; + + return &key->pin_len; + } else if (conn->type == LE_LINK) { + struct smp_ltk *ltk; + + ltk = hci_find_ltk(conn->hdev, &conn->dst, conn->dst_type, + conn->role); + if (!ltk) + return NULL; + + return <k->enc_size; + } + + return NULL; +} + +int hci_ethtool_ts_info(unsigned int index, int sk_proto, + struct kernel_ethtool_ts_info *info) +{ + struct hci_dev *hdev; + + hdev = hci_dev_get(index); + if (!hdev) + return -ENODEV; + + info->so_timestamping = + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + info->tx_types = BIT(HWTSTAMP_TX_OFF); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + + switch (sk_proto) { + case BTPROTO_ISO: + case BTPROTO_L2CAP: + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE; + info->so_timestamping |= SOF_TIMESTAMPING_TX_COMPLETION; + break; + case BTPROTO_SCO: + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE; + if (hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL)) + info->so_timestamping |= SOF_TIMESTAMPING_TX_COMPLETION; + break; + } + + hci_dev_put(hdev); + return 0; +} diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 2b7bd3655b07..8ccec73dce45 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -29,17 +29,17 @@ #include <linux/rfkill.h> #include <linux/debugfs.h> #include <linux/crypto.h> +#include <linux/kcov.h> #include <linux/property.h> #include <linux/suspend.h> #include <linux/wait.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/mgmt.h> -#include "hci_request.h" #include "hci_debugfs.h" #include "smp.h" #include "leds.h" @@ -62,53 +62,9 @@ DEFINE_MUTEX(hci_cb_list_lock); /* HCI ID Numbering */ static DEFINE_IDA(hci_index_ida); -static int hci_scan_req(struct hci_request *req, unsigned long opt) -{ - __u8 scan = opt; - - BT_DBG("%s %x", req->hdev->name, scan); - - /* Inquiry and Page scans */ - hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); - return 0; -} - -static int hci_auth_req(struct hci_request *req, unsigned long opt) -{ - __u8 auth = opt; - - BT_DBG("%s %x", req->hdev->name, auth); - - /* Authentication */ - hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); - return 0; -} - -static int hci_encrypt_req(struct hci_request *req, unsigned long opt) -{ - __u8 encrypt = opt; - - BT_DBG("%s %x", req->hdev->name, encrypt); - - /* Encryption */ - hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); - return 0; -} - -static int hci_linkpol_req(struct hci_request *req, unsigned long opt) -{ - __le16 policy = cpu_to_le16(opt); - - BT_DBG("%s %x", req->hdev->name, policy); - - /* Default link policy */ - hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); - return 0; -} - /* Get HCI device by index. * Device is held on return. 
*/ -struct hci_dev *hci_dev_get(int index) +static struct hci_dev *__hci_dev_get(int index, int *srcu_index) { struct hci_dev *hdev = NULL, *d; @@ -121,6 +77,8 @@ struct hci_dev *hci_dev_get(int index) list_for_each_entry(d, &hci_dev_list, list) { if (d->id == index) { hdev = hci_dev_hold(d); + if (srcu_index) + *srcu_index = srcu_read_lock(&d->srcu); break; } } @@ -128,6 +86,22 @@ struct hci_dev *hci_dev_get(int index) return hdev; } +struct hci_dev *hci_dev_get(int index) +{ + return __hci_dev_get(index, NULL); +} + +static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index) +{ + return __hci_dev_get(index, srcu_index); +} + +static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index) +{ + srcu_read_unlock(&hdev->srcu, srcu_index); + hci_dev_put(hdev); +} + /* ---- Inquiry support ---- */ bool hci_discovery_active(struct hci_dev *hdev) @@ -148,8 +122,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state) { int old_state = hdev->discovery.state; - BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state); - if (old_state == state) return; @@ -172,6 +144,8 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state) case DISCOVERY_STOPPING: break; } + + bt_dev_dbg(hdev, "state %u -> %u", old_state, state); } void hci_inquiry_cache_flush(struct hci_dev *hdev) @@ -348,33 +322,12 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) return copied; } -static int hci_inq_req(struct hci_request *req, unsigned long opt) -{ - struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; - struct hci_dev *hdev = req->hdev; - struct hci_cp_inquiry cp; - - BT_DBG("%s", hdev->name); - - if (test_bit(HCI_INQUIRY, &hdev->flags)) - return 0; - - /* Start Inquiry */ - memcpy(&cp.lap, &ir->lap, 3); - cp.length = ir->length; - cp.num_rsp = ir->num_rsp; - hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); - - return 0; -} - int hci_inquiry(void __user *arg) { __u8 __user *ptr = arg; struct hci_inquiry_req ir; struct hci_dev *hdev; int err = 0, do_inquiry = 0, max_rsp; - long timeo; __u8 *buf; if (copy_from_user(&ir, ptr, sizeof(ir))) @@ -394,11 +347,6 @@ int hci_inquiry(void __user *arg) goto done; } - if (hdev->dev_type != HCI_PRIMARY) { - err = -EOPNOTSUPP; - goto done; - } - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = -EOPNOTSUPP; goto done; @@ -418,11 +366,11 @@ int hci_inquiry(void __user *arg) } hci_dev_unlock(hdev); - timeo = ir.length * msecs_to_jiffies(2000); - if (do_inquiry) { - err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, - timeo, NULL); + hci_req_sync_lock(hdev); + err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp); + hci_req_sync_unlock(hdev); + if (err < 0) goto done; @@ -571,6 +519,7 @@ int hci_dev_close(__u16 dev) goto done; } + cancel_work_sync(&hdev->power_on); if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) cancel_delayed_work(&hdev->power_off); @@ -593,6 +542,20 @@ static int hci_dev_do_reset(struct hci_dev *hdev) skb_queue_purge(&hdev->rx_q); skb_queue_purge(&hdev->cmd_q); + /* Cancel these to avoid queueing non-chained pending work */ + hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); + /* Wait for + * + * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) + * queue_delayed_work(&hdev->{cmd,ncmd}_timer) + * + * inside RCU section to see the flag or complete scheduling. + */ + synchronize_rcu(); + /* Explicitly cancel works in case scheduled after setting the flag. 
*/ + cancel_delayed_work(&hdev->cmd_timer); + cancel_delayed_work(&hdev->ncmd_timer); + /* Avoid potential lockdep warnings from the *_flush() calls by * ensuring the workqueue is empty up front. */ @@ -606,8 +569,13 @@ static int hci_dev_do_reset(struct hci_dev *hdev) if (hdev->flush) hdev->flush(hdev); + hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); + atomic_set(&hdev->cmd_cnt, 1); - hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; + hdev->acl_cnt = 0; + hdev->sco_cnt = 0; + hdev->le_cnt = 0; + hdev->iso_cnt = 0; ret = hci_reset_sync(hdev); @@ -618,9 +586,9 @@ static int hci_dev_do_reset(struct hci_dev *hdev) int hci_dev_reset(__u16 dev) { struct hci_dev *hdev; - int err; + int err, srcu_index; - hdev = hci_dev_get(dev); + hdev = hci_dev_get_srcu(dev, &srcu_index); if (!hdev) return -ENODEV; @@ -642,7 +610,7 @@ int hci_dev_reset(__u16 dev) err = hci_dev_do_reset(hdev); done: - hci_dev_put(hdev); + hci_dev_put_srcu(hdev, srcu_index); return err; } @@ -702,7 +670,7 @@ static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan) hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) - hci_req_update_adv_data(hdev, hdev->cur_adv_instance); + hci_update_adv_data(hdev, hdev->cur_adv_instance); mgmt_new_settings(hdev); } @@ -712,6 +680,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) { struct hci_dev *hdev; struct hci_dev_req dr; + __le16 policy; int err = 0; if (copy_from_user(&dr, arg, sizeof(dr))) @@ -731,11 +700,6 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) goto done; } - if (hdev->dev_type != HCI_PRIMARY) { - err = -EOPNOTSUPP; - goto done; - } - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = -EOPNOTSUPP; goto done; @@ -743,8 +707,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) switch (cmd) { case HCISETAUTH: - err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, - HCI_INIT_TIMEOUT, NULL); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); break; case HCISETENCRYPT: @@ -755,19 +719,21 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) if (!test_bit(HCI_AUTH, &hdev->flags)) { /* Auth must be enabled first */ - err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, - HCI_INIT_TIMEOUT, NULL); + err = hci_cmd_sync_status(hdev, + HCI_OP_WRITE_AUTH_ENABLE, + 1, &dr.dev_opt, + HCI_CMD_TIMEOUT); if (err) break; } - err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt, - HCI_INIT_TIMEOUT, NULL); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); break; case HCISETSCAN: - err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, - HCI_INIT_TIMEOUT, NULL); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); /* Ensure that the connectable and discoverable states * get correctly modified as this was a non-mgmt change. 
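Each of the converted HCISET* paths in this hunk now sends exactly one HCI command with a small parameter and waits for its command status. The mapping, modeled as a table in plain C; the opcodes are the values from the Bluetooth Core Specification, while the program itself is only illustrative:

#include <stdio.h>

struct ioctl_cmd {
	const char *name;
	unsigned short opcode;	/* OGF/OCF packed as in the spec */
	unsigned char plen;
};

static const struct ioctl_cmd cmds[] = {
	{ "HCISETAUTH",    0x0c20, 1 },	/* Write Authentication Enable */
	{ "HCISETENCRYPT", 0x0c22, 1 },	/* Write Encryption Mode */
	{ "HCISETSCAN",    0x0c1a, 1 },	/* Write Scan Enable */
	{ "HCISETLINKPOL", 0x080f, 2 },	/* Write Default Link Policy */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++)
		printf("%-14s -> opcode 0x%04x, %u byte(s)\n",
		       cmds[i].name, cmds[i].opcode, cmds[i].plen);
	return 0;
}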
@@ -777,8 +743,10 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) break; case HCISETLINKPOL: - err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt, - HCI_INIT_TIMEOUT, NULL); + policy = cpu_to_le16(dr.dev_opt); + + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, + 2, &policy, HCI_CMD_TIMEOUT); break; case HCISETLINKMODE: @@ -819,7 +787,7 @@ int hci_get_dev_list(void __user *arg) struct hci_dev *hdev; struct hci_dev_list_req *dl; struct hci_dev_req *dr; - int n = 0, size, err; + int n = 0, err; __u16 dev_num; if (get_user(dev_num, (__u16 __user *) arg)) @@ -828,12 +796,11 @@ int hci_get_dev_list(void __user *arg) if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) return -EINVAL; - size = sizeof(*dl) + dev_num * sizeof(*dr); - - dl = kzalloc(size, GFP_KERNEL); + dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL); if (!dl) return -ENOMEM; + dl->dev_num = dev_num; dr = dl->dev_req; read_lock(&hci_dev_list_lock); @@ -847,8 +814,8 @@ int hci_get_dev_list(void __user *arg) if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) flags &= ~BIT(HCI_UP); - (dr + n)->dev_id = hdev->id; - (dr + n)->dev_opt = flags; + dr[n].dev_id = hdev->id; + dr[n].dev_opt = flags; if (++n >= dev_num) break; @@ -856,9 +823,7 @@ int hci_get_dev_list(void __user *arg) read_unlock(&hci_dev_list_lock); dl->dev_num = n; - size = sizeof(*dl) + n * sizeof(*dr); - - err = copy_to_user(arg, dl, size); + err = copy_to_user(arg, dl, struct_size(dl, dev_req, n)); kfree(dl); return err ? -EFAULT : 0; @@ -887,9 +852,9 @@ int hci_get_dev_info(void __user *arg) else flags = hdev->flags; - strcpy(di.name, hdev->name); + strscpy(di.name, hdev->name, sizeof(di.name)); di.bdaddr = hdev->bdaddr; - di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4); + di.type = (hdev->bus & 0x0f); di.flags = flags; di.pkt_type = hdev->pkt_type; if (lmp_bredr_capable(hdev)) { @@ -919,20 +884,51 @@ int hci_get_dev_info(void __user *arg) /* ---- Interface to HCI drivers ---- */ +static int hci_dev_do_poweroff(struct hci_dev *hdev) +{ + int err; + + BT_DBG("%s %p", hdev->name, hdev); + + hci_req_sync_lock(hdev); + + err = hci_set_powered_sync(hdev, false); + + hci_req_sync_unlock(hdev); + + return err; +} + static int hci_rfkill_set_block(void *data, bool blocked) { struct hci_dev *hdev = data; + int err; BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return -EBUSY; + if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED)) + return 0; + if (blocked) { hci_dev_set_flag(hdev, HCI_RFKILLED); + if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG)) - hci_dev_do_close(hdev); + !hci_dev_test_flag(hdev, HCI_CONFIG)) { + err = hci_dev_do_poweroff(hdev); + if (err) { + bt_dev_err(hdev, "Error when powering off device on rfkill (%d)", + err); + + /* Make sure the device is still closed even if + * anything during power off sequence (eg. + * disconnecting devices) failed. 
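The rfkill path above now tries an orderly power-off first and falls back to a forced close only when that fails, so the device is never left half-up. The shape of that fallback, sketched in stand-alone C with illustrative stand-ins for the two kernel calls:

#include <stdio.h>

static int power_off_gracefully(int simulate_failure)
{
	return simulate_failure ? -5 : 0;	/* pretend -EIO on failure */
}

static void force_close(void)
{
	puts("forced close");
}

static void rfkill_block(int simulate_failure)
{
	int err = power_off_gracefully(simulate_failure);

	if (err) {
		fprintf(stderr, "power off failed (%d), closing anyway\n", err);
		force_close();
	}
}

int main(void)
{
	rfkill_block(0);	/* clean path: nothing printed */
	rfkill_block(1);	/* error path: falls back to close */
	return 0;
}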
+ */ + hci_dev_do_close(hdev); + } + } } else { hci_dev_clear_flag(hdev, HCI_RFKILLED); } @@ -974,8 +970,7 @@ static void hci_power_on(struct work_struct *work) */ if (hci_dev_test_flag(hdev, HCI_RFKILLED) || hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || - (hdev->dev_type == HCI_PRIMARY && - !bacmp(&hdev->bdaddr, BDADDR_ANY) && + (!bacmp(&hdev->bdaddr, BDADDR_ANY) && !bacmp(&hdev->static_addr, BDADDR_ANY))) { hci_dev_clear_flag(hdev, HCI_AUTO_OFF); hci_dev_do_close(hdev); @@ -1028,6 +1023,7 @@ static void hci_error_reset(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); + hci_dev_hold(hdev); BT_DBG("%s", hdev->name); if (hdev->hw_error) @@ -1035,10 +1031,10 @@ static void hci_error_reset(struct work_struct *work) else bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code); - if (hci_dev_do_close(hdev)) - return; + if (!hci_dev_do_close(hdev)) + hci_dev_do_open(hdev); - hci_dev_do_open(hdev); + hci_dev_put(hdev); } void hci_uuids_clear(struct hci_dev *hdev) @@ -1053,9 +1049,9 @@ void hci_uuids_clear(struct hci_dev *hdev) void hci_link_keys_clear(struct hci_dev *hdev) { - struct link_key *key; + struct link_key *key, *tmp; - list_for_each_entry(key, &hdev->link_keys, list) { + list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) { list_del_rcu(&key->list); kfree_rcu(key, rcu); } @@ -1063,9 +1059,9 @@ void hci_link_keys_clear(struct hci_dev *hdev) void hci_smp_ltks_clear(struct hci_dev *hdev) { - struct smp_ltk *k; + struct smp_ltk *k, *tmp; - list_for_each_entry(k, &hdev->long_term_keys, list) { + list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { list_del_rcu(&k->list); kfree_rcu(k, rcu); } @@ -1073,9 +1069,9 @@ void hci_smp_ltks_clear(struct hci_dev *hdev) void hci_smp_irks_clear(struct hci_dev *hdev) { - struct smp_irk *k; + struct smp_irk *k, *tmp; - list_for_each_entry(k, &hdev->identity_resolving_keys, list) { + list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { list_del_rcu(&k->list); kfree_rcu(k, rcu); } @@ -1083,9 +1079,9 @@ void hci_smp_irks_clear(struct hci_dev *hdev) void hci_blocked_keys_clear(struct hci_dev *hdev) { - struct blocked_key *b; + struct blocked_key *b, *tmp; - list_for_each_entry(b, &hdev->blocked_keys, list) { + list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) { list_del_rcu(&b->list); kfree_rcu(b, rcu); } @@ -1260,12 +1256,10 @@ struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, if (addr_type == irk->addr_type && bacmp(bdaddr, &irk->bdaddr) == 0) { irk_to_return = irk; - goto done; + break; } } -done: - if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, irk_to_return->val)) { bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", @@ -1395,10 +1389,10 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { - struct smp_ltk *k; + struct smp_ltk *k, *tmp; int removed = 0; - list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { + list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) continue; @@ -1414,9 +1408,9 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { - struct smp_irk *k; + struct smp_irk *k, *tmp; - list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { + list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, 
list) {
 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
 			continue;
 
@@ -1469,17 +1463,18 @@ static void hci_cmd_timeout(struct work_struct *work)
 	struct hci_dev *hdev = container_of(work, struct hci_dev,
 					    cmd_timer.work);
 
-	if (hdev->sent_cmd) {
-		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
-		u16 opcode = __le16_to_cpu(sent->opcode);
+	if (hdev->req_skb) {
+		u16 opcode = hci_skb_opcode(hdev->req_skb);
 
 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
+
+		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
 	} else {
 		bt_dev_err(hdev, "command tx timeout");
 	}
 
-	if (hdev->cmd_timeout)
-		hdev->cmd_timeout(hdev);
+	if (hdev->reset)
+		hdev->reset(hdev);
 
 	atomic_set(&hdev->cmd_cnt, 1);
 	queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -1606,6 +1601,19 @@ struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
 }
 
 /* This function requires the caller holds hdev->lock */
+struct adv_info *hci_find_adv_sid(struct hci_dev *hdev, u8 sid)
+{
+	struct adv_info *adv;
+
+	list_for_each_entry(adv, &hdev->adv_instances, list) {
+		if (adv->sid == sid)
+			return adv;
+	}
+
+	return NULL;
+}
+
+/* This function requires the caller holds hdev->lock */
 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
 {
 	struct adv_info *cur_instance;
@@ -1665,12 +1673,12 @@ void hci_adv_instances_clear(struct hci_dev *hdev)
 	struct adv_info *adv_instance, *n;
 
 	if (hdev->adv_instance_timeout) {
-		cancel_delayed_work(&hdev->adv_instance_expire);
+		disable_delayed_work(&hdev->adv_instance_expire);
 		hdev->adv_instance_timeout = 0;
 	}
 
 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
-		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
+		disable_delayed_work_sync(&adv_instance->rpa_expired_cb);
 		list_del(&adv_instance->list);
 		kfree(adv_instance);
 	}
@@ -1690,63 +1698,93 @@ static void adv_instance_rpa_expired(struct work_struct *work)
 }
 
 /* This function requires the caller holds hdev->lock */
-int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
-			 u16 adv_data_len, u8 *adv_data,
-			 u16 scan_rsp_len, u8 *scan_rsp_data,
-			 u16 timeout, u16 duration, s8 tx_power,
-			 u32 min_interval, u32 max_interval)
+struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
+				      u32 flags, u16 adv_data_len, u8 *adv_data,
+				      u16 scan_rsp_len, u8 *scan_rsp_data,
+				      u16 timeout, u16 duration, s8 tx_power,
+				      u32 min_interval, u32 max_interval,
+				      u8 mesh_handle)
 {
-	struct adv_info *adv_instance;
+	struct adv_info *adv;
 
-	adv_instance = hci_find_adv_instance(hdev, instance);
-	if (adv_instance) {
-		memset(adv_instance->adv_data, 0,
-		       sizeof(adv_instance->adv_data));
-		memset(adv_instance->scan_rsp_data, 0,
-		       sizeof(adv_instance->scan_rsp_data));
+	adv = hci_find_adv_instance(hdev, instance);
+	if (adv) {
+		memset(adv->adv_data, 0, sizeof(adv->adv_data));
+		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
+		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
 	} else {
 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
-		    instance < 1 || instance > hdev->le_num_of_adv_sets)
-			return -EOVERFLOW;
+		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);
 
-		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
-		if (!adv_instance)
-			return -ENOMEM;
+		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
+		if (!adv)
+			return ERR_PTR(-ENOMEM);
+
+		adv->pending = true;
+		adv->instance = instance;
+
+		/* If the controller supports only one set and the instance is
+		 * set to 1 then there is no option other than using handle
0x00. + */ + if (hdev->le_num_of_adv_sets == 1 && instance == 1) + adv->handle = 0x00; + else + adv->handle = instance; - adv_instance->pending = true; - adv_instance->instance = instance; - list_add(&adv_instance->list, &hdev->adv_instances); + list_add(&adv->list, &hdev->adv_instances); hdev->adv_instance_cnt++; } - adv_instance->flags = flags; - adv_instance->adv_data_len = adv_data_len; - adv_instance->scan_rsp_len = scan_rsp_len; - adv_instance->min_interval = min_interval; - adv_instance->max_interval = max_interval; - adv_instance->tx_power = tx_power; - - if (adv_data_len) - memcpy(adv_instance->adv_data, adv_data, adv_data_len); + adv->flags = flags; + adv->min_interval = min_interval; + adv->max_interval = max_interval; + adv->tx_power = tx_power; + /* Defining a mesh_handle changes the timing units to ms, + * rather than seconds, and ties the instance to the requested + * mesh_tx queue. + */ + adv->mesh = mesh_handle; - if (scan_rsp_len) - memcpy(adv_instance->scan_rsp_data, - scan_rsp_data, scan_rsp_len); + hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data, + scan_rsp_len, scan_rsp_data); - adv_instance->timeout = timeout; - adv_instance->remaining_time = timeout; + adv->timeout = timeout; + adv->remaining_time = timeout; if (duration == 0) - adv_instance->duration = hdev->def_multi_adv_rotation_duration; + adv->duration = hdev->def_multi_adv_rotation_duration; else - adv_instance->duration = duration; + adv->duration = duration; - INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb, - adv_instance_rpa_expired); + INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired); BT_DBG("%s for %dMR", hdev->name, instance); - return 0; + return adv; +} + +/* This function requires the caller holds hdev->lock */ +struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u8 sid, + u32 flags, u8 data_len, u8 *data, + u32 min_interval, u32 max_interval) +{ + struct adv_info *adv; + + adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL, + 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE, + min_interval, max_interval, 0); + if (IS_ERR(adv)) + return adv; + + adv->sid = sid; + adv->periodic = true; + adv->per_adv_data_len = data_len; + + if (data) + memcpy(adv->per_adv_data, data, data_len); + + return adv; } /* This function requires the caller holds hdev->lock */ @@ -1754,29 +1792,33 @@ int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data) { - struct adv_info *adv_instance; + struct adv_info *adv; - adv_instance = hci_find_adv_instance(hdev, instance); + adv = hci_find_adv_instance(hdev, instance); /* If advertisement doesn't exist, we can't modify its data */ - if (!adv_instance) + if (!adv) return -ENOENT; - if (adv_data_len) { - memset(adv_instance->adv_data, 0, - sizeof(adv_instance->adv_data)); - memcpy(adv_instance->adv_data, adv_data, adv_data_len); - adv_instance->adv_data_len = adv_data_len; + if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) { + memset(adv->adv_data, 0, sizeof(adv->adv_data)); + memcpy(adv->adv_data, adv_data, adv_data_len); + adv->adv_data_len = adv_data_len; + adv->adv_data_changed = true; } - if (scan_rsp_len) { - memset(adv_instance->scan_rsp_data, 0, - sizeof(adv_instance->scan_rsp_data)); - memcpy(adv_instance->scan_rsp_data, - scan_rsp_data, scan_rsp_len); - adv_instance->scan_rsp_len = scan_rsp_len; + if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) { + memset(adv->scan_rsp_data, 0, 
sizeof(adv->scan_rsp_data)); + memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len); + adv->scan_rsp_len = scan_rsp_len; + adv->scan_rsp_changed = true; } + /* Mark as changed if there are flags which would affect it */ + if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) || + adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) + adv->scan_rsp_changed = true; + return 0; } @@ -1865,159 +1907,128 @@ void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) if (monitor->handle) idr_remove(&hdev->adv_monitors_idr, monitor->handle); - if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) { + if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) hdev->adv_monitors_cnt--; - mgmt_adv_monitor_removed(hdev, monitor->handle); - } kfree(monitor); } -int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status) -{ - return mgmt_add_adv_patterns_monitor_complete(hdev, status); -} - -int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status) -{ - return mgmt_remove_adv_monitor_complete(hdev, status); -} - /* Assigns handle to a monitor, and if offloading is supported and power is on, * also attempts to forward the request to the controller. - * Returns true if request is forwarded (result is pending), false otherwise. - * This function requires the caller holds hdev->lock. + * This function requires the caller holds hci_req_sync_lock. */ -bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, - int *err) +int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { int min, max, handle; + int status = 0; - *err = 0; + if (!monitor) + return -EINVAL; - if (!monitor) { - *err = -EINVAL; - return false; - } + hci_dev_lock(hdev); min = HCI_MIN_ADV_MONITOR_HANDLE; max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, GFP_KERNEL); - if (handle < 0) { - *err = handle; - return false; - } + + hci_dev_unlock(hdev); + + if (handle < 0) + return handle; monitor->handle = handle; if (!hdev_is_powered(hdev)) - return false; + return status; switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_NONE: - hci_update_passive_scan(hdev); - bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err); + bt_dev_dbg(hdev, "add monitor %d status %d", + monitor->handle, status); /* Message was not forwarded to controller - not an error */ - return false; + break; + case HCI_ADV_MONITOR_EXT_MSFT: - *err = msft_add_monitor_pattern(hdev, monitor); - bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name, - *err); + status = msft_add_monitor_pattern(hdev, monitor); + bt_dev_dbg(hdev, "add monitor %d msft status %d", + handle, status); break; } - return (*err == 0); + return status; } /* Attempts to tell the controller and free the monitor. If somehow the * controller doesn't have a corresponding handle, remove anyway. - * Returns true if request is forwarded (result is pending), false otherwise. - * This function requires the caller holds hdev->lock. + * This function requires the caller holds hci_req_sync_lock. 
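In the hci_add_adv_instance() rework above, failure is reported through the returned pointer rather than a separate int, so callers move to the ERR_PTR convention. A minimal caller sketch under that assumption (the instance, flag and interval values are placeholders, not from the patch):

	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, 0x01, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   0x0800, 0x0800, 0x00);
	if (IS_ERR(adv))
		return PTR_ERR(adv);	/* -EOVERFLOW or -ENOMEM */

	/* adv->pending stays true until the controller confirms the set. */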
*/ -static bool hci_remove_adv_monitor(struct hci_dev *hdev, - struct adv_monitor *monitor, - u16 handle, int *err) +static int hci_remove_adv_monitor(struct hci_dev *hdev, + struct adv_monitor *monitor) { - *err = 0; + int status = 0; + int handle; switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */ + bt_dev_dbg(hdev, "remove monitor %d status %d", + monitor->handle, status); goto free_monitor; + case HCI_ADV_MONITOR_EXT_MSFT: - *err = msft_remove_monitor(hdev, monitor, handle); + handle = monitor->handle; + status = msft_remove_monitor(hdev, monitor); + bt_dev_dbg(hdev, "remove monitor %d msft status %d", + handle, status); break; } /* In case no matching handle registered, just free the monitor */ - if (*err == -ENOENT) + if (status == -ENOENT) goto free_monitor; - return (*err == 0); + return status; free_monitor: - if (*err == -ENOENT) + if (status == -ENOENT) bt_dev_warn(hdev, "Removing monitor with no matching handle %d", monitor->handle); hci_free_adv_monitor(hdev, monitor); - *err = 0; - return false; + return status; } -/* Returns true if request is forwarded (result is pending), false otherwise. - * This function requires the caller holds hdev->lock. - */ -bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err) +/* This function requires the caller holds hci_req_sync_lock */ +int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle) { struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle); - bool pending; - - if (!monitor) { - *err = -EINVAL; - return false; - } - - pending = hci_remove_adv_monitor(hdev, monitor, handle, err); - if (!*err && !pending) - hci_update_passive_scan(hdev); - bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending", - hdev->name, handle, *err, pending ? "" : "not "); + if (!monitor) + return -EINVAL; - return pending; + return hci_remove_adv_monitor(hdev, monitor); } -/* Returns true if request is forwarded (result is pending), false otherwise. - * This function requires the caller holds hdev->lock. - */ -bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err) +/* This function requires the caller holds hci_req_sync_lock */ +int hci_remove_all_adv_monitor(struct hci_dev *hdev) { struct adv_monitor *monitor; int idr_next_id = 0; - bool pending = false; - bool update = false; - - *err = 0; + int status = 0; - while (!*err && !pending) { + while (1) { monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id); if (!monitor) break; - pending = hci_remove_adv_monitor(hdev, monitor, 0, err); + status = hci_remove_adv_monitor(hdev, monitor); + if (status) + return status; - if (!*err && !pending) - update = true; + idr_next_id++; } - if (update) - hci_update_passive_scan(hdev); - - bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending", - hdev->name, *err, pending ? 
"" : "not "); - - return pending; + return status; } /* This function requires the caller holds hdev->lock */ @@ -2153,7 +2164,7 @@ int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, bacpy(&entry->bdaddr, bdaddr); entry->bdaddr_type = type; - bitmap_from_u64(entry->flags, flags); + entry->flags = flags; list_add(&entry->list, list); @@ -2199,26 +2210,6 @@ int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, return 0; } -int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, - u8 type) -{ - struct bdaddr_list_with_flags *entry; - - if (!bacmp(bdaddr, BDADDR_ANY)) { - hci_bdaddr_list_clear(list); - return 0; - } - - entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type); - if (!entry) - return -ENOENT; - - list_del(&entry->list); - kfree(entry); - - return 0; -} - /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) @@ -2235,22 +2226,46 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, return NULL; } -/* This function requires the caller holds hdev->lock */ +/* This function requires the caller holds hdev->lock or rcu_read_lock */ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *param; - list_for_each_entry(param, list, action) { + rcu_read_lock(); + + list_for_each_entry_rcu(param, list, action) { if (bacmp(¶m->addr, addr) == 0 && - param->addr_type == addr_type) + param->addr_type == addr_type) { + rcu_read_unlock(); return param; + } } + rcu_read_unlock(); + return NULL; } /* This function requires the caller holds hdev->lock */ +void hci_pend_le_list_del_init(struct hci_conn_params *param) +{ + if (list_empty(¶m->action)) + return; + + list_del_rcu(¶m->action); + synchronize_rcu(); + INIT_LIST_HEAD(¶m->action); +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_list_add(struct hci_conn_params *param, + struct list_head *list) +{ + list_add_rcu(¶m->action, list); +} + +/* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { @@ -2283,14 +2298,15 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, return params; } -static void hci_conn_params_free(struct hci_conn_params *params) +void hci_conn_params_free(struct hci_conn_params *params) { + hci_pend_le_list_del_init(params); + if (params->conn) { hci_conn_drop(params->conn); hci_conn_put(params->conn); } - list_del(¶ms->action); list_del(¶ms->list); kfree(params); } @@ -2328,8 +2344,7 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev) continue; } - list_del(¶ms->list); - kfree(params); + hci_conn_params_free(params); } BT_DBG("All LE disabled connection parameters were removed"); @@ -2392,15 +2407,29 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, container_of(nb, struct hci_dev, suspend_notifier); int ret = 0; - if (action == PM_SUSPEND_PREPARE) + /* Userspace has full control of this device. Do nothing. */ + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) + return NOTIFY_DONE; + + /* To avoid a potential race with hci_unregister_dev. 
*/ + hci_dev_hold(hdev); + + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: ret = hci_suspend_dev(hdev); - else if (action == PM_POST_SUSPEND) + break; + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: ret = hci_resume_dev(hdev); + break; + } if (ret) bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", action, ret); + hci_dev_put(hdev); return NOTIFY_DONE; } @@ -2420,6 +2449,11 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) if (!hdev) return NULL; + if (init_srcu_struct(&hdev->srcu)) { + kfree(hdev); + return NULL; + } + hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); hdev->esco_type = (ESCO_HV1); hdev->link_mode = (HCI_LM_ACCEPT); @@ -2442,16 +2476,16 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) hdev->le_adv_channel_map = 0x07; hdev->le_adv_min_interval = 0x0800; hdev->le_adv_max_interval = 0x0800; - hdev->le_scan_interval = 0x0060; - hdev->le_scan_window = 0x0030; - hdev->le_scan_int_suspend = 0x0400; - hdev->le_scan_window_suspend = 0x0012; + hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST; + hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST; + hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1; + hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1; hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; - hdev->le_scan_int_adv_monitor = 0x0060; - hdev->le_scan_window_adv_monitor = 0x0030; - hdev->le_scan_int_connect = 0x0060; - hdev->le_scan_window_connect = 0x0060; + hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST; + hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST; + hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN; + hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN; hdev->le_conn_min_interval = 0x0018; hdev->le_conn_max_interval = 0x0028; hdev->le_conn_latency = 0x0000; @@ -2468,7 +2502,7 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; - hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT; + hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT; hdev->min_le_tx_power = HCI_TX_POWER_INVALID; hdev->max_le_tx_power = HCI_TX_POWER_INVALID; @@ -2486,7 +2520,11 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); + mutex_init(&hdev->mgmt_pending_lock); + + ida_init(&hdev->unset_handle_ida); + INIT_LIST_HEAD(&hdev->mesh_pending); INIT_LIST_HEAD(&hdev->mgmt_pending); INIT_LIST_HEAD(&hdev->reject_list); INIT_LIST_HEAD(&hdev->accept_list); @@ -2503,6 +2541,7 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) INIT_LIST_HEAD(&hdev->conn_hash.list); INIT_LIST_HEAD(&hdev->adv_instances); INIT_LIST_HEAD(&hdev->blocked_keys); + INIT_LIST_HEAD(&hdev->monitored_devices); INIT_LIST_HEAD(&hdev->local_codecs); INIT_WORK(&hdev->rx_work, hci_rx_work); @@ -2524,7 +2563,7 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); - hci_request_setup(hdev); + hci_devcd_setup(hdev); hci_init_sysfs(hdev); discovery_init(hdev); @@ -2549,24 +2588,15 @@ int hci_register_dev(struct hci_dev *hdev) if (!hdev->open || !hdev->close || !hdev->send) return -EINVAL; - /* Do not allow HCI_AMP devices to register at index 0, - * so the index can be used as the AMP controller ID. 
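init_srcu_struct() gives each hdev a sleepable RCU domain: hci_unregister_dev() later waits in synchronize_srcu() before cleanup_srcu_struct(), so a read-side section that may sleep can pin the device state for its duration. A minimal sketch of such a reader (the patch itself only shows setup and teardown; this usage is an assumption):

	int idx;

	idx = srcu_read_lock(&hdev->srcu);
	/* May sleep here; unregistration cannot complete until
	 * srcu_read_unlock() because it blocks in synchronize_srcu().
	 */
	srcu_read_unlock(&hdev->srcu, idx);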
- */ - switch (hdev->dev_type) { - case HCI_PRIMARY: - id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL); - break; - case HCI_AMP: - id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL); - break; - default: - return -EINVAL; - } - + id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL); if (id < 0) return id; - sprintf(hdev->name, "hci%d", id); + error = dev_set_name(&hdev->dev, "hci%u", id); + if (error) + return error; + + hdev->name = dev_name(&hdev->dev); hdev->id = id; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); @@ -2588,8 +2618,6 @@ int hci_register_dev(struct hci_dev *hdev) if (!IS_ERR_OR_NULL(bt_debugfs)) hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); - dev_set_name(&hdev->dev, "%s", hdev->name); - error = device_add(&hdev->dev); if (error < 0) goto err_wqueue; @@ -2612,12 +2640,10 @@ int hci_register_dev(struct hci_dev *hdev) hci_dev_set_flag(hdev, HCI_SETUP); hci_dev_set_flag(hdev, HCI_AUTO_OFF); - if (hdev->dev_type == HCI_PRIMARY) { - /* Assume BR/EDR support until proven otherwise (such as - * through reading supported features during init. - */ - hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); - } + /* Assume BR/EDR support until proven otherwise (such as + * through reading supported features during init. + */ + hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); write_lock(&hci_dev_list_lock); list_add(&hdev->list, &hci_dev_list); @@ -2626,24 +2652,21 @@ int hci_register_dev(struct hci_dev *hdev) /* Devices that are marked for raw-only usage are unconfigured * and should not be included in normal operation. */ - if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE)) hci_dev_set_flag(hdev, HCI_UNCONFIGURED); /* Mark Remote Wakeup connection flag as supported if driver has wakeup * callback. 
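The registration path above moves from the deprecated ida_simple_get()/ida_simple_remove() to ida_alloc_max()/ida_free(), bounding the index at HCI_MAX_ID - 1 and deriving hdev->name from the device name. Note the hunk returns straight from a dev_set_name() failure; the usual pairing would also release the id, roughly as below (the error-path ida_free() is an editorial illustration, not part of the patch):

	id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
	if (id < 0)
		return id;	/* -ENOSPC once the range is exhausted */

	error = dev_set_name(&hdev->dev, "hci%u", id);
	if (error) {
		ida_free(&hci_index_ida, id);	/* illustrative cleanup */
		return error;
	}

	hdev->name = dev_name(&hdev->dev);
	hdev->id = id;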
*/ if (hdev->wakeup) - set_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, hdev->conn_flags); + hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP; hci_sock_dev_event(hdev, HCI_DEV_REG); hci_dev_hold(hdev); - if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { - hdev->suspend_notifier.notifier_call = hci_suspend_notifier; - error = register_pm_notifier(&hdev->suspend_notifier); - if (error) - goto err_wqueue; - } + error = hci_register_suspend_notifier(hdev); + if (error) + BT_WARN("register suspend notifier failed error:%d\n", error); queue_work(hdev->req_workqueue, &hdev->power_on); @@ -2657,7 +2680,7 @@ err_wqueue: destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); err: - ida_simple_remove(&hci_index_ida, hdev->id); + ida_free(&hci_index_ida, hdev->id); return error; } @@ -2668,20 +2691,26 @@ void hci_unregister_dev(struct hci_dev *hdev) { BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); + mutex_lock(&hdev->unregister_lock); hci_dev_set_flag(hdev, HCI_UNREGISTER); + mutex_unlock(&hdev->unregister_lock); write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); - cancel_work_sync(&hdev->power_on); + synchronize_srcu(&hdev->srcu); + cleanup_srcu_struct(&hdev->srcu); - hci_cmd_sync_clear(hdev); + disable_work_sync(&hdev->rx_work); + disable_work_sync(&hdev->cmd_work); + disable_work_sync(&hdev->tx_work); + disable_work_sync(&hdev->power_on); + disable_work_sync(&hdev->error_reset); - if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) - unregister_pm_notifier(&hdev->suspend_notifier); + hci_cmd_sync_clear(hdev); - msft_unregister(hdev); + hci_unregister_suspend_notifier(hdev); hci_dev_do_close(hdev); @@ -2735,13 +2764,68 @@ void hci_release_dev(struct hci_dev *hdev) hci_conn_params_clear_all(hdev); hci_discovery_filter_clear(hdev); hci_blocked_keys_clear(hdev); + hci_codec_list_clear(&hdev->local_codecs); + msft_release(hdev); hci_dev_unlock(hdev); - ida_simple_remove(&hci_index_ida, hdev->id); + ida_destroy(&hdev->unset_handle_ida); + ida_free(&hci_index_ida, hdev->id); + kfree_skb(hdev->sent_cmd); + kfree_skb(hdev->req_skb); + kfree_skb(hdev->recv_event); kfree(hdev); } EXPORT_SYMBOL(hci_release_dev); +int hci_register_suspend_notifier(struct hci_dev *hdev) +{ + int ret = 0; + + if (!hdev->suspend_notifier.notifier_call && + !hci_test_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER)) { + hdev->suspend_notifier.notifier_call = hci_suspend_notifier; + ret = register_pm_notifier(&hdev->suspend_notifier); + } + + return ret; +} + +int hci_unregister_suspend_notifier(struct hci_dev *hdev) +{ + int ret = 0; + + if (hdev->suspend_notifier.notifier_call) { + ret = unregister_pm_notifier(&hdev->suspend_notifier); + if (!ret) + hdev->suspend_notifier.notifier_call = NULL; + } + + return ret; +} + +/* Cancel ongoing command synchronously: + * + * - Cancel command timer + * - Reset command counter + * - Cancel command request + */ +static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err) +{ + bt_dev_dbg(hdev, "err 0x%2.2x", err); + + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + disable_delayed_work_sync(&hdev->cmd_timer); + disable_delayed_work_sync(&hdev->ncmd_timer); + } else { + cancel_delayed_work_sync(&hdev->cmd_timer); + cancel_delayed_work_sync(&hdev->ncmd_timer); + } + + atomic_set(&hdev->cmd_cnt, 1); + + hci_cmd_sync_cancel_sync(hdev, err); +} + /* Suspend HCI device */ int hci_suspend_dev(struct hci_dev *hdev) { @@ -2758,6 +2842,9 @@ int hci_suspend_dev(struct hci_dev *hdev) if (mgmt_powering_down(hdev)) return 0; + /* 
Cancel potentially blocking sync operation before suspend */ + hci_cancel_cmd_sync(hdev, EHOSTDOWN); + hci_req_sync_lock(hdev); ret = hci_suspend_sync(hdev); hci_req_sync_unlock(hdev); @@ -2818,19 +2905,55 @@ int hci_reset_dev(struct hci_dev *hdev) } EXPORT_SYMBOL(hci_reset_dev); +static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (hdev->classify_pkt_type) + return hdev->classify_pkt_type(hdev, skb); + + return hci_skb_pkt_type(skb); +} + /* Receive frame from HCI drivers */ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) { + u8 dev_pkt_type; + if (!hdev || (!test_bit(HCI_UP, &hdev->flags) && !test_bit(HCI_INIT, &hdev->flags))) { kfree_skb(skb); return -ENXIO; } - if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT && - hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && - hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && - hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) { + /* Check if the driver agrees with packet type classification */ + dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb); + if (hci_skb_pkt_type(skb) != dev_pkt_type) { + hci_skb_pkt_type(skb) = dev_pkt_type; + } + + switch (hci_skb_pkt_type(skb)) { + case HCI_EVENT_PKT: + break; + case HCI_ACLDATA_PKT: + /* Detect if ISO packet has been sent as ACL */ + if (hci_conn_num(hdev, CIS_LINK) || + hci_conn_num(hdev, BIS_LINK) || + hci_conn_num(hdev, PA_LINK)) { + __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); + __u8 type; + + type = hci_conn_lookup_type(hdev, hci_handle(handle)); + if (type == CIS_LINK || type == BIS_LINK || + type == PA_LINK) + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + } + break; + case HCI_SCODATA_PKT: + break; + case HCI_ISODATA_PKT: + break; + case HCI_DRV_PKT: + break; + default: kfree_skb(skb); return -EINVAL; } @@ -2938,6 +3061,15 @@ static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return -EINVAL; } + if (hci_skb_pkt_type(skb) == HCI_DRV_PKT) { + /* Intercept HCI Drv packet here and don't go with hdev->send + * callback.
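classify_pkt_type is an optional per-driver hook consulted before the packet-type switch above; a vendor driver whose transport mislabels ISO frames as ACL could override it along these lines (btusb_classify and vendor_frame_is_iso are hypothetical names, not part of the patch):

	static u8 btusb_classify(struct hci_dev *hdev, struct sk_buff *skb)
	{
		/* Hypothetical transport quirk: ISO data tagged as ACL. */
		if (hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT &&
		    vendor_frame_is_iso(skb))
			return HCI_ISODATA_PKT;

		return hci_skb_pkt_type(skb);	/* agree with the stack */
	}

	/* At probe time: hdev->classify_pkt_type = btusb_classify; */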
+ */ + err = hci_drv_process_cmd(hdev, skb); + kfree_skb(skb); + return err; + } + err = hdev->send(hdev, skb); if (err < 0) { bt_dev_err(hdev, "sending frame failed (%d)", err); @@ -2948,6 +3080,13 @@ static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return 0; } +static int hci_send_conn_frame(struct hci_dev *hdev, struct hci_conn *conn, + struct sk_buff *skb) +{ + hci_conn_tx_queue(conn, skb); + return hci_send_frame(hdev, skb); +} + /* Send HCI command */ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, const void *param) @@ -2956,7 +3095,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); - skb = hci_prepare_cmd(hdev, opcode, plen, param); + skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); if (!skb) { bt_dev_err(hdev, "no memory for command"); return -ENOMEM; @@ -2991,7 +3130,7 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, return -EINVAL; } - skb = hci_prepare_cmd(hdev, opcode, plen, param); + skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); if (!skb) { bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", opcode); @@ -3005,21 +3144,64 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, EXPORT_SYMBOL(__hci_cmd_send); /* Get data from the previously sent command */ -void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) +static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode) { struct hci_command_hdr *hdr; - if (!hdev->sent_cmd) + if (!skb || skb->len < HCI_COMMAND_HDR_SIZE) return NULL; - hdr = (void *) hdev->sent_cmd->data; + hdr = (void *)skb->data; if (hdr->opcode != cpu_to_le16(opcode)) return NULL; - BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); + return skb->data + HCI_COMMAND_HDR_SIZE; +} - return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; +/* Get data from the previously sent command */ +void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) +{ + void *data; + + /* Check if opcode matches last sent command */ + data = hci_cmd_data(hdev->sent_cmd, opcode); + if (!data) + /* Check if opcode matches last request */ + data = hci_cmd_data(hdev->req_skb, opcode); + + return data; +} + +/* Get data from last received event */ +void *hci_recv_event_data(struct hci_dev *hdev, __u8 event) +{ + struct hci_event_hdr *hdr; + int offset; + + if (!hdev->recv_event) + return NULL; + + hdr = (void *)hdev->recv_event->data; + offset = sizeof(*hdr); + + if (hdr->evt != event) { + /* In case of LE metaevent check the subevent match */ + if (hdr->evt == HCI_EV_LE_META) { + struct hci_ev_le_meta *ev; + + ev = (void *)hdev->recv_event->data + offset; + offset += sizeof(*ev); + if (ev->subevent == event) + goto found; + } + return NULL; + } + +found: + bt_dev_dbg(hdev, "event 0x%2.2x", event); + + return hdev->recv_event->data + offset; } /* Send ACL data */ @@ -3047,17 +3229,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; - switch (hdev->dev_type) { - case HCI_PRIMARY: - hci_add_acl_hdr(skb, conn->handle, flags); - break; - case HCI_AMP: - hci_add_acl_hdr(skb, chan->handle, flags); - break; - default: - bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); - return; - } + hci_add_acl_hdr(skb, conn->handle, flags); list = skb_shinfo(skb)->frag_list; if (!list) { @@ -3095,6 +3267,8 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, spin_unlock_bh(&queue->lock); } + + bt_dev_dbg(hdev, "chan %p queued %d", 
chan, skb_queue_len(queue)); } void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) @@ -3126,12 +3300,124 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; skb_queue_tail(&conn->data_q, skb); + + bt_dev_dbg(hdev, "hcon %p queued %d", conn, + skb_queue_len(&conn->data_q)); + + queue_work(hdev->workqueue, &hdev->tx_work); +} + +/* Send ISO data */ +static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags) +{ + struct hci_iso_hdr *hdr; + int len = skb->len; + + skb_push(skb, HCI_ISO_HDR_SIZE); + skb_reset_transport_header(skb); + hdr = (struct hci_iso_hdr *)skb_transport_header(skb); + hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); + hdr->dlen = cpu_to_le16(len); +} + +static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue, + struct sk_buff *skb) +{ + struct hci_dev *hdev = conn->hdev; + struct sk_buff *list; + __u16 flags; + + skb->len = skb_headlen(skb); + skb->data_len = 0; + + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + + list = skb_shinfo(skb)->frag_list; + + flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00); + hci_add_iso_hdr(skb, conn->handle, flags); + + if (!list) { + /* Non fragmented */ + BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); + + skb_queue_tail(queue, skb); + } else { + /* Fragmented */ + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + skb_shinfo(skb)->frag_list = NULL; + + __skb_queue_tail(queue, skb); + + do { + skb = list; list = list->next; + + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END, + 0x00); + hci_add_iso_hdr(skb, conn->handle, flags); + + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + __skb_queue_tail(queue, skb); + } while (list); + } + + bt_dev_dbg(hdev, "hcon %p queued %d", conn, skb_queue_len(queue)); +} + +void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("%s len %d", hdev->name, skb->len); + + hci_queue_iso(conn, &conn->data_q, skb); + queue_work(hdev->workqueue, &hdev->tx_work); } /* ---- HCI TX task (outgoing data) ---- */ /* HCI Connection scheduler */ +static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) +{ + struct hci_dev *hdev; + int cnt, q; + + if (!conn) { + *quote = 0; + return; + } + + hdev = conn->hdev; + + switch (conn->type) { + case ACL_LINK: + cnt = hdev->acl_cnt; + break; + case SCO_LINK: + case ESCO_LINK: + cnt = hdev->sco_cnt; + break; + case LE_LINK: + cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; + break; + case CIS_LINK: + case BIS_LINK: + case PA_LINK: + cnt = hdev->iso_cnt; + break; + default: + cnt = 0; + bt_dev_err(hdev, "unknown link type %d", conn->type); + } + + q = cnt / num; + *quote = q ? 
q : 1; +} + static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) { @@ -3145,9 +3431,14 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != type || skb_queue_empty(&c->data_q)) + if (c->type != type || + skb_queue_empty(&c->data_q)) continue; + bt_dev_dbg(hdev, "hcon %p state %s queued %d", c, + state_to_string(c->state), + skb_queue_len(&c->data_q)); + if (c->state != BT_CONNECTED && c->state != BT_CONFIG) continue; @@ -3164,29 +3455,7 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, rcu_read_unlock(); - if (conn) { - int cnt, q; - - switch (conn->type) { - case ACL_LINK: - cnt = hdev->acl_cnt; - break; - case SCO_LINK: - case ESCO_LINK: - cnt = hdev->sco_cnt; - break; - case LE_LINK: - cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; - break; - default: - cnt = 0; - bt_dev_err(hdev, "unknown link type %d", conn->type); - } - - q = cnt / num; - *quote = q ? q : 1; - } else - *quote = 0; + hci_quote_sent(conn, num, quote); BT_DBG("conn %p quote %d", conn, *quote); return conn; @@ -3199,10 +3468,10 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) bt_dev_err(hdev, "link tx timeout"); - rcu_read_lock(); + hci_dev_lock(hdev); /* Kill stalled connections */ - list_for_each_entry_rcu(c, &h->list, list) { + list_for_each_entry(c, &h->list, list) { if (c->type == type && c->sent) { bt_dev_err(hdev, "killing stalled connection %pMR", &c->dst); @@ -3210,7 +3479,7 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) } } - rcu_read_unlock(); + hci_dev_unlock(hdev); } static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, @@ -3220,7 +3489,7 @@ static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, struct hci_chan *chan = NULL; unsigned int num = 0, min = ~0, cur_prio = 0; struct hci_conn *conn; - int cnt, q, conn_num = 0; + int conn_num = 0; BT_DBG("%s", hdev->name); @@ -3270,27 +3539,8 @@ static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, if (!chan) return NULL; - switch (chan->conn->type) { - case ACL_LINK: - cnt = hdev->acl_cnt; - break; - case AMP_LINK: - cnt = hdev->block_cnt; - break; - case SCO_LINK: - case ESCO_LINK: - cnt = hdev->sco_cnt; - break; - case LE_LINK: - cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; - break; - default: - cnt = 0; - bt_dev_err(hdev, "unknown link type %d", chan->conn->type); - } + hci_quote_sent(chan->conn, num, quote); - q = cnt / num; - *quote = q ? 
q : 1; BT_DBG("chan %p quote %d", chan, *quote); return chan; } @@ -3345,69 +3595,82 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) } -static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) -{ - /* Calculate count of blocks used by this packet */ - return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); -} - -static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) -{ - if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { - /* ACL tx timeout must be longer than maximum - * link supervision timeout (40.9 seconds) */ - if (!cnt && time_after(jiffies, hdev->acl_last_tx + - HCI_ACL_TX_TIMEOUT)) - hci_link_tx_to(hdev, ACL_LINK); - } -} - -/* Schedule SCO */ -static void hci_sched_sco(struct hci_dev *hdev) +static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) { - struct hci_conn *conn; - struct sk_buff *skb; - int quote; + unsigned long timeout; - BT_DBG("%s", hdev->name); - - if (!hci_conn_num(hdev, SCO_LINK)) + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) return; - while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) { - while (quote-- && (skb = skb_dequeue(&conn->data_q))) { - BT_DBG("skb %p len %d", skb, skb->len); - hci_send_frame(hdev, skb); - - conn->sent++; - if (conn->sent == ~0) - conn->sent = 0; - } + switch (type) { + case ACL_LINK: + /* tx timeout must be longer than maximum link supervision + * timeout (40.9 seconds) + */ + timeout = hdev->acl_last_tx + HCI_ACL_TX_TIMEOUT; + break; + case LE_LINK: + /* tx timeout must be longer than maximum link supervision + * timeout (40.9 seconds) + */ + timeout = hdev->le_last_tx + HCI_ACL_TX_TIMEOUT; + break; + case CIS_LINK: + case BIS_LINK: + case PA_LINK: + /* tx timeout must be longer than the maximum transport latency + * (8.388607 seconds) + */ + timeout = hdev->iso_last_tx + HCI_ISO_TX_TIMEOUT; + break; + default: + return; } + + if (!cnt && time_after(jiffies, timeout)) + hci_link_tx_to(hdev, type); } -static void hci_sched_esco(struct hci_dev *hdev) +/* Schedule SCO */ +static void hci_sched_sco(struct hci_dev *hdev, __u8 type) { struct hci_conn *conn; struct sk_buff *skb; - int quote; + int quote, *cnt; + unsigned int pkts = hdev->sco_pkts; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, "type %u", type); - if (!hci_conn_num(hdev, ESCO_LINK)) + if (!hci_conn_num(hdev, type) || !pkts) return; - while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, - "e))) { + /* Use sco_pkts if flow control has not been enabled which will limit + * the amount of buffer sent in a row. + */ + if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL)) + cnt = &pkts; + else + cnt = &hdev->sco_cnt; + + while (*cnt && (conn = hci_low_sent(hdev, type, "e))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); - hci_send_frame(hdev, skb); + hci_send_conn_frame(hdev, conn, skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; + (*cnt)--; } } + + /* Rescheduled if all packets were sent and flow control is not enabled + * as there could be more packets queued that could not be sent and + * since no HCI_EV_NUM_COMP_PKTS event will be generated the reschedule + * needs to be forced. 
+ */ + if (!pkts && !hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL)) + queue_work(hdev->workqueue, &hdev->tx_work); } static void hci_sched_acl_pkt(struct hci_dev *hdev) @@ -3417,7 +3680,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) struct sk_buff *skb; int quote; - __check_timeout(hdev, cnt); + __check_timeout(hdev, cnt, ACL_LINK); while (hdev->acl_cnt && (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { @@ -3435,7 +3698,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) hci_conn_enter_active_mode(chan->conn, bt_cb(skb)->force_active); - hci_send_frame(hdev, skb); + hci_send_conn_frame(hdev, chan->conn, skb); hdev->acl_last_tx = jiffies; hdev->acl_cnt--; @@ -3443,8 +3706,8 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) chan->conn->sent++; /* Send pending SCO packets right away */ - hci_sched_sco(hdev); - hci_sched_esco(hdev); + hci_sched_sco(hdev, SCO_LINK); + hci_sched_sco(hdev, ESCO_LINK); } } @@ -3452,100 +3715,34 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) hci_prio_recalculate(hdev, ACL_LINK); } -static void hci_sched_acl_blk(struct hci_dev *hdev) -{ - unsigned int cnt = hdev->block_cnt; - struct hci_chan *chan; - struct sk_buff *skb; - int quote; - u8 type; - - __check_timeout(hdev, cnt); - - BT_DBG("%s", hdev->name); - - if (hdev->dev_type == HCI_AMP) - type = AMP_LINK; - else - type = ACL_LINK; - - while (hdev->block_cnt > 0 && - (chan = hci_chan_sent(hdev, type, "e))) { - u32 priority = (skb_peek(&chan->data_q))->priority; - while (quote > 0 && (skb = skb_peek(&chan->data_q))) { - int blocks; - - BT_DBG("chan %p skb %p len %d priority %u", chan, skb, - skb->len, skb->priority); - - /* Stop if priority has changed */ - if (skb->priority < priority) - break; - - skb = skb_dequeue(&chan->data_q); - - blocks = __get_blocks(hdev, skb); - if (blocks > hdev->block_cnt) - return; - - hci_conn_enter_active_mode(chan->conn, - bt_cb(skb)->force_active); - - hci_send_frame(hdev, skb); - hdev->acl_last_tx = jiffies; - - hdev->block_cnt -= blocks; - quote -= blocks; - - chan->sent += blocks; - chan->conn->sent += blocks; - } - } - - if (cnt != hdev->block_cnt) - hci_prio_recalculate(hdev, type); -} - static void hci_sched_acl(struct hci_dev *hdev) { BT_DBG("%s", hdev->name); /* No ACL link over BR/EDR controller */ - if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY) + if (!hci_conn_num(hdev, ACL_LINK)) return; - /* No AMP link over AMP controller */ - if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP) - return; - - switch (hdev->flow_ctl_mode) { - case HCI_FLOW_CTL_MODE_PACKET_BASED: - hci_sched_acl_pkt(hdev); - break; - - case HCI_FLOW_CTL_MODE_BLOCK_BASED: - hci_sched_acl_blk(hdev); - break; - } + hci_sched_acl_pkt(hdev); } static void hci_sched_le(struct hci_dev *hdev) { struct hci_chan *chan; struct sk_buff *skb; - int quote, cnt, tmp; + int quote, *cnt, tmp; BT_DBG("%s", hdev->name); if (!hci_conn_num(hdev, LE_LINK)) return; - cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt; + cnt = hdev->le_pkts ? 
&hdev->le_cnt : &hdev->acl_cnt; - __check_timeout(hdev, cnt); + __check_timeout(hdev, *cnt, LE_LINK); - tmp = cnt; - while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { + tmp = *cnt; + while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote-- && (skb = skb_peek(&chan->data_q))) { BT_DBG("chan %p skb %p len %d priority %u", chan, skb, @@ -3557,40 +3754,69 @@ static void hci_sched_le(struct hci_dev *hdev) skb = skb_dequeue(&chan->data_q); - hci_send_frame(hdev, skb); + hci_send_conn_frame(hdev, chan->conn, skb); hdev->le_last_tx = jiffies; - cnt--; + (*cnt)--; chan->sent++; chan->conn->sent++; /* Send pending SCO packets right away */ - hci_sched_sco(hdev); - hci_sched_esco(hdev); + hci_sched_sco(hdev, SCO_LINK); + hci_sched_sco(hdev, ESCO_LINK); } } - if (hdev->le_pkts) - hdev->le_cnt = cnt; - else - hdev->acl_cnt = cnt; - - if (cnt != tmp) + if (*cnt != tmp) hci_prio_recalculate(hdev, LE_LINK); } +/* Schedule iso */ +static void hci_sched_iso(struct hci_dev *hdev, __u8 type) +{ + struct hci_conn *conn; + struct sk_buff *skb; + int quote, *cnt; + + BT_DBG("%s", hdev->name); + + if (!hci_conn_num(hdev, type)) + return; + + cnt = &hdev->iso_cnt; + + __check_timeout(hdev, *cnt, type); + + while (*cnt && (conn = hci_low_sent(hdev, type, "e))) { + while (quote-- && (skb = skb_dequeue(&conn->data_q))) { + BT_DBG("skb %p len %d", skb, skb->len); + + hci_send_conn_frame(hdev, conn, skb); + hdev->iso_last_tx = jiffies; + + conn->sent++; + if (conn->sent == ~0) + conn->sent = 0; + (*cnt)--; + } + } +} + static void hci_tx_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); struct sk_buff *skb; - BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, - hdev->sco_cnt, hdev->le_cnt); + BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt, + hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt); if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { /* Schedule queues and send stuff to HCI driver */ - hci_sched_sco(hdev); - hci_sched_esco(hdev); + hci_sched_sco(hdev, SCO_LINK); + hci_sched_sco(hdev, ESCO_LINK); + hci_sched_iso(hdev, CIS_LINK); + hci_sched_iso(hdev, BIS_LINK); + hci_sched_iso(hdev, PA_LINK); hci_sched_acl(hdev); hci_sched_le(hdev); } @@ -3605,72 +3831,96 @@ static void hci_tx_work(struct work_struct *work) /* ACL data packet */ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_acl_hdr *hdr = (void *) skb->data; - struct hci_conn *conn; + struct hci_acl_hdr *hdr; __u16 handle, flags; + int err; - skb_pull(skb, HCI_ACL_HDR_SIZE); + hdr = skb_pull_data(skb, sizeof(*hdr)); + if (!hdr) { + bt_dev_err(hdev, "ACL packet too small"); + kfree_skb(skb); + return; + } handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); - BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, - handle, flags); + bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, + handle, flags); hdev->stat.acl_rx++; - hci_dev_lock(hdev); - conn = hci_conn_hash_lookup_handle(hdev, handle); - hci_dev_unlock(hdev); - - if (conn) { - hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); - - /* Send to upper protocol */ - l2cap_recv_acldata(conn, skb, flags); - return; - } else { + err = l2cap_recv_acldata(hdev, handle, skb, flags); + if (err == -ENOENT) bt_dev_err(hdev, "ACL packet for unknown connection handle %d", handle); - } - - kfree_skb(skb); + else if (err) + bt_dev_dbg(hdev, "ACL packet 
recv for handle %d failed: %d", + handle, err); } /* SCO data packet */ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_sco_hdr *hdr = (void *) skb->data; - struct hci_conn *conn; + struct hci_sco_hdr *hdr; __u16 handle, flags; + int err; - skb_pull(skb, HCI_SCO_HDR_SIZE); + hdr = skb_pull_data(skb, sizeof(*hdr)); + if (!hdr) { + bt_dev_err(hdev, "SCO packet too small"); + kfree_skb(skb); + return; + } handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); - BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, - handle, flags); + bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, + handle, flags); hdev->stat.sco_rx++; - hci_dev_lock(hdev); - conn = hci_conn_hash_lookup_handle(hdev, handle); - hci_dev_unlock(hdev); + hci_skb_pkt_status(skb) = flags & 0x03; - if (conn) { - /* Send to upper protocol */ - bt_cb(skb)->sco.pkt_status = flags & 0x03; - sco_recv_scodata(conn, skb); + err = sco_recv_scodata(hdev, handle, skb); + if (err == -ENOENT) + bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d", + handle); + else if (err) + bt_dev_dbg(hdev, "SCO packet recv for handle %d failed: %d", + handle, err); +} + +static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_iso_hdr *hdr; + __u16 handle, flags; + int err; + + hdr = skb_pull_data(skb, sizeof(*hdr)); + if (!hdr) { + bt_dev_err(hdev, "ISO packet too small"); + kfree_skb(skb); return; - } else { - bt_dev_err(hdev, "SCO packet for unknown connection handle %d", - handle); } - kfree_skb(skb); + handle = __le16_to_cpu(hdr->handle); + flags = hci_flags(handle); + handle = hci_handle(handle); + + bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, + handle, flags); + + err = iso_recv(hdev, handle, skb, flags); + if (err == -ENOENT) + bt_dev_err(hdev, "ISO packet for unknown connection handle %d", + handle); + else if (err) + bt_dev_dbg(hdev, "ISO packet recv for handle %d failed: %d", + handle, err); } static bool hci_req_is_complete(struct hci_dev *hdev) @@ -3740,17 +3990,19 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, if (!status && !hci_req_is_complete(hdev)) return; + skb = hdev->req_skb; + /* If this was the last command in a request the complete - * callback would be found in hdev->sent_cmd instead of the + * callback would be found in hdev->req_skb instead of the * command queue (hdev->cmd_q). 
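The three data-path handlers above now share the same defensive opening: skb_pull_data() returns NULL instead of advancing past the tail, so a truncated header is caught before any field of it is read. The pattern reduced to its core:

	struct hci_acl_hdr *hdr;

	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {		/* fewer than sizeof(*hdr) bytes present */
		bt_dev_err(hdev, "ACL packet too small");
		kfree_skb(skb);
		return;
	}
	/* hdr is valid and skb->data now points at the payload. */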
*/ - if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) { - *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb; + if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) { + *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; return; } - if (bt_cb(hdev->sent_cmd)->hci.req_complete) { - *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete; + if (skb && bt_cb(skb)->hci.req_complete) { + *req_complete = bt_cb(skb)->hci.req_complete; return; } @@ -3766,7 +4018,7 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; else *req_complete = bt_cb(skb)->hci.req_complete; - kfree_skb(skb); + dev_kfree_skb_irq(skb); } spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); } @@ -3778,7 +4030,14 @@ static void hci_rx_work(struct work_struct *work) BT_DBG("%s", hdev->name); - while ((skb = skb_dequeue(&hdev->rx_q))) { + /* The kcov_remote functions used for collecting packet parsing + * coverage information from this background thread and associate + * the coverage with the syscall's thread which originally injected + * the packet. This helps fuzzing the kernel. + */ + for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) { + kcov_remote_start_common(skb_get_kcov_handle(skb)); + /* Send copy to monitor */ hci_send_to_monitor(hdev, skb); @@ -3827,6 +4086,11 @@ static void hci_rx_work(struct work_struct *work) hci_scodata_packet(hdev, skb); break; + case HCI_ISODATA_PKT: + BT_DBG("%s ISO data packet", hdev->name); + hci_isodata_packet(hdev, skb); + break; + default: kfree_skb(skb); break; @@ -3834,10 +4098,47 @@ static void hci_rx_work(struct work_struct *work) } } +static int hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb) +{ + int err; + + bt_dev_dbg(hdev, "skb %p", skb); + + kfree_skb(hdev->sent_cmd); + + hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); + if (!hdev->sent_cmd) { + skb_queue_head(&hdev->cmd_q, skb); + queue_work(hdev->workqueue, &hdev->cmd_work); + return -EINVAL; + } + + if (hci_skb_opcode(skb) != HCI_OP_NOP) { + err = hci_send_frame(hdev, skb); + if (err < 0) { + hci_cmd_sync_cancel_sync(hdev, -err); + return err; + } + atomic_dec(&hdev->cmd_cnt); + } else { + err = -ENODATA; + kfree_skb(skb); + } + + if (hdev->req_status == HCI_REQ_PEND && + !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) { + kfree_skb(hdev->req_skb); + hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); + } + + return err; +} + static void hci_cmd_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); struct sk_buff *skb; + int err; BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name, atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q)); @@ -3848,27 +4149,17 @@ static void hci_cmd_work(struct work_struct *work) if (!skb) return; - kfree_skb(hdev->sent_cmd); - - hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); - if (hdev->sent_cmd) { - int res; - if (hci_req_status_pend(hdev)) - hci_dev_set_flag(hdev, HCI_CMD_PENDING); - atomic_dec(&hdev->cmd_cnt); - - res = hci_send_frame(hdev, skb); - if (res < 0) - __hci_cmd_sync_cancel(hdev, -res); - - if (test_bit(HCI_RESET, &hdev->flags)) - cancel_delayed_work(&hdev->cmd_timer); - else - schedule_delayed_work(&hdev->cmd_timer, - HCI_CMD_TIMEOUT); - } else { - skb_queue_head(&hdev->cmd_q, skb); - queue_work(hdev->workqueue, &hdev->cmd_work); - } + err = hci_send_cmd_sync(hdev, skb); + if (err) + return; + + rcu_read_lock(); + if (test_bit(HCI_RESET, &hdev->flags) || + hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) + 
cancel_delayed_work(&hdev->cmd_timer); + else + queue_delayed_work(hdev->workqueue, &hdev->cmd_timer, + HCI_CMD_TIMEOUT); + rcu_read_unlock(); } } diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c index 902b40a90b91..99e2e9fc70e8 100644 --- a/net/bluetooth/hci_debugfs.c +++ b/net/bluetooth/hci_debugfs.c @@ -22,12 +22,12 @@ */ #include <linux/debugfs.h> +#include <linux/kstrtox.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "smp.h" -#include "hci_request.h" #include "hci_debugfs.h" #define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk) \ @@ -38,7 +38,7 @@ static ssize_t __name ## _read(struct file *file, \ struct hci_dev *hdev = file->private_data; \ char buf[3]; \ \ - buf[0] = test_bit(__quirk, &hdev->quirks) ? 'Y' : 'N'; \ + buf[0] = test_bit(__quirk, hdev->quirk_flags) ? 'Y' : 'N'; \ buf[1] = '\n'; \ buf[2] = '\0'; \ return simple_read_from_buffer(user_buf, count, ppos, buf, 2); \ @@ -59,10 +59,10 @@ static ssize_t __name ## _write(struct file *file, \ if (err) \ return err; \ \ - if (enable == test_bit(__quirk, &hdev->quirks)) \ + if (enable == test_bit(__quirk, hdev->quirk_flags)) \ return -EALREADY; \ \ - change_bit(__quirk, &hdev->quirks); \ + change_bit(__quirk, hdev->quirk_flags); \ \ return count; \ } \ @@ -189,7 +189,7 @@ static int uuids_show(struct seq_file *f, void *p) } hci_dev_unlock(hdev); - return 0; + return 0; } DEFINE_SHOW_ATTRIBUTE(uuids); @@ -217,10 +217,12 @@ static int conn_info_min_age_set(void *data, u64 val) { struct hci_dev *hdev = data; - if (val == 0 || val > hdev->conn_info_max_age) + hci_dev_lock(hdev); + if (val == 0 || val > hdev->conn_info_max_age) { + hci_dev_unlock(hdev); return -EINVAL; + } - hci_dev_lock(hdev); hdev->conn_info_min_age = val; hci_dev_unlock(hdev); @@ -245,10 +247,12 @@ static int conn_info_max_age_set(void *data, u64 val) { struct hci_dev *hdev = data; - if (val == 0 || val < hdev->conn_info_min_age) + hci_dev_lock(hdev); + if (val == 0 || val < hdev->conn_info_min_age) { + hci_dev_unlock(hdev); return -EINVAL; + } - hci_dev_lock(hdev); hdev->conn_info_max_age = val; hci_dev_unlock(hdev); @@ -566,10 +570,12 @@ static int sniff_min_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; - if (val == 0 || val % 2 || val > hdev->sniff_max_interval) + hci_dev_lock(hdev); + if (val == 0 || val % 2 || val > hdev->sniff_max_interval) { + hci_dev_unlock(hdev); return -EINVAL; + } - hci_dev_lock(hdev); hdev->sniff_min_interval = val; hci_dev_unlock(hdev); @@ -594,10 +600,12 @@ static int sniff_max_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; - if (val == 0 || val % 2 || val < hdev->sniff_min_interval) + hci_dev_lock(hdev); + if (val == 0 || val % 2 || val < hdev->sniff_min_interval) { + hci_dev_unlock(hdev); return -EINVAL; + } - hci_dev_lock(hdev); hdev->sniff_max_interval = val; hci_dev_unlock(hdev); @@ -757,7 +765,7 @@ static ssize_t force_static_address_write(struct file *file, bool enable; int err; - if (test_bit(HCI_UP, &hdev->flags)) + if (hdev_is_powered(hdev)) return -EBUSY; err = kstrtobool_from_user(user_buf, count, &enable); @@ -849,10 +857,12 @@ static int conn_min_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; - if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval) + hci_dev_lock(hdev); + if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval) { + hci_dev_unlock(hdev); return -EINVAL; + } - hci_dev_lock(hdev); hdev->le_conn_min_interval = val; hci_dev_unlock(hdev); @@ -877,10 +887,12 @@ static int 
conn_max_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; - if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval) + hci_dev_lock(hdev); + if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval) { + hci_dev_unlock(hdev); return -EINVAL; + } - hci_dev_lock(hdev); hdev->le_conn_max_interval = val; hci_dev_unlock(hdev); @@ -989,10 +1001,12 @@ static int adv_min_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; - if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval) + hci_dev_lock(hdev); + if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval) { + hci_dev_unlock(hdev); return -EINVAL; + } - hci_dev_lock(hdev); hdev->le_adv_min_interval = val; hci_dev_unlock(hdev); @@ -1017,10 +1031,12 @@ static int adv_max_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; - if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval) + hci_dev_lock(hdev); + if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval) { + hci_dev_unlock(hdev); return -EINVAL; + } - hci_dev_lock(hdev); hdev->le_adv_max_interval = val; hci_dev_unlock(hdev); @@ -1045,10 +1061,12 @@ static int min_key_size_set(void *data, u64 val) { struct hci_dev *hdev = data; - if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE) + hci_dev_lock(hdev); + if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE) { + hci_dev_unlock(hdev); return -EINVAL; + } - hci_dev_lock(hdev); hdev->le_min_key_size = val; hci_dev_unlock(hdev); @@ -1073,10 +1091,12 @@ static int max_key_size_set(void *data, u64 val) { struct hci_dev *hdev = data; - if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size) + hci_dev_lock(hdev); + if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size) { + hci_dev_unlock(hdev); return -EINVAL; + } - hci_dev_lock(hdev); hdev->le_max_key_size = val; hci_dev_unlock(hdev); @@ -1152,7 +1172,7 @@ static ssize_t force_no_mitm_write(struct file *file, return -EFAULT; buf[buf_size] = '\0'; - if (strtobool(buf, &enable)) + if (kstrtobool(buf, &enable)) return -EINVAL; if (enable == hci_dev_test_flag(hdev, HCI_FORCE_NO_MITM)) @@ -1245,7 +1265,7 @@ void hci_debugfs_create_conn(struct hci_conn *conn) struct hci_dev *hdev = conn->hdev; char name[6]; - if (IS_ERR_OR_NULL(hdev->debugfs)) + if (IS_ERR_OR_NULL(hdev->debugfs) || conn->debugfs) return; snprintf(name, sizeof(name), "%u", conn->handle); @@ -1336,7 +1356,7 @@ static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf, * for the vendor callback. Instead just store the desired value and * the setting will be programmed when the controller gets powered on. 
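Every paired min/max debugfs setter in this file is reshaped the same way: the range check moves under hdev->lock so the comparison against the sibling bound cannot race with a concurrent write to it. The recurring shape, using the connection-interval setter as the representative:

	static int conn_min_interval_set(void *data, u64 val)
	{
		struct hci_dev *hdev = data;

		hci_dev_lock(hdev);
		/* le_conn_max_interval is read under the same lock. */
		if (val < 0x0006 || val > 0x0c80 ||
		    val > hdev->le_conn_max_interval) {
			hci_dev_unlock(hdev);
			return -EINVAL;
		}
		hdev->le_conn_min_interval = val;
		hci_dev_unlock(hdev);

		return 0;
	}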
*/ - if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && + if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG) && (!test_bit(HCI_RUNNING, &hdev->flags) || hci_dev_test_flag(hdev, HCI_USER_CHANNEL))) goto done; diff --git a/net/bluetooth/hci_drv.c b/net/bluetooth/hci_drv.c new file mode 100644 index 000000000000..3dd2d8a006b9 --- /dev/null +++ b/net/bluetooth/hci_drv.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Google Corporation + */ + +#include <linux/skbuff.h> +#include <linux/types.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/hci_drv.h> + +int hci_drv_cmd_status(struct hci_dev *hdev, u16 cmd, u8 status) +{ + struct hci_drv_ev_hdr *hdr; + struct hci_drv_ev_cmd_status *ev; + struct sk_buff *skb; + + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = skb_put(skb, sizeof(*hdr)); + hdr->opcode = __cpu_to_le16(HCI_DRV_EV_CMD_STATUS); + hdr->len = __cpu_to_le16(sizeof(*ev)); + + ev = skb_put(skb, sizeof(*ev)); + ev->opcode = __cpu_to_le16(cmd); + ev->status = status; + + hci_skb_pkt_type(skb) = HCI_DRV_PKT; + + return hci_recv_frame(hdev, skb); +} +EXPORT_SYMBOL(hci_drv_cmd_status); + +int hci_drv_cmd_complete(struct hci_dev *hdev, u16 cmd, u8 status, void *rp, + size_t rp_len) +{ + struct hci_drv_ev_hdr *hdr; + struct hci_drv_ev_cmd_complete *ev; + struct sk_buff *skb; + + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = skb_put(skb, sizeof(*hdr)); + hdr->opcode = __cpu_to_le16(HCI_DRV_EV_CMD_COMPLETE); + hdr->len = __cpu_to_le16(sizeof(*ev) + rp_len); + + ev = skb_put(skb, sizeof(*ev)); + ev->opcode = __cpu_to_le16(cmd); + ev->status = status; + + skb_put_data(skb, rp, rp_len); + + hci_skb_pkt_type(skb) = HCI_DRV_PKT; + + return hci_recv_frame(hdev, skb); +} +EXPORT_SYMBOL(hci_drv_cmd_complete); + +int hci_drv_process_cmd(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_drv_cmd_hdr *hdr; + const struct hci_drv_handler *handler = NULL; + u16 opcode, len, ogf, ocf; + + hdr = skb_pull_data(skb, sizeof(*hdr)); + if (!hdr) + return -EILSEQ; + + opcode = __le16_to_cpu(hdr->opcode); + len = __le16_to_cpu(hdr->len); + if (len != skb->len) + return -EILSEQ; + + ogf = hci_opcode_ogf(opcode); + ocf = hci_opcode_ocf(opcode); + + if (!hdev->hci_drv) + return hci_drv_cmd_status(hdev, opcode, + HCI_DRV_STATUS_UNKNOWN_COMMAND); + + if (ogf != HCI_DRV_OGF_DRIVER_SPECIFIC) { + if (opcode < hdev->hci_drv->common_handler_count) + handler = &hdev->hci_drv->common_handlers[opcode]; + } else { + if (ocf < hdev->hci_drv->specific_handler_count) + handler = &hdev->hci_drv->specific_handlers[ocf]; + } + + if (!handler || !handler->func) + return hci_drv_cmd_status(hdev, opcode, + HCI_DRV_STATUS_UNKNOWN_COMMAND); + + if (len != handler->data_len) + return hci_drv_cmd_status(hdev, opcode, + HCI_DRV_STATUS_INVALID_PARAMETERS); + + return handler->func(hdev, skb->data, len); +} +EXPORT_SYMBOL(hci_drv_process_cmd); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index fc30f4c03d29..a9868f17ef40 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1,6 +1,7 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. + Copyright 2023-2024 NXP Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> @@ -24,16 +25,16 @@ /* Bluetooth HCI event handling. 
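hci_drv_process_cmd() above dispatches common opcodes through common_handlers indexed by the full opcode, and driver-specific ones (OGF HCI_DRV_OGF_DRIVER_SPECIFIC) through specific_handlers indexed by OCF, validating data_len before the call. A driver would wire up a table roughly as follows (all my_* names are illustrative, the handler signature follows the call site above, and HCI_DRV_STATUS_SUCCESS is assumed from the status-code family shown):

	static int my_drv_get_info(struct hci_dev *hdev, void *data, u16 len)
	{
		struct my_drv_rp_get_info rp = { 0 };	/* hypothetical reply */

		return hci_drv_cmd_complete(hdev, MY_DRV_OP_GET_INFO,
					    HCI_DRV_STATUS_SUCCESS,
					    &rp, sizeof(rp));
	}

	static const struct hci_drv_handler my_specific_handlers[] = {
		{ .func = my_drv_get_info, .data_len = 0 },
	};

	static struct hci_drv my_drv = {
		.specific_handlers	= my_specific_handlers,
		.specific_handler_count	= ARRAY_SIZE(my_specific_handlers),
	};

	/* At probe time: hdev->hci_drv = &my_drv; */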
*/ -#include <asm/unaligned.h> +#include <linux/unaligned.h> +#include <linux/crypto.h> +#include <crypto/algapi.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> -#include "hci_request.h" #include "hci_debugfs.h" -#include "a2mp.h" -#include "amp.h" +#include "hci_codec.h" #include "smp.h" #include "msft.h" #include "eir.h" @@ -41,8 +42,6 @@ #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ "\x00\x00\x00\x00\x00\x00\x00\x00" -#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000) - /* Handle HCI Event packets */ static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, @@ -91,11 +90,11 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data, /* It is possible that we receive Inquiry Complete event right * before we receive Inquiry Cancel Command Complete event, in * which case the latter event should have status of Command - * Disallowed (0x0c). This should not be treated as error, since + * Disallowed. This should not be treated as error, since * we actually achieve what Inquiry Cancel wants to achieve, * which is to end the last Inquiry session. */ - if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { + if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) { bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); rp->status = 0x00; } @@ -116,8 +115,6 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data, hci_discovery_set_state(hdev, DISCOVERY_STOPPED); hci_dev_unlock(hdev); - hci_conn_check_pending(hdev); - return rp->status; } @@ -148,15 +145,13 @@ static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data, hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); - hci_conn_check_pending(hdev); - return rp->status; } static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_status *rp = data; + struct hci_rp_remote_name_req_cancel *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); @@ -328,14 +323,17 @@ static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_delete_stored_link_key *rp = data; + u16 num_keys; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; - if (rp->num_keys <= hdev->stored_num_keys) - hdev->stored_num_keys -= le16_to_cpu(rp->num_keys); + num_keys = le16_to_cpu(rp->num_keys); + + if (num_keys <= hdev->stored_num_keys) + hdev->stored_num_keys -= num_keys; else hdev->stored_num_keys = 0; @@ -509,6 +507,9 @@ static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data, { struct hci_rp_read_class_of_dev *rp = data; + if (WARN_ON(!hdev)) + return HCI_ERROR_UNSPECIFIED; + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) @@ -709,6 +710,72 @@ static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data, return rp->status; } +static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_enc_key_size *rp = data; + struct hci_conn *conn; + u16 handle; + u8 status = rp->status; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + handle = le16_to_cpu(rp->handle); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, handle); + if (!conn) { + status = 0xFF; + goto done; + } + + /* While unexpected, the read_enc_key_size command may fail. The most + * secure approach is to then assume the key size is 0 to force a + * disconnection. 
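The delete_stored_link_key fix further up converts rp->num_keys once with le16_to_cpu() and uses the converted value for both the comparison and the subtraction; the old code compared the raw little-endian field but subtracted the converted one, which is wrong on big-endian hosts. A standalone illustration of why the conversion must happen before any arithmetic (generic C, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Interpret two wire bytes as a little-endian 16-bit value,
 * independent of host endianness. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	const uint8_t wire[2] = { 0x01, 0x00 };	/* 1 on the wire */
	uint16_t stored = 5;
	uint16_t num_keys = get_le16(wire);

	stored = num_keys <= stored ? stored - num_keys : 0;
	printf("stored keys left: %u\n", stored);	/* 4 */
	return 0;
}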
+ */ + if (status) { + bt_dev_err(hdev, "failed to read key size for handle %u", + handle); + conn->enc_key_size = 0; + } else { + u8 *key_enc_size = hci_conn_key_enc_size(conn); + + conn->enc_key_size = rp->key_size; + status = 0; + + /* Attempt to check if the key size is too small or if it has + * been downgraded from the last time it was stored as part of + * the link_key. + */ + if (conn->enc_key_size < hdev->min_enc_key_size || + (key_enc_size && conn->enc_key_size < *key_enc_size)) { + /* As slave role, the conn->state has been set to + * BT_CONNECTED and l2cap conn req might not be received + * yet, at this moment the l2cap layer almost does + * nothing with the non-zero status. + * So we also clear encrypt related bits, and then the + * handler of l2cap conn req will get the right secure + * state at a later time. + */ + status = HCI_ERROR_AUTH_FAILURE; + clear_bit(HCI_CONN_ENCRYPT, &conn->flags); + clear_bit(HCI_CONN_AES_CCM, &conn->flags); + } + + /* Update the key encryption size with the connection one */ + if (key_enc_size && *key_enc_size != conn->enc_key_size) + *key_enc_size = conn->enc_key_size; + } + + hci_encrypt_cfm(conn, status); + +done: + hci_dev_unlock(hdev); + + return status; +} + static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data, struct sk_buff *skb) { @@ -757,9 +824,6 @@ static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - if (rp->status) - return rp->status; - sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO); if (!sent) return rp->status; @@ -767,9 +831,15 @@ static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data, hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); - if (conn) + if (!conn) { + rp->status = 0xff; + goto unlock; + } + + if (!rp->status) conn->auth_payload_timeout = get_unaligned_le16(sent + 2); +unlock: hci_dev_unlock(hdev); return rp->status; @@ -837,8 +907,13 @@ static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data, if (rp->status) return rp->status; - if (hdev->max_page < rp->max_page) - hdev->max_page = rp->max_page; + if (hdev->max_page < rp->max_page) { + if (hci_test_quirk(hdev, + HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2)) + bt_dev_warn(hdev, "broken local ext features page 2"); + else + hdev->max_page = rp->max_page; + } if (rp->page < HCI_MAX_PAGES) memcpy(hdev->features[rp->page], rp->features, 8); @@ -846,21 +921,6 @@ static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data, return rp->status; } -static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_rp_read_flow_control_mode *rp = data; - - bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - - if (rp->status) - return rp->status; - - hdev->flow_ctl_mode = rp->mode; - - return rp->status; -} - static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data, struct sk_buff *skb) { @@ -876,17 +936,23 @@ static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data, hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); - if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { + if (hci_test_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE)) { hdev->sco_mtu = 64; hdev->sco_pkts = 8; } + if (!read_voice_setting_capable(hdev)) + hdev->sco_pkts = 0; + hdev->acl_cnt = hdev->acl_pkts; hdev->sco_cnt = hdev->sco_pkts; BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, 
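hci_cc_read_enc_key_size() above enforces two policies on the negotiated key: it must not be shorter than hdev->min_enc_key_size, and it must not have been downgraded below the size remembered alongside the stored link key. A compact restatement of just that decision (standalone C; the inputs mirror the hunk, everything else is illustrative):

#include <stdint.h>
#include <stdbool.h>

/* Returns true if the connection must be failed with an
 * authentication error. stored_key_size may be NULL when no
 * link key size was recorded at pairing time. */
static bool enc_key_too_weak(uint8_t enc_key_size,
			     uint8_t min_enc_key_size,
			     const uint8_t *stored_key_size)
{
	if (enc_key_size < min_enc_key_size)
		return true;	/* below local policy */
	if (stored_key_size && enc_key_size < *stored_key_size)
		return true;	/* downgraded since last pairing */
	return false;
}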
hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); + if (!hdev->acl_mtu || !hdev->acl_pkts) + return HCI_ERROR_INVALID_PARAMETERS; + return rp->status; } @@ -1001,28 +1067,6 @@ static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data, return rp->status; } -static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_rp_read_data_block_size *rp = data; - - bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - - if (rp->status) - return rp->status; - - hdev->block_mtu = __le16_to_cpu(rp->max_acl_len); - hdev->block_len = __le16_to_cpu(rp->block_len); - hdev->num_blocks = __le16_to_cpu(rp->num_blocks); - - hdev->block_cnt = hdev->num_blocks; - - BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, - hdev->block_cnt, hdev->block_len); - - return rp->status; -} - static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data, struct sk_buff *skb) { @@ -1057,30 +1101,6 @@ unlock: return rp->status; } -static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_rp_read_local_amp_info *rp = data; - - bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - - if (rp->status) - return rp->status; - - hdev->amp_status = rp->amp_status; - hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); - hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); - hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); - hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); - hdev->amp_type = rp->amp_type; - hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); - hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); - hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); - hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); - - return rp->status; -} - static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data, struct sk_buff *skb) { @@ -1196,6 +1216,9 @@ static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data, BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); + if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU) + return HCI_ERROR_INVALID_PARAMETERS; + return rp->status; } @@ -1509,7 +1532,7 @@ static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data, params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type); if (params) - params->privacy_mode = cp->mode; + WRITE_ONCE(params->privacy_mode, cp->mode); hci_dev_unlock(hdev); @@ -1586,6 +1609,8 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data, if (adv) adv->enabled = true; + else if (!set->handle) + hci_dev_set_flag(hdev, HCI_LE_ADV_0); conn = hci_lookup_le_connect(hdev); if (conn) @@ -1596,6 +1621,8 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data, if (cp->num_of_sets) { if (adv) adv->enabled = false; + else if (!set->handle) + hci_dev_clear_flag(hdev, HCI_LE_ADV_0); /* If just one instance was disabled check if there are * any other instance enabled before clearing HCI_LE_ADV @@ -1692,7 +1719,7 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, { struct discovery_state *d = &hdev->discovery; - if (len > HCI_MAX_AD_LENGTH) + if (len > max_adv_len(hdev)) return; bacpy(&d->last_adv_addr, bdaddr); @@ -1710,8 +1737,10 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) switch (enable) { case LE_SCAN_ENABLE: hci_dev_set_flag(hdev, HCI_LE_SCAN); - if (hdev->le_scan_type == LE_SCAN_ACTIVE) + if (hdev->le_scan_type == LE_SCAN_ACTIVE) { clear_pending_adv_report(hdev); + hci_discovery_set_state(hdev, 
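The buffer-size handlers above now distrust the controller: hci_cc_read_buffer_size() returns HCI_ERROR_INVALID_PARAMETERS when the reported ACL MTU or packet count is zero, and the LE variant rejects a nonzero LE MTU below HCI_MIN_LE_MTU, so setup fails early instead of leaving the TX path with unusable limits. The shape of the check, as a hedged standalone sketch (the 0x001b floor stands in for HCI_MIN_LE_MTU and is an assumption):

#include <stdint.h>

#define MIN_LE_MTU 0x001b	/* illustrative floor for LE data */

static int validate_buffer_size(uint16_t acl_mtu, uint16_t acl_pkts,
				uint16_t le_mtu)
{
	if (!acl_mtu || !acl_pkts)
		return -1;	/* controller reported no usable ACL buffers */
	if (le_mtu && le_mtu < MIN_LE_MTU)
		return -1;	/* nonzero but too small to carry LE data */
	return 0;
}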
DISCOVERY_FINDING); + } break; case LE_SCAN_DISABLE: @@ -1726,7 +1755,7 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) d->last_adv_addr_type, NULL, d->last_adv_rssi, d->last_adv_flags, d->last_adv_data, - d->last_adv_data_len, NULL, 0); + d->last_adv_data_len, NULL, 0, 0); } /* Cancel this timer so that we don't try to disable scanning @@ -1742,6 +1771,9 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) */ if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED)) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) && + hdev->discovery.state == DISCOVERY_FINDING) + queue_work(hdev->workqueue, &hdev->reenable_adv_work); break; @@ -1835,7 +1867,9 @@ static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data, if (rp->status) return rp->status; + hci_dev_lock(hdev); hci_bdaddr_list_clear(&hdev->le_accept_list); + hci_dev_unlock(hdev); return rp->status; } @@ -1855,8 +1889,10 @@ static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data, if (!sent) return rp->status; + hci_dev_lock(hdev); hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr, sent->bdaddr_type); + hci_dev_unlock(hdev); return rp->status; } @@ -1876,8 +1912,10 @@ static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data, if (!sent) return rp->status; + hci_dev_lock(hdev); hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr, sent->bdaddr_type); + hci_dev_unlock(hdev); return rp->status; } @@ -1949,9 +1987,11 @@ static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data, if (!sent) return rp->status; + hci_dev_lock(hdev); hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr, sent->bdaddr_type, sent->peer_irk, sent->local_irk); + hci_dev_unlock(hdev); return rp->status; } @@ -1971,8 +2011,10 @@ static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data, if (!sent) return rp->status; + hci_dev_lock(hdev); hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr, sent->bdaddr_type); + hci_dev_unlock(hdev); return rp->status; } @@ -1987,7 +2029,9 @@ static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data, if (rp->status) return rp->status; + hci_dev_lock(hdev); hci_bdaddr_list_clear(&hdev->le_resolv_list); + hci_dev_unlock(hdev); return rp->status; } @@ -2110,40 +2154,6 @@ static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data, return rp->status; } -static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_rp_le_set_ext_adv_params *rp = data; - struct hci_cp_le_set_ext_adv_params *cp; - struct adv_info *adv_instance; - - bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - - if (rp->status) - return rp->status; - - cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); - if (!cp) - return rp->status; - - hci_dev_lock(hdev); - hdev->adv_addr_type = cp->own_addr_type; - if (!cp->handle) { - /* Store in hdev for instance 0 */ - hdev->adv_tx_power = rp->tx_power; - } else { - adv_instance = hci_find_adv_instance(hdev, cp->handle); - if (adv_instance) - adv_instance->tx_power = rp->tx_power; - } - /* Update adv data as tx power is known now */ - hci_req_update_adv_data(hdev, cp->handle); - - hci_dev_unlock(hdev); - - return rp->status; -} - static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data, struct sk_buff *skb) { @@ -2224,12 +2234,11 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) { bt_dev_dbg(hdev, "status 0x%2.2x", status); - if 
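Active LE scanning defers reporting an advertisement until the matching scan response arrives: store_pending_adv_report() caches the last report (bounded by max_adv_len() in the hunk above), and le_set_scan_enable_complete() flushes whatever is still pending when scanning is disabled. A toy version of the cache-and-flush logic, with all names illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct pending_adv {
	bool valid;
	uint8_t addr[6];
	uint8_t data[31];
	uint8_t len;
};

static void store_pending(struct pending_adv *p, const uint8_t addr[6],
			  const uint8_t *data, uint8_t len)
{
	if (len > sizeof(p->data))
		return;		/* oversized report, drop (mirrors max_adv_len check) */
	memcpy(p->addr, addr, 6);
	memcpy(p->data, data, len);
	p->len = len;
	p->valid = true;
}

/* On scan disable: report whatever is still cached, then clear. */
static void flush_pending(struct pending_adv *p,
			  void (*report)(const struct pending_adv *))
{
	if (p->valid)
		report(p);
	p->valid = false;
}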
(status) { - hci_conn_check_pending(hdev); + if (status) return; - } - set_bit(HCI_INQUIRY, &hdev->flags); + if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY)) + set_bit(HCI_INQUIRY, &hdev->flags); } static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) @@ -2251,19 +2260,16 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) if (status) { if (conn && conn->state == BT_CONNECT) { - if (status != 0x0c || conn->attempt > 2) { - conn->state = BT_CLOSED; - hci_connect_cfm(conn, status); - hci_conn_del(conn); - } else - conn->state = BT_CONNECT2; + conn->state = BT_CLOSED; + hci_connect_cfm(conn, status); + hci_conn_del(conn); } } else { if (!conn) { - conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr, - HCI_ROLE_MASTER); - if (!conn) - bt_dev_err(hdev, "no memory for new connection"); + conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr, + 0, HCI_ROLE_MASTER); + if (IS_ERR(conn)) + bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); } } @@ -2273,7 +2279,8 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) { struct hci_cp_add_sco *cp; - struct hci_conn *acl, *sco; + struct hci_conn *acl; + struct hci_link *link; __u16 handle; bt_dev_dbg(hdev, "status 0x%2.2x", status); @@ -2293,12 +2300,13 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) acl = hci_conn_hash_lookup_handle(hdev, handle); if (acl) { - sco = acl->link; - if (sco) { - sco->state = BT_CLOSED; + link = list_first_entry_or_null(&acl->link_list, + struct hci_link, list); + if (link && link->conn) { + link->conn->state = BT_CLOSED; - hci_connect_cfm(sco, status); - hci_conn_del(sco); + hci_connect_cfm(link->conn, status); + hci_conn_del(link->conn); } } @@ -2433,9 +2441,7 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, * Only those in BT_CONFIG or BT_CONNECTED states can be * considered connected. 
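hci_conn_add_unset() above returns ERR_PTR(-errno) instead of NULL, so the callers in these hunks switch from a !conn test to IS_ERR(conn) and log PTR_ERR(conn). The convention packs a small negative errno into an invalid pointer value; a self-contained re-implementation for illustration (the kernel's versions live in linux/err.h):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_conn(int fail)
{
	return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)0x1000;
}

int main(void)
{
	void *conn = alloc_conn(1);

	if (IS_ERR(conn))
		printf("connection err: %ld\n", PTR_ERR(conn));
	return 0;
}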
*/ - if (conn && - (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) && - !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) + if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) mgmt_device_connected(hdev, conn, name, name_len); if (discov->state == DISCOVERY_STOPPED) @@ -2565,74 +2571,61 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) hci_dev_unlock(hdev); } -static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) +static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle, + __u8 status) { - struct hci_cp_setup_sync_conn *cp; - struct hci_conn *acl, *sco; - __u16 handle; - - bt_dev_dbg(hdev, "status 0x%2.2x", status); - - if (!status) - return; - - cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); - if (!cp) - return; + struct hci_conn *acl; + struct hci_link *link; - handle = __le16_to_cpu(cp->handle); - - bt_dev_dbg(hdev, "handle 0x%4.4x", handle); + bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status); hci_dev_lock(hdev); acl = hci_conn_hash_lookup_handle(hdev, handle); if (acl) { - sco = acl->link; - if (sco) { - sco->state = BT_CLOSED; + link = list_first_entry_or_null(&acl->link_list, + struct hci_link, list); + if (link && link->conn) { + link->conn->state = BT_CLOSED; - hci_connect_cfm(sco, status); - hci_conn_del(sco); + hci_connect_cfm(link->conn, status); + hci_conn_del(link->conn); } } hci_dev_unlock(hdev); } -static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) +static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) { - struct hci_cp_enhanced_setup_sync_conn *cp; - struct hci_conn *acl, *sco; - __u16 handle; + struct hci_cp_setup_sync_conn *cp; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; - cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); + cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); if (!cp) return; - handle = __le16_to_cpu(cp->handle); + hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); +} - bt_dev_dbg(hdev, "handle 0x%4.4x", handle); +static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_enhanced_setup_sync_conn *cp; - hci_dev_lock(hdev); + bt_dev_dbg(hdev, "status 0x%2.2x", status); - acl = hci_conn_hash_lookup_handle(hdev, handle); - if (acl) { - sco = acl->link; - if (sco) { - sco->state = BT_CLOSED; + if (!status) + return; - hci_connect_cfm(sco, status); - hci_conn_del(sco); - } - } + cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); + if (!cp) + return; - hci_dev_unlock(hdev); + hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); } static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) @@ -2714,7 +2707,7 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) if (!conn) goto unlock; - if (status) { + if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) { mgmt_disconnect_failed(hdev, &conn->dst, conn->type, conn->dst_type, status); @@ -2723,13 +2716,22 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) hci_enable_advertising(hdev); } + /* Inform sockets conn is gone before we delete it */ + hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED); + goto done; } + /* During suspend, mark connection as closed immediately + * since we might not receive HCI_EV_DISCONN_COMPLETE + */ + if (hdev->suspended) + conn->state = BT_CLOSED; + mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); if (conn->type == ACL_LINK) { - if 
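With hci_conn->link replaced by a link_list, both SCO setup-failure paths above fetch the first hci_link via list_first_entry_or_null() and were folded into a single hci_setup_sync_conn_status() helper. That primitive reduces to "if the list is non-empty, container_of its first node, else NULL"; a standalone sketch of the mechanics (the real macros take a typeof-based form in linux/list.h):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_first_entry_or_null(head, type, member) \
	((head)->next != (head) ? \
		container_of((head)->next, type, member) : NULL)

struct hci_link { struct list_head list; int conn; };

int main(void)
{
	struct list_head acl = { &acl, &acl };	/* empty list */
	struct hci_link *link =
		list_first_entry_or_null(&acl, struct hci_link, list);

	printf("%s\n", link ? "have link" : "no SCO link pending");
	return 0;
}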
(test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) hci_remove_link_key(hdev, &conn->dst); } @@ -2743,8 +2745,8 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: - list_del_init(¶ms->action); - list_add(¶ms->action, &hdev->pend_le_conns); + hci_pend_le_list_del_init(params); + hci_pend_le_list_add(params, &hdev->pend_le_conns); break; default: @@ -2815,16 +2817,6 @@ static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, conn->resp_addr_type = peer_addr_type; bacpy(&conn->resp_addr, peer_addr); - - /* We don't want the connection attempt to stick around - * indefinitely since LE doesn't have a page timeout concept - * like BR/EDR. Set a timer for any connection that doesn't use - * the accept list for connecting. - */ - if (filter_policy == HCI_LE_USE_PEER_ADDR) - queue_delayed_work(conn->hdev->workqueue, - &conn->le_conn_timeout, - conn->conn_timeout); } static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) @@ -2834,7 +2826,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) bt_dev_dbg(hdev, "status 0x%2.2x", status); /* All connection failure handling is taken care of by the - * hci_le_conn_failed function which is triggered by the HCI + * hci_conn_failed function which is triggered by the HCI * request completion callbacks used for connecting. */ if (status) @@ -2859,7 +2851,7 @@ static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) bt_dev_dbg(hdev, "status 0x%2.2x", status); /* All connection failure handling is taken care of by the - * hci_le_conn_failed function which is triggered by the HCI + * hci_conn_failed function which is triggered by the HCI * request completion callbacks used for connecting. */ if (status) @@ -2894,12 +2886,8 @@ static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); - if (conn) { - if (conn->state == BT_CONFIG) { - hci_connect_cfm(conn, status); - hci_conn_drop(conn); - } - } + if (conn && conn->state == BT_CONFIG) + hci_connect_cfm(conn, status); hci_dev_unlock(hdev); } @@ -2966,8 +2954,6 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); - hci_conn_check_pending(hdev); - if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) return; @@ -2991,7 +2977,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, * state to indicate completion. */ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || - !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) + !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); goto unlock; } @@ -3010,7 +2996,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, * state to indicate completion. 
*/ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || - !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) + !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); } @@ -3056,24 +3042,69 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, HCI_RSSI_INVALID, - flags, NULL, 0, NULL, 0); + flags, NULL, 0, NULL, 0, 0); } hci_dev_unlock(hdev); } +static int hci_read_enc_key_size(struct hci_dev *hdev, struct hci_conn *conn) +{ + struct hci_cp_read_enc_key_size cp; + u8 *key_enc_size = hci_conn_key_enc_size(conn); + + if (!read_key_size_capable(hdev)) { + conn->enc_key_size = HCI_LINK_KEY_SIZE; + return -EOPNOTSUPP; + } + + bt_dev_dbg(hdev, "hcon %p", conn); + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + + /* If the key enc_size is already known, use it as conn->enc_key_size, + * otherwise use hdev->min_enc_key_size so the likes of + * l2cap_check_enc_key_size don't fail while waiting for + * HCI_OP_READ_ENC_KEY_SIZE response. + */ + if (key_enc_size && *key_enc_size) + conn->enc_key_size = *key_enc_size; + else + conn->enc_key_size = hdev->min_enc_key_size; + + return hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp); +} + static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_conn_complete *ev = data; struct hci_conn *conn; + u8 status = ev->status; - bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); hci_dev_lock(hdev); + /* Check for existing connection: + * + * 1. If it doesn't exist then it must be receiver/slave role. + * 2. If it does exist confirm that it is connecting/BT_CONNECT in case + * of initiator/master role since there could be a collision where + * either side is attempting to connect or something like a fuzzing + * testing is trying to play tricks to destroy the hcon object before + * it even attempts to connect (e.g. hcon->state == BT_OPEN). + */ conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); - if (!conn) { + if (!conn || + (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) { + /* In case of error status and there is no connection pending + * just unlock as there is nothing to cleanup. + */ + if (ev->status) + goto unlock; + /* Connection may not exist if auto-connected. Check the bredr * allowlist to see if this device is allowed to auto connect. * If link is an ACL type, create a connection class @@ -3087,10 +3118,11 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, BDADDR_BREDR)) { - conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, - HCI_ROLE_SLAVE); - if (!conn) { - bt_dev_err(hdev, "no memory for new conn"); + conn = hci_conn_add_unset(hdev, ev->link_type, + &ev->bdaddr, 0, + HCI_ROLE_SLAVE); + if (IS_ERR(conn)) { + bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); goto unlock; } } else { @@ -3106,8 +3138,21 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, } } - if (!ev->status) { - conn->handle = __le16_to_cpu(ev->handle); + /* The HCI_Connection_Complete event is only sent once per connection. + * Processing it more than once per connection can corrupt kernel memory. + * + * As the connection handle is set here for the first time, it indicates + * whether the connection is already set up. 
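The comment in the hunk above spells out the invariant: HCI_Connection_Complete must be processed at most once per connection, and "handle still unset" is what distinguishes the first delivery from a replayed or duplicated event. A minimal guard in the same spirit (the sentinel value here is an assumption; the kernel uses its HCI_CONN_HANDLE_UNSET() macro):

#include <stdint.h>
#include <stdbool.h>

#define HANDLE_UNSET 0xffff	/* illustrative sentinel, not the kernel's */

struct conn { uint16_t handle; };

static bool accept_conn_complete(struct conn *c, uint16_t handle)
{
	if (c->handle != HANDLE_UNSET)
		return false;	/* duplicate event, ignore to avoid corruption */
	c->handle = handle;
	return true;
}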
+ */ + if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { + bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); + goto unlock; + } + + if (!status) { + status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle)); + if (status) + goto done; if (conn->type == ACL_LINK) { conn->state = BT_CONFIG; @@ -3130,6 +3175,19 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, if (test_bit(HCI_ENCRYPT, &hdev->flags)) set_bit(HCI_CONN_ENCRYPT, &conn->flags); + /* "Link key request" completed ahead of "connect request" completes */ + if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) && + ev->link_type == ACL_LINK) { + struct link_key *key; + + key = hci_find_link_key(hdev, &ev->bdaddr); + if (key) { + set_bit(HCI_CONN_ENCRYPT, &conn->flags); + hci_read_enc_key_size(hdev, conn); + hci_encrypt_cfm(conn, ev->status); + } + } + /* Get remote features */ if (conn->type == ACL_LINK) { struct hci_cp_read_remote_features cp; @@ -3137,7 +3195,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, sizeof(cp), &cp); - hci_req_update_scan(hdev); + hci_update_scan(hdev); } /* Set packet type for incoming connection */ @@ -3148,19 +3206,14 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), &cp); } - } else { - conn->state = BT_CLOSED; - if (conn->type == ACL_LINK) - mgmt_connect_failed(hdev, &conn->dst, conn->type, - conn->dst_type, ev->status); } if (conn->type == ACL_LINK) hci_sco_setup(conn, ev->status); - if (ev->status) { - hci_connect_cfm(conn, ev->status); - hci_conn_del(conn); +done: + if (status) { + hci_conn_failed(conn, status); } else if (ev->link_type == SCO_LINK) { switch (conn->setting & SCO_AIRMODE_MASK) { case SCO_AIRMODE_CVSD: @@ -3169,13 +3222,11 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, break; } - hci_connect_cfm(conn, ev->status); + hci_connect_cfm(conn, status); } unlock: hci_dev_unlock(hdev); - - hci_conn_check_pending(hdev); } static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) @@ -3198,6 +3249,16 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); + /* Reject incoming connection from device with same BD ADDR against + * CVE-2020-26555 + */ + if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) { + bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n", + &ev->bdaddr); + hci_reject_conn(hdev, &ev->bdaddr); + return; + } + mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, &flags); @@ -3206,10 +3267,12 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data, return; } + hci_dev_lock(hdev); + if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr, BDADDR_BREDR)) { hci_reject_conn(hdev, &ev->bdaddr); - return; + goto unlock; } /* Require HCI_CONNECTABLE or an accept list entry to accept the @@ -3221,13 +3284,11 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data, !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, BDADDR_BREDR)) { hci_reject_conn(hdev, &ev->bdaddr); - return; + goto unlock; } /* Connection accepted */ - hci_dev_lock(hdev); - ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); if (ie) memcpy(ie->data.dev_class, ev->dev_class, 3); @@ -3235,12 +3296,11 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data, conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if 
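The CVE-2020-26555 mitigation in hci_conn_request_evt() above rejects any incoming connection whose peer address equals our own controller address, since a BD_ADDR spoofed to match the victim's is the entry point of that attack. Reduced to its essence (bdaddr_t and bacmp() mimicked in plain C for illustration):

#include <string.h>
#include <stdbool.h>

typedef struct { unsigned char b[6]; } bdaddr_t;

static int bacmp(const bdaddr_t *a, const bdaddr_t *b)
{
	return memcmp(a, b, sizeof(bdaddr_t));
}

/* Returns true if the incoming connection request must be rejected. */
static bool reject_self_connect(const bdaddr_t *own, const bdaddr_t *peer)
{
	return !bacmp(own, peer);
}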
(!conn) { - conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, - HCI_ROLE_SLAVE); - if (!conn) { - bt_dev_err(hdev, "no memory for new connection"); - hci_dev_unlock(hdev); - return; + conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr, 0, + HCI_ROLE_SLAVE); + if (IS_ERR(conn)) { + bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); + goto unlock; } } @@ -3280,6 +3340,10 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data, conn->state = BT_CONNECT2; hci_connect_cfm(conn, 0); } + + return; +unlock: + hci_dev_unlock(hdev); } static u8 hci_to_mgmt_reason(u8 err) @@ -3334,29 +3398,36 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, reason, mgmt_connected); if (conn->type == ACL_LINK) { - if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) hci_remove_link_key(hdev, &conn->dst); - hci_req_update_scan(hdev); + hci_update_scan(hdev); } - params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); - if (params) { - switch (params->auto_connect) { - case HCI_AUTO_CONN_LINK_LOSS: - if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) + /* Re-enable passive scanning if disconnected device is marked + * as auto-connectable. + */ + if (conn->type == LE_LINK) { + params = hci_conn_params_lookup(hdev, &conn->dst, + conn->dst_type); + if (params) { + switch (params->auto_connect) { + case HCI_AUTO_CONN_LINK_LOSS: + if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) + break; + fallthrough; + + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + hci_pend_le_list_del_init(params); + hci_pend_le_list_add(params, + &hdev->pend_le_conns); + hci_update_passive_scan(hdev); break; - fallthrough; - case HCI_AUTO_CONN_DIRECT: - case HCI_AUTO_CONN_ALWAYS: - list_del_init(¶ms->action); - list_add(¶ms->action, &hdev->pend_le_conns); - hci_update_passive_scan(hdev); - break; - - default: - break; + default: + break; + } } } @@ -3399,14 +3470,8 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, if (!ev->status) { clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); - - if (!hci_conn_ssp_enabled(conn) && - test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { - bt_dev_info(hdev, "re-auth of legacy device is not possible."); - } else { - set_bit(HCI_CONN_AUTH, &conn->flags); - conn->sec_level = conn->pending_sec_level; - } + set_bit(HCI_CONN_AUTH, &conn->flags); + conn->sec_level = conn->pending_sec_level; } else { if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); @@ -3415,7 +3480,6 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, } clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); - clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); if (conn->state == BT_CONFIG) { if (!ev->status && hci_conn_ssp_enabled(conn)) { @@ -3462,8 +3526,6 @@ static void hci_remote_name_evt(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); - hci_conn_check_pending(hdev); - hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); @@ -3497,47 +3559,6 @@ unlock: hci_dev_unlock(hdev); } -static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, - u16 opcode, struct sk_buff *skb) -{ - const struct hci_rp_read_enc_key_size *rp; - struct hci_conn *conn; - u16 handle; - - BT_DBG("%s status 0x%02x", hdev->name, status); - - if (!skb || skb->len < sizeof(*rp)) { - bt_dev_err(hdev, "invalid read key size response"); - return; - } - - rp = (void *)skb->data; - handle = le16_to_cpu(rp->handle); - - 
hci_dev_lock(hdev); - - conn = hci_conn_hash_lookup_handle(hdev, handle); - if (!conn) - goto unlock; - - /* While unexpected, the read_enc_key_size command may fail. The most - * secure approach is to then assume the key size is 0 to force a - * disconnection. - */ - if (rp->status) { - bt_dev_err(hdev, "failed to read key size for handle %u", - handle); - conn->enc_key_size = 0; - } else { - conn->enc_key_size = rp->key_size; - } - - hci_encrypt_cfm(conn, 0); - -unlock: - hci_dev_unlock(hdev); -} - static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { @@ -3601,32 +3622,18 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, /* Try reading the encryption key size for encrypted ACL links */ if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { - struct hci_cp_read_enc_key_size cp; - struct hci_request req; - - /* Only send HCI_Read_Encryption_Key_Size if the - * controller really supports it. If it doesn't, assume - * the default size (16). - */ - if (!(hdev->commands[20] & 0x10)) { - conn->enc_key_size = HCI_LINK_KEY_SIZE; - goto notify; - } - - hci_req_init(&req, hdev); - - cp.handle = cpu_to_le16(conn->handle); - hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp); - - if (hci_req_run_skb(&req, read_enc_key_size_complete)) { - bt_dev_err(hdev, "sending read key size failed"); - conn->enc_key_size = HCI_LINK_KEY_SIZE; + if (hci_read_enc_key_size(hdev, conn)) goto notify; - } goto unlock; } + /* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers + * to avoid unexpected SMP command errors when pairing. + */ + if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT)) + goto notify; + /* Set the default Authenticated Payload Timeout after * an LE Link is established. 
As per Core Spec v5.0, Vol 2, Part B * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be @@ -3642,8 +3649,9 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, cp.handle = cpu_to_le16(conn->handle); cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); - hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, - sizeof(cp), &cp); + if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, + sizeof(cp), &cp)) + bt_dev_err(hdev, "write auth payload timeout failed"); } notify: @@ -3706,14 +3714,15 @@ static void hci_remote_features_evt(struct hci_dev *hdev, void *data, goto unlock; } - if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { + if (!ev->status) { struct hci_cp_remote_name_req cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.pscan_rep_mode = 0x02; hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); - } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) + } else { mgmt_device_connected(hdev, conn, NULL, 0); + } if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; @@ -3729,15 +3738,292 @@ static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) { cancel_delayed_work(&hdev->cmd_timer); + rcu_read_lock(); if (!test_bit(HCI_RESET, &hdev->flags)) { if (ncmd) { cancel_delayed_work(&hdev->ncmd_timer); atomic_set(&hdev->cmd_cnt, 1); } else { - schedule_delayed_work(&hdev->ncmd_timer, - HCI_NCMD_TIMEOUT); + if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) + queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer, + HCI_NCMD_TIMEOUT); } } + rcu_read_unlock(); +} + +static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_buffer_size_v2 *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->le_mtu = __le16_to_cpu(rp->acl_mtu); + hdev->le_pkts = rp->acl_max_pkt; + hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu); + hdev->iso_pkts = rp->iso_max_pkt; + + hdev->le_cnt = hdev->le_pkts; + hdev->iso_cnt = hdev->iso_pkts; + + BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu, + hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts); + + if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU) + return HCI_ERROR_INVALID_PARAMETERS; + + return rp->status; +} + +static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status) +{ + struct hci_conn *conn, *tmp; + + lockdep_assert_held(&hdev->lock); + + list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) { + if (conn->type != CIS_LINK || + conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig) + continue; + + if (HCI_CONN_HANDLE_UNSET(conn->handle)) + hci_conn_failed(conn, status); + } +} + +static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_set_cig_params *rp = data; + struct hci_cp_le_set_cig_params *cp; + struct hci_conn *conn; + u8 status = rp->status; + bool pending = false; + int i; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS); + if (!rp->status && (!cp || rp->num_handles != cp->num_cis || + rp->cig_id != cp->cig_id)) { + bt_dev_err(hdev, "unexpected Set CIG Parameters response data"); + status = HCI_ERROR_UNSPECIFIED; + } + + hci_dev_lock(hdev); + + /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554 + * + * If the Status return parameter is non-zero, then the state of the CIG + * and its CIS configurations shall not be 
changed by the command. If + * the CIG did not already exist, it shall not be created. + */ + if (status) { + /* Keep current configuration, fail only the unbound CIS */ + hci_unbound_cis_failed(hdev, rp->cig_id, status); + goto unlock; + } + + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553 + * + * If the Status return parameter is zero, then the Controller shall + * set the Connection_Handle arrayed return parameter to the connection + * handle(s) corresponding to the CIS configurations specified in + * the CIS_IDs command parameter, in the same order. + */ + for (i = 0; i < rp->num_handles; ++i) { + conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id, + cp->cis[i].cis_id); + if (!conn || !bacmp(&conn->dst, BDADDR_ANY)) + continue; + + if (conn->state != BT_BOUND && conn->state != BT_CONNECT) + continue; + + if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i]))) + continue; + + if (conn->state == BT_CONNECT) + pending = true; + } + +unlock: + if (pending) + hci_le_create_cis_pending(hdev); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_setup_iso_path *rp = data; + struct hci_cp_le_setup_iso_path *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH); + if (!cp) + return rp->status; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (!conn) + goto unlock; + + if (rp->status) { + hci_connect_cfm(conn, rp->status); + hci_conn_del(conn); + goto unlock; + } + + switch (cp->direction) { + /* Input (Host to Controller) */ + case 0x00: + /* Only confirm connection if output only */ + if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu) + hci_connect_cfm(conn, rp->status); + break; + /* Output (Controller to Host) */ + case 0x01: + /* Confirm connection since conn->iso_qos is always configured + * last. 
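The Set CIG Parameters handler above relies on the ordering the spec quotes in the hunk: on success the controller returns one connection handle per requested CIS, in the same order as the CIS_IDs in the command, and a count or CIG ID mismatch is treated as HCI_ERROR_UNSPECIFIED. The pairing amounts to walking both arrays by a shared index; a hedged standalone sketch:

#include <stdint.h>

struct cis_req  { uint8_t cis_id; };
struct cis_conn { uint8_t cis_id; uint16_t handle; };

/* Assign returned handles to the matching CIS slots, by position.
 * Returns the number of handles consumed, or -1 on a count mismatch. */
static int assign_cis_handles(struct cis_conn *conns,
			      const struct cis_req *req, uint8_t num_cis,
			      const uint16_t *handles, uint8_t num_handles)
{
	if (num_handles != num_cis)
		return -1;
	for (uint8_t i = 0; i < num_handles; i++) {
		conns[i].cis_id = req[i].cis_id;
		conns[i].handle = handles[i];
	}
	return num_handles;
}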
+ */ + hci_connect_cfm(conn, rp->status); + + /* Notify device connected in case it is a BIG Sync */ + if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags)) + mgmt_device_connected(hdev, conn, NULL, 0); + + break; + } + +unlock: + hci_dev_unlock(hdev); + return rp->status; +} + +static u8 hci_cc_le_read_all_local_features(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_all_local_features *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + memcpy(hdev->le_features, rp->features, 248); + + return rp->status; +} + +static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status) +{ + bt_dev_dbg(hdev, "status 0x%2.2x", status); +} + +static void hci_cs_le_read_all_remote_features(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_le_read_remote_features *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_ALL_REMOTE_FEATURES); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn && conn->state == BT_CONFIG) + hci_connect_cfm(conn, status); + + hci_dev_unlock(hdev); +} + +static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct hci_cp_le_set_per_adv_params *cp; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS); + if (!cp) + return rp->status; + + /* TODO: set the conn state */ + return rp->status; +} + +static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct hci_cp_le_set_per_adv_enable *cp; + struct adv_info *adv = NULL, *n; + u8 per_adv_cnt = 0; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE); + if (!cp) + return rp->status; + + hci_dev_lock(hdev); + + adv = hci_find_adv_instance(hdev, cp->handle); + + if (cp->enable) { + hci_dev_set_flag(hdev, HCI_LE_PER_ADV); + + if (adv) + adv->periodic_enabled = true; + } else { + if (adv) + adv->periodic_enabled = false; + + /* If just one instance was disabled check if there are + * any other instance enabled before clearing HCI_LE_PER_ADV. + * The current periodic adv instance will be marked as + * disabled once extended advertising is also disabled. 
+ */ + list_for_each_entry_safe(adv, n, &hdev->adv_instances, + list) { + if (adv->periodic && adv->enabled) + per_adv_cnt++; + } + + if (per_adv_cnt > 1) + goto unlock; + + hci_dev_clear_flag(hdev, HCI_LE_PER_ADV); + } + +unlock: + hci_dev_unlock(hdev); + + return rp->status; } #define HCI_CC_VL(_op, _func, _min, _max) \ @@ -3763,8 +4049,8 @@ static const struct hci_cc { HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), - HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, - hci_cc_remote_name_req_cancel), + HCI_CC(HCI_OP_REMOTE_NAME_REQ_CANCEL, hci_cc_remote_name_req_cancel, + sizeof(struct hci_rp_remote_name_req_cancel)), HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, sizeof(struct hci_rp_role_discovery)), HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, @@ -3822,14 +4108,10 @@ static const struct hci_cc { HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, sizeof(struct hci_rp_read_page_scan_type)), HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), - HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size, - sizeof(struct hci_rp_read_data_block_size)), - HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode, - sizeof(struct hci_rp_read_flow_control_mode)), - HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info, - sizeof(struct hci_rp_read_local_amp_info)), HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, sizeof(struct hci_rp_read_clock)), + HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size, + sizeof(struct hci_rp_read_enc_key_size)), HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, sizeof(struct hci_rp_read_inq_rsp_tx_power)), HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, @@ -3905,17 +4187,27 @@ static const struct hci_cc { HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, hci_cc_le_read_num_adv_sets, sizeof(struct hci_rp_le_read_num_supported_adv_sets)), - HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, - sizeof(struct hci_rp_le_set_ext_adv_params)), HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, hci_cc_le_set_ext_adv_enable), HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, hci_cc_le_set_adv_set_random_addr), HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), + HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param), + HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE, + hci_cc_le_set_per_adv_enable), HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, sizeof(struct hci_rp_le_read_transmit_power)), - HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode) + HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode), + HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2, + sizeof(struct hci_rp_le_read_buffer_size_v2)), + HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params, + sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE), + HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path, + sizeof(struct hci_rp_le_setup_iso_path)), + HCI_CC(HCI_OP_LE_READ_ALL_LOCAL_FEATURES, + hci_cc_le_read_all_local_features, + sizeof(struct hci_rp_le_read_all_local_features)), }; static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, @@ -3963,6 +4255,24 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, } } + if (i == ARRAY_SIZE(hci_cc_table)) { + if (!skb->len) { + 
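The hci_cc table above is a table-driven dispatcher: HCI_CC entries pin the response to one exact length, HCI_CC_VL entries carry a minimum and maximum, and the bounds are checked before the handler ever sees the data. A standalone miniature of that pattern (opcode value and error codes are illustrative):

#include <stdint.h>
#include <stddef.h>

struct cc {
	uint16_t op;
	uint8_t (*func)(const void *data, size_t len);
	size_t min_len, max_len;
};

static uint8_t cc_inquiry_cancel(const void *data, size_t len)
{
	return ((const uint8_t *)data)[0];	/* status byte */
}

static const struct cc cc_table[] = {
	{ 0x0402 /* illustrative opcode */, cc_inquiry_cancel, 1, 1 },
};

static uint8_t dispatch_cc(uint16_t op, const void *data, size_t len)
{
	for (size_t i = 0; i < sizeof(cc_table) / sizeof(cc_table[0]); i++) {
		if (cc_table[i].op != op)
			continue;
		if (len < cc_table[i].min_len || len > cc_table[i].max_len)
			return 0x1f;	/* unspecified error: bad length */
		return cc_table[i].func(data, len);
	}
	return 0xff;	/* unknown opcode: caller applies its fallback */
}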
bt_dev_err(hdev, "Unexpected cc 0x%4.4x with no status", + *opcode); + *status = HCI_ERROR_UNSPECIFIED; + return; + } + + /* Unknown opcode, assume byte 0 contains the status, so + * that e.g. __hci_cmd_sync() properly returns errors + * for vendor specific commands send by HCI drivers. + * If a vendor doesn't actually follow this convention we may + * need to introduce a vendor CC table in order to properly set + * the status. + */ + *status = skb->data[0]; + } + handle_cmd_cnt_and_timer(hdev, ev->ncmd); hci_req_cmd_complete(hdev, *opcode, *status, req_complete, @@ -3978,6 +4288,48 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, queue_work(hdev->workqueue, &hdev->cmd_work); } +static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_le_create_cis *cp; + bool pending = false; + int i; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS); + if (!cp) + return; + + hci_dev_lock(hdev); + + /* Remove connection if command failed */ + for (i = 0; i < cp->num_cis; i++) { + struct hci_conn *conn; + u16 handle; + + handle = __le16_to_cpu(cp->cis[i].cis_handle); + + conn = hci_conn_hash_lookup_handle(hdev, handle); + if (conn) { + if (test_and_clear_bit(HCI_CONN_CREATE_CIS, + &conn->flags)) + pending = true; + conn->state = BT_CLOSED; + hci_connect_cfm(conn, status); + hci_conn_del(conn); + } + } + cp->num_cis = 0; + + if (pending) + hci_le_create_cis_pending(hdev); + + hci_dev_unlock(hdev); +} + #define HCI_CS(_op, _func) \ { \ .op = _op, \ @@ -4007,7 +4359,11 @@ static const struct hci_cs { HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), - HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn) + HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn), + HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis), + HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big), + HCI_CS(HCI_OP_LE_READ_ALL_REMOTE_FEATURES, + hci_cs_le_read_all_remote_features), }; static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, @@ -4038,7 +4394,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, * (since for this kind of commands there will not be a command * complete event). */ - if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) { + if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) { hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, req_complete_skb); if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { @@ -4097,17 +4453,15 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, flex_array_size(ev, handles, ev->num))) return; - if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { - bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); - return; - } - bt_dev_dbg(hdev, "num %d", ev->num); + hci_dev_lock(hdev); + for (i = 0; i < ev->num; i++) { struct hci_comp_pkts_info *info = &ev->handles[i]; struct hci_conn *conn; __u16 handle, count; + unsigned int i; handle = __le16_to_cpu(info->handle); count = __le16_to_cpu(info->count); @@ -4116,7 +4470,20 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, if (!conn) continue; - conn->sent -= count; + /* Check if there is really enough packets outstanding before + * attempting to decrease the sent counter otherwise it could + * underflow.. 
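For opcodes absent from the table, the fallback earlier in this hunk assumes byte 0 of the return parameters is a status byte, the convention vendor-specific commands are expected to follow so that __hci_cmd_sync() can surface their errors, and an empty payload is itself an error. The fallback in isolation:

#include <stdint.h>
#include <stddef.h>

/* Command-complete event with an opcode we have no handler for:
 * an empty payload is an error, otherwise byte 0 is taken as the
 * status by convention. */
static uint8_t vendor_cc_status(const uint8_t *params, size_t len)
{
	if (!len)
		return 0x1f;	/* HCI "unspecified error" */
	return params[0];
}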
+ */ + if (conn->sent >= count) { + conn->sent -= count; + } else { + bt_dev_warn(hdev, "hcon %p sent %u < count %u", + conn, conn->sent, count); + conn->sent = 0; + } + + for (i = 0; i < count; ++i) + hci_conn_tx_dequeue(conn); switch (conn->type) { case ACL_LINK: @@ -4138,81 +4505,19 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, break; case SCO_LINK: + case ESCO_LINK: hdev->sco_cnt += count; if (hdev->sco_cnt > hdev->sco_pkts) hdev->sco_cnt = hdev->sco_pkts; - break; - default: - bt_dev_err(hdev, "unknown type %d conn %p", - conn->type, conn); break; - } - } - - queue_work(hdev->workqueue, &hdev->tx_work); -} - -static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, - __u16 handle) -{ - struct hci_chan *chan; - - switch (hdev->dev_type) { - case HCI_PRIMARY: - return hci_conn_hash_lookup_handle(hdev, handle); - case HCI_AMP: - chan = hci_chan_lookup_handle(hdev, handle); - if (chan) - return chan->conn; - break; - default: - bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); - break; - } - - return NULL; -} - -static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_ev_num_comp_blocks *ev = data; - int i; - - if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS, - flex_array_size(ev, handles, ev->num_hndl))) - return; - if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { - bt_dev_err(hdev, "wrong event for mode %d", - hdev->flow_ctl_mode); - return; - } - - bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks, - ev->num_hndl); - - for (i = 0; i < ev->num_hndl; i++) { - struct hci_comp_blocks_info *info = &ev->handles[i]; - struct hci_conn *conn = NULL; - __u16 handle, block_count; - - handle = __le16_to_cpu(info->handle); - block_count = __le16_to_cpu(info->blocks); - - conn = __hci_conn_lookup_handle(hdev, handle); - if (!conn) - continue; - - conn->sent -= block_count; - - switch (conn->type) { - case ACL_LINK: - case AMP_LINK: - hdev->block_cnt += block_count; - if (hdev->block_cnt > hdev->num_blocks) - hdev->block_cnt = hdev->num_blocks; + case CIS_LINK: + case BIS_LINK: + case PA_LINK: + hdev->iso_cnt += count; + if (hdev->iso_cnt > hdev->iso_pkts) + hdev->iso_cnt = hdev->iso_pkts; break; default: @@ -4223,6 +4528,8 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data, } queue_work(hdev->workqueue, &hdev->tx_work); + + hci_dev_unlock(hdev); } static void hci_mode_change_evt(struct hci_dev *hdev, void *data, @@ -4400,6 +4707,15 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, if (!conn) goto unlock; + /* Ignore NULL link key against CVE-2020-26555 */ + if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) { + bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR", + &ev->bdaddr); + hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); + hci_conn_drop(conn); + goto unlock; + } + hci_conn_hold(conn); conn->disc_timeout = HCI_DISCONN_TIMEOUT; hci_conn_drop(conn); @@ -4534,7 +4850,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, if (!info) { bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", HCI_EV_INQUIRY_RESULT_WITH_RSSI); - return; + goto unlock; } bacpy(&data.bdaddr, &info->bdaddr); @@ -4550,7 +4866,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, - flags, NULL, 0, NULL, 0); + flags, NULL, 0, NULL, 0, 0); } } else if (skb->len == array_size(ev->num, sizeof(struct 
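hci_num_comp_pkts_evt() above clamps conn->sent rather than blindly subtracting, because a buggy or malicious controller can report more completed packets than were ever queued and underflow the counter. The clamp pattern on its own:

#include <stdint.h>
#include <stdio.h>

static void complete_pkts(uint32_t *sent, uint32_t count)
{
	if (*sent >= count) {
		*sent -= count;
	} else {
		fprintf(stderr, "sent %u < count %u, clamping\n", *sent, count);
		*sent = 0;	/* never let the counter wrap */
	}
}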
inquiry_info_rssi))) { @@ -4565,7 +4881,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, if (!info) { bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", HCI_EV_INQUIRY_RESULT_WITH_RSSI); - return; + goto unlock; } bacpy(&data.bdaddr, &info->bdaddr); @@ -4581,13 +4897,13 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, - flags, NULL, 0, NULL, 0); + flags, NULL, 0, NULL, 0, 0); } } else { bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", HCI_EV_INQUIRY_RESULT_WITH_RSSI); } - +unlock: hci_dev_unlock(hdev); } @@ -4642,8 +4958,9 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, bacpy(&cp.bdaddr, &conn->dst); cp.pscan_rep_mode = 0x02; hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); - } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) + } else { mgmt_device_connected(hdev, conn, NULL, 0); + } if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; @@ -4660,8 +4977,22 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, { struct hci_ev_sync_conn_complete *ev = data; struct hci_conn *conn; + u8 status = ev->status; - bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + switch (ev->link_type) { + case SCO_LINK: + case ESCO_LINK: + break; + default: + /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type + * for HCI_Synchronous_Connection_Complete is limited to + * either SCO or eSCO + */ + bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); + return; + } + + bt_dev_dbg(hdev, "status 0x%2.2x", status); hci_dev_lock(hdev); @@ -4684,24 +5015,25 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, goto unlock; } - switch (ev->status) { + /* The HCI_Synchronous_Connection_Complete event is only sent once per connection. + * Processing it more than once per connection can corrupt kernel memory. + * + * As the connection handle is set here for the first time, it indicates + * whether the connection is already set up. + */ + if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { + bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection"); + goto unlock; + } + + switch (status) { case 0x00: - /* The synchronous connection complete event should only be - * sent once per new connection. Receiving a successful - * complete event when the connection status is already - * BT_CONNECTED means that the device is misbehaving and sent - * multiple complete event packets for the same new connection. - * - * Registering the device more than once can corrupt kernel - * memory, hence upon detecting this invalid event, we report - * an error and ignore the packet. 
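hci_sync_conn_complete_evt() above filters ev->link_type before touching any state: Core 5.3 Vol 4 Part E 7.7.35 limits this event to SCO and eSCO, so anything else is controller misbehaviour and is dropped. As a sketch (the enum values are illustrative):

#include <stdbool.h>

enum { SCO_LINK = 0x00, ESCO_LINK = 0x02 };	/* values illustrative */

static bool sync_link_type_valid(unsigned char link_type)
{
	switch (link_type) {
	case SCO_LINK:
	case ESCO_LINK:
		return true;
	default:
		return false;	/* spec limits this event to SCO/eSCO */
	}
}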
- */ - if (conn->state == BT_CONNECTED) { - bt_dev_err(hdev, "Ignoring connect complete event for existing connection"); - goto unlock; + status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle)); + if (status) { + conn->state = BT_CLOSED; + break; } - conn->handle = __le16_to_cpu(ev->handle); conn->state = BT_CONNECTED; conn->type = ev->link_type; @@ -4720,7 +5052,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, if (conn->out) { conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | (hdev->esco_type & EDR_ESCO_MASK); - if (hci_setup_sync(conn, conn->link->handle)) + if (hci_setup_sync(conn, conn->parent->handle)) goto unlock; } fallthrough; @@ -4745,8 +5077,8 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, } } - hci_connect_cfm(conn, ev->status); - if (ev->status) + hci_connect_cfm(conn, status); + if (status) hci_conn_del(conn); unlock: @@ -4819,7 +5151,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, - flags, info->data, eir_len, NULL, 0); + flags, info->data, eir_len, NULL, 0, 0); } hci_dev_unlock(hdev); @@ -4917,8 +5249,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn) * available, then do not declare that OOB data is * present. */ - if (!memcmp(data->rand256, ZERO_KEY, 16) || - !memcmp(data->hash256, ZERO_KEY, 16)) + if (!crypto_memneq(data->rand256, ZERO_KEY, 16) || + !crypto_memneq(data->hash256, ZERO_KEY, 16)) return 0x00; return 0x02; @@ -4928,8 +5260,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn) * not supported by the hardware, then check that if * P-192 data values are present. */ - if (!memcmp(data->rand192, ZERO_KEY, 16) || - !memcmp(data->hash192, ZERO_KEY, 16)) + if (!crypto_memneq(data->rand192, ZERO_KEY, 16) || + !crypto_memneq(data->hash192, ZERO_KEY, 16)) return 0x00; return 0x01; @@ -4946,9 +5278,12 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); - if (!conn) + if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) goto unlock; + /* Assume remote supports SSP since it has triggered this event */ + set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); + hci_conn_hold(conn); if (!hci_dev_test_flag(hdev, HCI_MGMT)) @@ -5060,19 +5395,16 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, goto unlock; } - /* If no side requires MITM protection; auto-accept */ + /* If no side requires MITM protection; use JUST_CFM method */ if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { - /* If we're not the initiators request authorization to - * proceed from user space (mgmt_user_confirm with - * confirm_hint set to 1). The exception is if neither - * side had MITM or if the local IO capability is - * NoInputNoOutput, in which case we do auto-accept + /* If we're not the initiator of request authorization and the + * local IO capability is not NoInputNoOutput, use JUST_WORKS + * method (mgmt_user_confirm with confirm_hint set to 1). 
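Both the NULL link key rejection and bredr_oob_data_present() above switch from memcmp() to crypto_memneq(), whose runtime depends only on the length, so the comparison leaks nothing about where secret material first differs. A portable, non-kernel constant-time inequality test with the zero-key check from the hunk as usage:

#include <stddef.h>
#include <stdint.h>

/* Returns nonzero if a and b differ; runtime depends only on len. */
static int ct_memneq(const void *a, const void *b, size_t len)
{
	const volatile uint8_t *pa = a, *pb = b;
	uint8_t diff = 0;

	for (size_t i = 0; i < len; i++)
		diff |= pa[i] ^ pb[i];
	return diff;
}

/* Usage mirroring the hunk: a link key of all zeroes must be refused. */
static const uint8_t ZERO_KEY[16];

static int link_key_is_null(const uint8_t key[16])
{
	return !ct_memneq(key, ZERO_KEY, 16);
}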
*/ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && - conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && - (loc_mitm || rem_mitm)) { + conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) { bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); confirm_hint = 1; goto confirm; @@ -5193,7 +5525,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); - if (!conn) + if (!conn || !hci_conn_ssp_enabled(conn)) goto unlock; /* Reset the authentication requirement to unknown */ @@ -5289,149 +5621,6 @@ unlock: hci_dev_unlock(hdev); } -#if IS_ENABLED(CONFIG_BT_HS) -static void hci_chan_selected_evt(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_ev_channel_selected *ev = data; - struct hci_conn *hcon; - - bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle); - - hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); - if (!hcon) - return; - - amp_read_loc_assoc_final_data(hdev, hcon); -} - -static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_ev_phy_link_complete *ev = data; - struct hci_conn *hcon, *bredr_hcon; - - bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle, - ev->status); - - hci_dev_lock(hdev); - - hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); - if (!hcon) - goto unlock; - - if (!hcon->amp_mgr) - goto unlock; - - if (ev->status) { - hci_conn_del(hcon); - goto unlock; - } - - bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; - - hcon->state = BT_CONNECTED; - bacpy(&hcon->dst, &bredr_hcon->dst); - - hci_conn_hold(hcon); - hcon->disc_timeout = HCI_DISCONN_TIMEOUT; - hci_conn_drop(hcon); - - hci_debugfs_create_conn(hcon); - hci_conn_add_sysfs(hcon); - - amp_physical_cfm(bredr_hcon, hcon); - -unlock: - hci_dev_unlock(hdev); -} - -static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_ev_logical_link_complete *ev = data; - struct hci_conn *hcon; - struct hci_chan *hchan; - struct amp_mgr *mgr; - - bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", - le16_to_cpu(ev->handle), ev->phy_handle, ev->status); - - hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); - if (!hcon) - return; - - /* Create AMP hchan */ - hchan = hci_chan_create(hcon); - if (!hchan) - return; - - hchan->handle = le16_to_cpu(ev->handle); - hchan->amp = true; - - BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); - - mgr = hcon->amp_mgr; - if (mgr && mgr->bredr_chan) { - struct l2cap_chan *bredr_chan = mgr->bredr_chan; - - l2cap_chan_lock(bredr_chan); - - bredr_chan->conn->mtu = hdev->block_mtu; - l2cap_logical_cfm(bredr_chan, hchan, 0); - hci_conn_hold(hcon); - - l2cap_chan_unlock(bredr_chan); - } -} - -static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_ev_disconn_logical_link_complete *ev = data; - struct hci_chan *hchan; - - bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", - le16_to_cpu(ev->handle), ev->status); - - if (ev->status) - return; - - hci_dev_lock(hdev); - - hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); - if (!hchan || !hchan->amp) - goto unlock; - - amp_destroy_logical_link(hchan, ev->reason); - -unlock: - hci_dev_unlock(hdev); -} - -static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_ev_disconn_phy_link_complete *ev = data; - struct hci_conn *hcon; - - bt_dev_dbg(hdev, 
"status 0x%2.2x", ev->status); - - if (ev->status) - return; - - hci_dev_lock(hdev); - - hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); - if (hcon) { - hcon->state = BT_CLOSED; - hci_conn_del(hcon); - } - - hci_dev_unlock(hdev); -} -#endif - static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *local_rpa) { @@ -5495,6 +5684,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, struct hci_conn *conn; struct smp_irk *irk; u8 addr_type; + int err; hci_dev_lock(hdev); @@ -5503,15 +5693,30 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, */ hci_dev_clear_flag(hdev, HCI_LE_ADV); - conn = hci_lookup_le_connect(hdev); - if (!conn) { - conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); - if (!conn) { - bt_dev_err(hdev, "no memory for new connection"); + /* Check for existing connection: + * + * 1. If it doesn't exist then use the role to create a new object. + * 2. If it does exist confirm that it is connecting/BT_CONNECT in case + * of initiator/master role since there could be a collision where + * either side is attempting to connect or something like a fuzzing + * testing is trying to play tricks to destroy the hcon object before + * it even attempts to connect (e.g. hcon->state == BT_OPEN). + */ + conn = hci_conn_hash_lookup_role(hdev, LE_LINK, role, bdaddr); + if (!conn || + (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) { + /* In case of error status and there is no connection pending + * just unlock as there is nothing to cleanup. + */ + if (status) goto unlock; - } - conn->dst_type = bdaddr_type; + conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, bdaddr_type, + role); + if (IS_ERR(conn)) { + bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); + goto unlock; + } /* If we didn't have a hci_conn object previously * but we're in central role this must be something @@ -5537,6 +5742,17 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, cancel_delayed_work(&conn->le_conn_timeout); } + /* The HCI_LE_Connection_Complete event is only sent once per connection. + * Processing it more than once per connection can corrupt kernel memory. + * + * As the connection handle is set here for the first time, it indicates + * whether the connection is already set up. + */ + if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { + bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); + goto unlock; + } + le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); /* Lookup the identity address from the stored connection @@ -5556,8 +5772,16 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); - if (status) { - hci_le_conn_failed(conn, status); + /* All connection failure handling is taken care of by the + * hci_conn_failed function which is triggered by the HCI + * request completion callbacks used for connecting. 
+ */ + if (status || hci_conn_set_handle(conn, handle)) + goto unlock; + + /* Drop the connection if it has been aborted */ + if (test_bit(HCI_CONN_CANCEL, &conn->flags)) { + hci_conn_drop(conn); goto unlock; } @@ -5572,15 +5796,13 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, goto unlock; } - if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) - mgmt_device_connected(hdev, conn, NULL, 0); + mgmt_device_connected(hdev, conn, NULL, 0); conn->sec_level = BT_SECURITY_LOW; - conn->handle = handle; conn->state = BT_CONFIG; /* Store current advertising instance as connection advertising instance - * when sotfware rotation is in use so it can be re-enabled when + * when software rotation is in use so it can be re-enabled when * disconnected. */ if (!ext_adv_capable(hdev)) @@ -5593,26 +5815,8 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); - /* The remote features procedure is defined for central - * role only. So only in case of an initiated connection - * request the remote features. - * - * If the local controller supports peripheral-initiated features - * exchange, then requesting the remote features in peripheral - * role is possible. Otherwise just transition into the - * connected state without requesting the remote features. - */ - if (conn->out || - (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { - struct hci_cp_le_read_remote_features cp; - - cp.handle = __cpu_to_le16(conn->handle); - - hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, - sizeof(cp), &cp); - - hci_conn_hold(conn); - } else { + err = hci_le_read_remote_features(conn); + if (err) { conn->state = BT_CONNECTED; hci_connect_cfm(conn, status); } @@ -5620,7 +5824,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, conn->dst_type); if (params) { - list_del_init(¶ms->action); + hci_pend_le_list_del_init(params); if (params->conn) { hci_conn_drop(params->conn); hci_conn_put(params->conn); @@ -5661,6 +5865,29 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, le16_to_cpu(ev->supervision_timeout)); } +static void hci_le_pa_sync_lost_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_pa_sync_lost *ev = data; + u16 handle = le16_to_cpu(ev->handle); + struct hci_conn *conn; + + bt_dev_dbg(hdev, "sync handle 0x%4.4x", handle); + + hci_dev_lock(hdev); + + /* Delete the pa sync connection */ + conn = hci_conn_hash_lookup_pa_sync_handle(hdev, handle); + if (conn) { + clear_bit(HCI_CONN_BIG_SYNC, &conn->flags); + clear_bit(HCI_CONN_PA_SYNC, &conn->flags); + hci_disconn_cfm(conn, HCI_ERROR_REMOTE_USER_TERM); + hci_conn_del(conn); + } + + hci_dev_unlock(hdev); +} + static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { @@ -5670,8 +5897,6 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); - adv = hci_find_adv_instance(hdev, ev->handle); - /* The Bluetooth Core 5.3 specification clearly states that this event * shall not be sent when the Host disables the advertising set. So in * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. 
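Editorial note: both the synchronous and the LE connection-complete paths in the hunks above now share a set-once handle discipline. Below is a minimal sketch of that pattern, assuming only the HCI_CONN_HANDLE_UNSET() and hci_conn_set_handle() helpers this diff already relies on; it is not a verbatim copy of either handler.

/* Sketch only: the duplicate-complete guard, using the helpers above. */
static int example_set_handle_once(struct hci_conn *conn, __le16 le_handle)
{
	/* The handle starts out as HCI_CONN_HANDLE_UNSET, so a second
	 * complete event for the same connection finds it already
	 * populated and is rejected before it can corrupt state.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle))
		return -EEXIST;

	/* hci_conn_set_handle() is also expected to validate the value,
	 * unlike the old open-coded
	 * "conn->handle = __le16_to_cpu(ev->handle)" assignment.
	 */
	return hci_conn_set_handle(conn, __le16_to_cpu(le_handle));
}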
@@ -5684,9 +5909,13 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, return; } + hci_dev_lock(hdev); + + adv = hci_find_adv_instance(hdev, ev->handle); + if (ev->status) { if (!adv) - return; + goto unlock; /* Remove advertising as it has been terminated */ hci_remove_adv_instance(hdev, ev->handle); @@ -5694,12 +5923,12 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { if (adv->enabled) - return; + goto unlock; } /* We are no longer advertising, clear HCI_LE_ADV */ hci_dev_clear_flag(hdev, HCI_LE_ADV); - return; + goto unlock; } if (adv) @@ -5714,16 +5943,84 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || bacmp(&conn->resp_addr, BDADDR_ANY)) - return; + goto unlock; if (!ev->handle) { bacpy(&conn->resp_addr, &hdev->random_addr); - return; + goto unlock; } if (adv) bacpy(&conn->resp_addr, &adv->random_addr); } + +unlock: + hci_dev_unlock(hdev); +} + +static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle) +{ + struct hci_cp_le_pa_term_sync cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + + return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp); +} + +static void hci_le_past_received_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_past_received *ev = data; + int mask = hdev->link_mode; + __u8 flags = 0; + struct hci_conn *pa_sync, *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + hci_dev_clear_flag(hdev, HCI_PA_SYNC); + + conn = hci_conn_hash_lookup_create_pa_sync(hdev); + if (!conn) { + bt_dev_err(hdev, + "Unable to find connection for dst %pMR sid 0x%2.2x", + &ev->bdaddr, ev->sid); + goto unlock; + } + + conn->sync_handle = le16_to_cpu(ev->sync_handle); + conn->sid = HCI_SID_INVALID; + + mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK, + &flags); + if (!(mask & HCI_LM_ACCEPT)) { + hci_le_pa_term_sync(hdev, ev->sync_handle); + goto unlock; + } + + if (!(flags & HCI_PROTO_DEFER)) + goto unlock; + + /* Add connection to indicate PA sync event */ + pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY, 0, + HCI_ROLE_SLAVE); + + if (IS_ERR(pa_sync)) + goto unlock; + + pa_sync->sync_handle = le16_to_cpu(ev->sync_handle); + + if (ev->status) { + set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags); + + /* Notify iso layer */ + hci_connect_cfm(pa_sync, ev->status); + } + +unlock: + hci_dev_unlock(hdev); } static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, @@ -5753,7 +6050,7 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, bool addr_resolved, - u8 adv_type) + u8 adv_type, u8 phy, u8 sec_phy) { struct hci_conn *conn; struct hci_conn_params *params; @@ -5771,7 +6068,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, * while we have an existing one in peripheral role. 
*/ if (hdev->conn_hash.le_num_peripheral > 0 && - (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || + (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES) || !(hdev->le_states[3] & 0x10))) return NULL; @@ -5808,7 +6105,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, - HCI_ROLE_MASTER); + HCI_ROLE_MASTER, phy, sec_phy); if (!IS_ERR(conn)) { /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned * by higher layer that tried to connect, if no then @@ -5843,8 +6140,9 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *direct_addr, - u8 direct_addr_type, s8 rssi, u8 *data, u8 len, - bool ext_adv) + u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi, + u8 *data, u8 len, bool ext_adv, bool ctl_time, + u64 instant) { struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; @@ -5866,8 +6164,9 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, return; } - if (!ext_adv && len > HCI_MAX_AD_LENGTH) { - bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); + if (len > max_adv_len(hdev)) { + bt_dev_err_ratelimited(hdev, + "adv larger than maximum supported"); return; } @@ -5891,8 +6190,17 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * a LE Direct Advertising Report event. In that case it is * important to see if the address is matching the local * controller address. + * + * If local privacy is not enable the controller shall not be + * generating such event since according to its documentation it is only + * valid for filter_policy 0x02 and 0x03, but the fact that it did + * generate LE Direct Advertising Report means it is probably broken and + * won't generate any other event which can potentially break + * auto-connect logic so in case local privacy is not enable this + * ignores the direct_addr so it works as a regular report. */ - if (direct_addr) { + if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr && + hci_dev_test_flag(hdev, HCI_PRIVACY)) { direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, &bdaddr_resolved); @@ -5902,12 +6210,6 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) return; - /* If the controller is not using resolvable random - * addresses, then this report can be ignored. - */ - if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) - return; - /* If the local IRK of the controller does not match * with the resolvable random address provided, then * this report can be ignored. @@ -5931,8 +6233,9 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * for advertising reports) and is already verified to be RPA above. 
*/ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, - type); - if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { + type, phy, sec_phy); + if (!ext_adv && conn && type == LE_ADV_IND && + len <= max_adv_len(hdev)) { /* Store report for later inclusion by * mgmt_device_connected */ @@ -5940,6 +6243,18 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, conn->le_adv_data_len = len; } + if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) + flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; + else + flags = 0; + + /* All scan results should be sent up for Mesh systems */ + if (hci_dev_test_flag(hdev, HCI_MESH)) { + mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, + rssi, flags, data, len, NULL, 0, instant); + return; + } + /* Passive scanning shouldn't trigger any device found events, * except for devices marked as CONN_REPORT for which we do send * device found events, or advertisement monitoring requested. @@ -5953,52 +6268,41 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, idr_is_empty(&hdev->adv_monitors_idr)) return; - if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) - flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; - else - flags = 0; mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, - rssi, flags, data, len, NULL, 0); + rssi, flags, data, len, NULL, 0, 0); return; } - /* When receiving non-connectable or scannable undirected - * advertising reports, this means that the remote device is - * not connectable and then clearly indicate this in the - * device found event. - * - * When receiving a scan response, then there is no way to + /* When receiving a scan response, then there is no way to * know if the remote device is connectable or not. However * since scan responses are merged with a previously seen * advertising report, the flags field from that report * will be used. * - * In the really unlikely case that a controller get confused - * and just sends a scan response event, then it is marked as - * not connectable as well. + * In the unlikely case that a controller just sends a scan + * response event that doesn't match the pending report, then + * it is marked as a standalone SCAN_RSP. */ - if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND || - type == LE_ADV_SCAN_RSP) - flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; - else - flags = 0; + if (type == LE_ADV_SCAN_RSP) + flags = MGMT_DEV_FOUND_SCAN_RSP; /* If there's nothing pending either store the data from this * event or send an immediate device found event if the data * should not be stored for later. */ - if (!ext_adv && !has_pending_adv_report(hdev)) { + if (!has_pending_adv_report(hdev)) { /* If the report will trigger a SCAN_REQ store it for * later merging. */ - if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + if (!ext_adv && (type == LE_ADV_IND || + type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; } mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, - rssi, flags, data, len, NULL, 0); + rssi, flags, data, len, NULL, 0, 0); return; } @@ -6017,7 +6321,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, d->last_adv_addr_type, NULL, d->last_adv_rssi, d->last_adv_flags, d->last_adv_data, - d->last_adv_data_len, NULL, 0); + d->last_adv_data_len, NULL, 0, 0); /* If the new report will trigger a SCAN_REQ store it for * later merging. 
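Editorial note: the store-and-merge flow referenced just above is easiest to follow end to end. The following condensed sketch reflects the logic of process_adv_report after this patch; store_report() and report_now() are hypothetical stand-ins for the pending-report helpers used in this file.

/* Condensed sketch of the report-merging flow. Hypothetical helpers. */
static void store_report(struct hci_dev *hdev) { /* hypothetical stub */ }
static void report_now(struct hci_dev *hdev) { /* hypothetical stub */ }

static void example_merge_flow(struct hci_dev *hdev, u8 type, bool ext_adv)
{
	if (!has_pending_adv_report(hdev)) {
		/* Only legacy ADV_IND/ADV_SCAN_IND solicit a SCAN_REQ,
		 * so only those are held back for later merging;
		 * extended PDUs are now reported immediately.
		 */
		if (!ext_adv &&
		    (type == LE_ADV_IND || type == LE_ADV_SCAN_IND))
			store_report(hdev);
		else
			report_now(hdev);
		return;
	}

	/* A pending report is completed by its SCAN_RSP and the pair is
	 * sent up as one device-found event; any other PDU flushes the
	 * stored report on its own first.
	 */
	report_now(hdev);
}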
@@ -6034,7 +6338,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, */ clear_pending_adv_report(hdev); mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, - rssi, flags, data, len, NULL, 0); + rssi, flags, data, len, NULL, 0, 0); return; } @@ -6044,7 +6348,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, */ mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, - d->last_adv_data, d->last_adv_data_len, data, len); + d->last_adv_data, d->last_adv_data_len, data, len, 0); clear_pending_adv_report(hdev); } @@ -6052,6 +6356,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_advertising_report *ev = data; + u64 instant = jiffies; if (!ev->num) return; @@ -6072,11 +6377,13 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, info->length + 1)) break; - if (info->length <= HCI_MAX_AD_LENGTH) { + if (info->length <= max_adv_len(hdev)) { rssi = info->data[info->length]; process_adv_report(hdev, info->type, &info->bdaddr, - info->bdaddr_type, NULL, 0, rssi, - info->data, info->length, false); + info->bdaddr_type, NULL, 0, + HCI_ADV_PHY_1M, 0, rssi, + info->data, info->length, false, + false, instant); } else { bt_dev_err(hdev, "Dropping invalid advertising data"); } @@ -6087,6 +6394,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) { + u16 pdu_type = evt_type & ~LE_EXT_ADV_DATA_STATUS_MASK; + + if (!pdu_type) + return LE_ADV_NONCONN_IND; + if (evt_type & LE_EXT_ADV_LEGACY_PDU) { switch (evt_type) { case LE_LEGACY_ADV_IND: @@ -6118,8 +6430,7 @@ static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) if (evt_type & LE_EXT_ADV_SCAN_IND) return LE_ADV_SCAN_IND; - if (evt_type == LE_EXT_ADV_NON_CONN_IND || - evt_type & LE_EXT_ADV_DIRECT_IND) + if (evt_type & LE_EXT_ADV_DIRECT_IND) return LE_ADV_NONCONN_IND; invalid: @@ -6133,6 +6444,7 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_ext_adv_report *ev = data; + u64 instant = jiffies; if (!ev->num) return; @@ -6153,19 +6465,136 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, info->length)) break; - evt_type = __le16_to_cpu(info->type); + evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK; legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type); + + if (hci_test_quirk(hdev, + HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY)) { + info->primary_phy &= 0x1f; + info->secondary_phy &= 0x1f; + } + + /* Check if PA Sync is pending and if the hci_conn SID has not + * been set update it. 
+ */ + if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_create_pa_sync(hdev); + if (conn && conn->sid == HCI_SID_INVALID) + conn->sid = info->sid; + } + if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &info->bdaddr, info->bdaddr_type, NULL, 0, + info->primary_phy, + info->secondary_phy, info->rssi, info->data, info->length, - !(evt_type & LE_EXT_ADV_LEGACY_PDU)); + !(evt_type & LE_EXT_ADV_LEGACY_PDU), + false, instant); } } hci_dev_unlock(hdev); } +static void hci_le_pa_sync_established_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_pa_sync_established *ev = data; + int mask = hdev->link_mode; + __u8 flags = 0; + struct hci_conn *pa_sync, *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + hci_dev_clear_flag(hdev, HCI_PA_SYNC); + + conn = hci_conn_hash_lookup_create_pa_sync(hdev); + if (!conn) { + bt_dev_err(hdev, + "Unable to find connection for dst %pMR sid 0x%2.2x", + &ev->bdaddr, ev->sid); + goto unlock; + } + + clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); + + conn->sync_handle = le16_to_cpu(ev->handle); + conn->sid = HCI_SID_INVALID; + + mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK, + &flags); + if (!(mask & HCI_LM_ACCEPT)) { + hci_le_pa_term_sync(hdev, ev->handle); + goto unlock; + } + + if (!(flags & HCI_PROTO_DEFER)) + goto unlock; + + /* Add connection to indicate PA sync event */ + pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY, 0, + HCI_ROLE_SLAVE); + + if (IS_ERR(pa_sync)) + goto unlock; + + pa_sync->sync_handle = le16_to_cpu(ev->handle); + + if (ev->status) { + set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags); + + /* Notify iso layer */ + hci_connect_cfm(pa_sync, ev->status); + } + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_per_adv_report *ev = data; + int mask = hdev->link_mode; + __u8 flags = 0; + struct hci_conn *pa_sync; + + bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle)); + + hci_dev_lock(hdev); + + mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, PA_LINK, &flags); + if (!(mask & HCI_LM_ACCEPT)) + goto unlock; + + if (!(flags & HCI_PROTO_DEFER)) + goto unlock; + + pa_sync = hci_conn_hash_lookup_pa_sync_handle + (hdev, + le16_to_cpu(ev->sync_handle)); + + if (!pa_sync) + goto unlock; + + if (ev->data_status == LE_PA_DATA_COMPLETE && + !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) { + /* Notify iso layer */ + hci_connect_cfm(pa_sync, 0); + + /* Notify MGMT layer */ + mgmt_device_connected(hdev, pa_sync, NULL, 0); + } + +unlock: + hci_dev_unlock(hdev); +} + static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { @@ -6193,7 +6622,7 @@ static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, * transition into connected state and mark it as * successful. 
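Editorial note: the hunk below replaces the magic 0x1a with its named constant. As a sketch, the fallback it implements reduces to the following; the helper name is hypothetical and the field accesses mirror the handler.

/* Sketch of the peripheral-side fallback: an "unsupported remote
 * feature" error (0x1a) is downgraded to success when the local
 * controller supports peripheral-initiated feature exchange but the
 * remote controller does not.
 */
static u8 example_remote_feat_status(struct hci_dev *hdev,
				     struct hci_conn *conn, u8 ev_status)
{
	if (!conn->out &&
	    ev_status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
		return 0x00;	/* treat the connection as established */

	return ev_status;
}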
*/ - if (!conn->out && ev->status == 0x1a && + if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE && (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) status = 0x00; else @@ -6201,7 +6630,6 @@ static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, conn->state = BT_CONNECTED; hci_connect_cfm(conn, status); - hci_conn_drop(conn); } } @@ -6306,6 +6734,10 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data, return send_conn_param_neg_reply(hdev, handle, HCI_ERROR_UNKNOWN_CONN_ID); + if (max > hcon->le_conn_max_interval) + return send_conn_param_neg_reply(hdev, handle, + HCI_ERROR_INVALID_LL_PARAMS); + if (hci_check_conn_params(min, max, latency, timeout)) return send_conn_param_neg_reply(hdev, handle, HCI_ERROR_INVALID_LL_PARAMS); @@ -6349,6 +6781,7 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_direct_adv_report *ev = data; + u64 instant = jiffies; int i; if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT, @@ -6365,8 +6798,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data, process_adv_report(hdev, info->type, &info->bdaddr, info->bdaddr_type, &info->direct_addr, - info->direct_addr_type, info->rssi, NULL, 0, - false); + info->direct_addr_type, HCI_ADV_PHY_1M, 0, + info->rssi, NULL, 0, false, false, instant); } hci_dev_unlock(hdev); @@ -6396,6 +6829,428 @@ unlock: hci_dev_unlock(hdev); } +static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_cis_established *ev = data; + struct hci_conn *conn; + struct bt_iso_qos *qos; + bool pending = false; + u16 handle = __le16_to_cpu(ev->handle); + u32 c_sdu_interval, p_sdu_interval; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, handle); + if (!conn) { + bt_dev_err(hdev, + "Unable to find connection with handle 0x%4.4x", + handle); + goto unlock; + } + + if (conn->type != CIS_LINK) { + bt_dev_err(hdev, + "Invalid connection link type handle 0x%4.4x", + handle); + goto unlock; + } + + qos = &conn->iso_qos; + + pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags); + + /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G + * page 3075: + * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) × + * ISO_Interval + SDU_Interval_C_To_P + * ... + * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) - + * Transport_Latency + */ + c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) + + (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) - + get_unaligned_le24(ev->c_latency); + p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) + + (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) - + get_unaligned_le24(ev->p_latency); + + switch (conn->role) { + case HCI_ROLE_SLAVE: + qos->ucast.in.interval = c_sdu_interval; + qos->ucast.out.interval = p_sdu_interval; + /* Convert Transport Latency (us) to Latency (msec) */ + qos->ucast.in.latency = + DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency), + 1000); + qos->ucast.out.latency = + DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), + 1000); + qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0; + qos->ucast.out.sdu = ev->p_bn ? 
le16_to_cpu(ev->p_mtu) : 0; + qos->ucast.in.phy = ev->c_phy; + qos->ucast.out.phy = ev->p_phy; + break; + case HCI_ROLE_MASTER: + qos->ucast.in.interval = p_sdu_interval; + qos->ucast.out.interval = c_sdu_interval; + /* Convert Transport Latency (us) to Latency (msec) */ + qos->ucast.out.latency = + DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency), + 1000); + qos->ucast.in.latency = + DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), + 1000); + qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0; + qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0; + qos->ucast.out.phy = ev->c_phy; + qos->ucast.in.phy = ev->p_phy; + break; + } + + if (!ev->status) { + conn->state = BT_CONNECTED; + hci_debugfs_create_conn(conn); + hci_conn_add_sysfs(conn); + hci_iso_setup_path(conn); + goto unlock; + } + + conn->state = BT_CLOSED; + hci_connect_cfm(conn, ev->status); + hci_conn_del(conn); + +unlock: + if (pending) + hci_le_create_cis_pending(hdev); + + hci_dev_unlock(hdev); +} + +static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle) +{ + struct hci_cp_le_reject_cis cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + cp.reason = HCI_ERROR_REJ_BAD_ADDR; + hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp); +} + +static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle) +{ + struct hci_cp_le_accept_cis cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); +} + +static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_cis_req *ev = data; + u16 acl_handle, cis_handle; + struct hci_conn *acl, *cis; + int mask; + __u8 flags = 0; + + acl_handle = __le16_to_cpu(ev->acl_handle); + cis_handle = __le16_to_cpu(ev->cis_handle); + + bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x", + acl_handle, cis_handle, ev->cig_id, ev->cis_id); + + hci_dev_lock(hdev); + + acl = hci_conn_hash_lookup_handle(hdev, acl_handle); + if (!acl) + goto unlock; + + mask = hci_proto_connect_ind(hdev, &acl->dst, CIS_LINK, &flags); + if (!(mask & HCI_LM_ACCEPT)) { + hci_le_reject_cis(hdev, ev->cis_handle); + goto unlock; + } + + cis = hci_conn_hash_lookup_handle(hdev, cis_handle); + if (!cis) { + cis = hci_conn_add(hdev, CIS_LINK, &acl->dst, acl->dst_type, + HCI_ROLE_SLAVE, cis_handle); + if (IS_ERR(cis)) { + hci_le_reject_cis(hdev, ev->cis_handle); + goto unlock; + } + } + + cis->iso_qos.ucast.cig = ev->cig_id; + cis->iso_qos.ucast.cis = ev->cis_id; + + if (!(flags & HCI_PROTO_DEFER)) { + hci_le_accept_cis(hdev, ev->cis_handle); + } else { + cis->state = BT_CONNECT2; + hci_connect_cfm(cis, 0); + } + +unlock: + hci_dev_unlock(hdev); +} + +static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data) +{ + u8 handle = PTR_UINT(data); + + return hci_le_terminate_big_sync(hdev, handle, + HCI_ERROR_LOCAL_HOST_TERM); +} + +static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_create_big_complete *ev = data; + struct hci_conn *conn; + __u8 i = 0; + + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE, + flex_array_size(ev, bis_handle, ev->num_bis))) + return; + + hci_dev_lock(hdev); + + /* Connect all BISes that are bound to the BIG */ + while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle, + BT_BOUND, + HCI_ROLE_MASTER))) { + if (ev->status) { + hci_connect_cfm(conn, ev->status); + hci_conn_del(conn); + 
continue; + } + + if (hci_conn_set_handle(conn, + __le16_to_cpu(ev->bis_handle[i++]))) + continue; + + conn->state = BT_CONNECTED; + set_bit(HCI_CONN_BIG_CREATED, &conn->flags); + hci_debugfs_create_conn(conn); + hci_conn_add_sysfs(conn); + hci_iso_setup_path(conn); + } + + if (!ev->status && !i) + /* If no BISes have been connected for the BIG, + * terminate. This is in case all bound connections + * have been closed before the BIG creation + * has completed. + */ + hci_cmd_sync_queue(hdev, hci_iso_term_big_sync, + UINT_PTR(ev->handle), NULL); + + hci_dev_unlock(hdev); +} + +static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_big_sync_established *ev = data; + struct hci_conn *bis, *conn; + int i; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED, + flex_array_size(ev, bis, ev->num_bis))) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle, + ev->num_bis); + if (!conn) { + bt_dev_err(hdev, + "Unable to find connection for big 0x%2.2x", + ev->handle); + goto unlock; + } + + clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); + + conn->num_bis = 0; + memset(conn->bis, 0, sizeof(conn->num_bis)); + + for (i = 0; i < ev->num_bis; i++) { + u16 handle = le16_to_cpu(ev->bis[i]); + __le32 interval; + + bis = hci_conn_hash_lookup_handle(hdev, handle); + if (!bis) { + if (handle > HCI_CONN_HANDLE_MAX) { + bt_dev_dbg(hdev, "ignore too large handle %u", handle); + continue; + } + bis = hci_conn_add(hdev, BIS_LINK, BDADDR_ANY, 0, + HCI_ROLE_SLAVE, handle); + if (IS_ERR(bis)) + continue; + } + + if (ev->status != 0x42) + /* Mark PA sync as established */ + set_bit(HCI_CONN_PA_SYNC, &bis->flags); + + bis->sync_handle = conn->sync_handle; + bis->iso_qos.bcast.big = ev->handle; + memset(&interval, 0, sizeof(interval)); + memcpy(&interval, ev->latency, sizeof(ev->latency)); + bis->iso_qos.bcast.in.interval = le32_to_cpu(interval); + /* Convert ISO Interval (1.25 ms slots) to latency (ms) */ + bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100; + bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu); + + if (!ev->status) { + bis->state = BT_CONNECTED; + set_bit(HCI_CONN_BIG_SYNC, &bis->flags); + hci_debugfs_create_conn(bis); + hci_conn_add_sysfs(bis); + hci_iso_setup_path(bis); + } + } + + /* In case BIG sync failed, notify each failed connection to + * the user after all hci connections have been added + */ + if (ev->status) + for (i = 0; i < ev->num_bis; i++) { + u16 handle = le16_to_cpu(ev->bis[i]); + + bis = hci_conn_hash_lookup_handle(hdev, handle); + if (!bis) + continue; + + set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags); + hci_connect_cfm(bis, ev->status); + } + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_le_big_sync_lost_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_big_sync_lost *ev = data; + struct hci_conn *bis; + bool mgmt_conn = false; + + bt_dev_dbg(hdev, "big handle 0x%2.2x", ev->handle); + + hci_dev_lock(hdev); + + /* Delete each bis connection */ + while ((bis = hci_conn_hash_lookup_big_state(hdev, ev->handle, + BT_CONNECTED, + HCI_ROLE_SLAVE))) { + if (!mgmt_conn) { + mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, + &bis->flags); + mgmt_device_disconnected(hdev, &bis->dst, bis->type, + bis->dst_type, ev->reason, + mgmt_conn); + } + + clear_bit(HCI_CONN_BIG_SYNC, &bis->flags); + hci_disconn_cfm(bis, ev->reason); + 
hci_conn_del(bis); + } + + hci_dev_unlock(hdev); +} + +static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_big_info_adv_report *ev = data; + int mask = hdev->link_mode; + __u8 flags = 0; + struct hci_conn *pa_sync; + + bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle)); + + hci_dev_lock(hdev); + + mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags); + if (!(mask & HCI_LM_ACCEPT)) + goto unlock; + + if (!(flags & HCI_PROTO_DEFER)) + goto unlock; + + pa_sync = hci_conn_hash_lookup_pa_sync_handle + (hdev, + le16_to_cpu(ev->sync_handle)); + + if (!pa_sync) + goto unlock; + + pa_sync->iso_qos.bcast.encryption = ev->encryption; + + /* Notify iso layer */ + hci_connect_cfm(pa_sync, 0); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_le_read_all_remote_features_evt(struct hci_dev *hdev, + void *data, struct sk_buff *skb) +{ + struct hci_evt_le_read_all_remote_features_complete *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (!conn) + goto unlock; + + if (!ev->status) + memcpy(conn->le_features, ev->features, 248); + + if (conn->state == BT_CONFIG) { + __u8 status; + + /* If the local controller supports peripheral-initiated + * features exchange, but the remote controller does + * not, then it is possible that the error code 0x1a + * for unsupported remote feature gets returned. + * + * In this specific case, allow the connection to + * transition into connected state and mark it as + * successful. + */ + if (!conn->out && + ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE && + (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) + status = 0x00; + else + status = ev->status; + + conn->state = BT_CONNECTED; + hci_connect_cfm(conn, status); + } + +unlock: + hci_dev_unlock(hdev); +} + #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \ [_op] = { \ .func = _func, \ @@ -6412,7 +7267,7 @@ unlock: /* Entries in this table shall have their position according to the subevent * opcode they handle so the use of the macros above is recommend since it does * attempt to initialize at its proper index using Designated Initializers that - * way events without a callback function can be ommited. + * way events without a callback function can be omitted. 
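Editorial note: the table this comment introduces relies on C99 designated initializers for sparse, opcode-indexed dispatch. A minimal standalone sketch of the pattern follows; EX_EV(), the opcode, and ex_handle_foo() are hypothetical, while the real macros are the HCI_LE_EV()/HCI_LE_EV_VL() definitions below.

/* Minimal sketch of a sparse dispatch table built with designated
 * initializers, as the comment above describes.
 */
struct ex_ev {
	void (*func)(void *data);
	u16 min_len;
};

#define EX_EV(_op, _func, _min_len) \
	[_op] = { .func = _func, .min_len = _min_len }

static void ex_handle_foo(void *data) { }

static const struct ex_ev ex_table[] = {
	/* Entries land at the index of the opcode they handle; opcodes
	 * without a handler stay zero-initialized, so a NULL .func
	 * check is all the dispatcher needs for "no callback".
	 */
	EX_EV(0x01, ex_handle_foo, 4),
};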
*/ static const struct hci_le_ev { void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); @@ -6456,9 +7311,57 @@ static const struct hci_le_ev { HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt, sizeof(struct hci_ev_le_ext_adv_report), HCI_MAX_EVENT_SIZE), + /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */ + HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED, + hci_le_pa_sync_established_evt, + sizeof(struct hci_ev_le_pa_sync_established)), + /* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */ + HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT, + hci_le_per_adv_report_evt, + sizeof(struct hci_ev_le_per_adv_report), + HCI_MAX_EVENT_SIZE), + /* [0x10 = HCI_EV_LE_PA_SYNC_LOST] */ + HCI_LE_EV(HCI_EV_LE_PA_SYNC_LOST, hci_le_pa_sync_lost_evt, + sizeof(struct hci_ev_le_pa_sync_lost)), /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */ HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt, sizeof(struct hci_evt_le_ext_adv_set_term)), + /* [0x18 = HCI_EVT_LE_PAST_RECEIVED] */ + HCI_LE_EV(HCI_EV_LE_PAST_RECEIVED, + hci_le_past_received_evt, + sizeof(struct hci_ev_le_past_received)), + /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */ + HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established_evt, + sizeof(struct hci_evt_le_cis_established)), + /* [0x1a = HCI_EVT_LE_CIS_REQ] */ + HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt, + sizeof(struct hci_evt_le_cis_req)), + /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */ + HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE, + hci_le_create_big_complete_evt, + sizeof(struct hci_evt_le_create_big_complete), + HCI_MAX_EVENT_SIZE), + /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABLISHED] */ + HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED, + hci_le_big_sync_established_evt, + sizeof(struct hci_evt_le_big_sync_established), + HCI_MAX_EVENT_SIZE), + /* [0x1e = HCI_EVT_LE_BIG_SYNC_LOST] */ + HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_LOST, + hci_le_big_sync_lost_evt, + sizeof(struct hci_evt_le_big_sync_lost), + HCI_MAX_EVENT_SIZE), + /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */ + HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT, + hci_le_big_info_adv_report_evt, + sizeof(struct hci_evt_le_big_info_adv_report), + HCI_MAX_EVENT_SIZE), + /* [0x2b = HCI_EVT_LE_ALL_REMOTE_FEATURES_COMPLETE] */ + HCI_LE_EV_VL(HCI_EVT_LE_ALL_REMOTE_FEATURES_COMPLETE, + hci_le_read_all_remote_features_evt, + sizeof(struct + hci_evt_le_read_all_remote_features_complete), + HCI_MAX_EVENT_SIZE), }; static void hci_le_meta_evt(struct hci_dev *hdev, void *data, @@ -6472,10 +7375,11 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent); /* Only match event if command OGF is for LE */ - if (hdev->sent_cmd && - hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 && - hci_skb_event(hdev->sent_cmd) == ev->subevent) { - *opcode = hci_skb_opcode(hdev->sent_cmd); + if (hdev->req_skb && + (hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 || + hci_skb_opcode(hdev->req_skb) == HCI_OP_NOP) && + hci_skb_event(hdev->req_skb) == ev->subevent) { + *opcode = hci_skb_opcode(hdev->req_skb); hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete, req_complete_skb); } @@ -6497,7 +7401,6 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data, if (skb->len > subev->max_len) bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u", ev->subevent, skb->len, subev->max_len); - data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len); if (!data) return; @@ -6576,10 +7479,10 @@ static void hci_store_wake_reason(struct hci_dev *hdev, u8 event, * keep track of the bdaddr of the 
connection event that woke us up. */ if (event == HCI_EV_CONN_REQUEST) { - bacpy(&hdev->wake_addr, &conn_complete->bdaddr); + bacpy(&hdev->wake_addr, &conn_request->bdaddr); hdev->wake_addr_type = BDADDR_BREDR; } else if (event == HCI_EV_CONN_COMPLETE) { - bacpy(&hdev->wake_addr, &conn_request->bdaddr); + bacpy(&hdev->wake_addr, &conn_complete->bdaddr); hdev->wake_addr_type = BDADDR_BREDR; } else if (event == HCI_EV_LE_META) { struct hci_ev_le_meta *le_ev = (void *)skb->data; @@ -6738,7 +7641,7 @@ static const struct hci_ev { /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */ HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt, sizeof(struct hci_ev_sync_conn_complete)), - /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */ + /* [0x2f = HCI_EV_EXTENDED_INQUIRY_RESULT] */ HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT, hci_extended_inquiry_result_evt, sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE), @@ -6775,30 +7678,8 @@ static const struct hci_ev { /* [0x3e = HCI_EV_LE_META] */ HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt, sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE), -#if IS_ENABLED(CONFIG_BT_HS) - /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */ - HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt, - sizeof(struct hci_ev_phy_link_complete)), - /* [0x41 = HCI_EV_CHANNEL_SELECTED] */ - HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt, - sizeof(struct hci_ev_channel_selected)), - /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */ - HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE, - hci_disconn_loglink_complete_evt, - sizeof(struct hci_ev_disconn_logical_link_complete)), - /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */ - HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt, - sizeof(struct hci_ev_logical_link_complete)), - /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */ - HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE, - hci_disconn_phylink_complete_evt, - sizeof(struct hci_ev_disconn_phy_link_complete)), -#endif - /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */ - HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt, - sizeof(struct hci_ev_num_comp_blocks)), /* [0xff = HCI_EV_VENDOR] */ - HCI_EV(HCI_EV_VENDOR, msft_vendor_evt, 0), + HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE), }; static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb, @@ -6823,8 +7704,9 @@ static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb, * decide if that is acceptable. 
*/ if (skb->len > ev->max_len) - bt_dev_warn(hdev, "unexpected event 0x%2.2x length: %u > %u", - event, skb->len, ev->max_len); + bt_dev_warn_ratelimited(hdev, + "unexpected event 0x%2.2x length: %u > %u", + event, skb->len, ev->max_len); data = hci_ev_skb_pull(hdev, skb, event, ev->min_len); if (!data) @@ -6851,6 +7733,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) goto done; } + hci_dev_lock(hdev); + kfree_skb(hdev->recv_event); + hdev->recv_event = skb_clone(skb, GFP_KERNEL); + hci_dev_unlock(hdev); + event = hdr->evt; if (!event) { bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x", @@ -6859,10 +7746,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) } /* Only match event if command OGF is not for LE */ - if (hdev->sent_cmd && - hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 && - hci_skb_event(hdev->sent_cmd) == event) { - hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd), + if (hdev->req_skb && + hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 && + hci_skb_event(hdev->req_skb) == event) { + hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb), status, &req_complete, &req_complete_skb); req_evt = event; } diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c deleted file mode 100644 index 42c8047a9897..000000000000 --- a/net/bluetooth/hci_request.c +++ /dev/null @@ -1,2659 +0,0 @@ -/* - BlueZ - Bluetooth protocol stack for Linux - - Copyright (C) 2014 Intel Corporation - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License version 2 as - published by the Free Software Foundation; - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. - IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS - SOFTWARE IS DISCLAIMED. -*/ - -#include <linux/sched/signal.h> - -#include <net/bluetooth/bluetooth.h> -#include <net/bluetooth/hci_core.h> -#include <net/bluetooth/mgmt.h> - -#include "smp.h" -#include "hci_request.h" -#include "msft.h" -#include "eir.h" - -void hci_req_init(struct hci_request *req, struct hci_dev *hdev) -{ - skb_queue_head_init(&req->cmd_q); - req->hdev = hdev; - req->err = 0; -} - -void hci_req_purge(struct hci_request *req) -{ - skb_queue_purge(&req->cmd_q); -} - -bool hci_req_status_pend(struct hci_dev *hdev) -{ - return hdev->req_status == HCI_REQ_PEND; -} - -static int req_run(struct hci_request *req, hci_req_complete_t complete, - hci_req_complete_skb_t complete_skb) -{ - struct hci_dev *hdev = req->hdev; - struct sk_buff *skb; - unsigned long flags; - - bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); - - /* If an error occurred during request building, remove all HCI - * commands queued on the HCI request queue. 
- */ - if (req->err) { - skb_queue_purge(&req->cmd_q); - return req->err; - } - - /* Do not allow empty requests */ - if (skb_queue_empty(&req->cmd_q)) - return -ENODATA; - - skb = skb_peek_tail(&req->cmd_q); - if (complete) { - bt_cb(skb)->hci.req_complete = complete; - } else if (complete_skb) { - bt_cb(skb)->hci.req_complete_skb = complete_skb; - bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; - } - - spin_lock_irqsave(&hdev->cmd_q.lock, flags); - skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); - spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); - - queue_work(hdev->workqueue, &hdev->cmd_work); - - return 0; -} - -int hci_req_run(struct hci_request *req, hci_req_complete_t complete) -{ - return req_run(req, complete, NULL); -} - -int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) -{ - return req_run(req, NULL, complete); -} - -void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, - struct sk_buff *skb) -{ - bt_dev_dbg(hdev, "result 0x%2.2x", result); - - if (hdev->req_status == HCI_REQ_PEND) { - hdev->req_result = result; - hdev->req_status = HCI_REQ_DONE; - if (skb) - hdev->req_skb = skb_get(skb); - wake_up_interruptible(&hdev->req_wait_q); - } -} - -/* Execute request and wait for completion. */ -int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, - unsigned long opt), - unsigned long opt, u32 timeout, u8 *hci_status) -{ - struct hci_request req; - int err = 0; - - bt_dev_dbg(hdev, "start"); - - hci_req_init(&req, hdev); - - hdev->req_status = HCI_REQ_PEND; - - err = func(&req, opt); - if (err) { - if (hci_status) - *hci_status = HCI_ERROR_UNSPECIFIED; - return err; - } - - err = hci_req_run_skb(&req, hci_req_sync_complete); - if (err < 0) { - hdev->req_status = 0; - - /* ENODATA means the HCI request command queue is empty. - * This can happen when a request with conditionals doesn't - * trigger any commands to be sent. This is normal behavior - * and should not trigger an error return. - */ - if (err == -ENODATA) { - if (hci_status) - *hci_status = 0; - return 0; - } - - if (hci_status) - *hci_status = HCI_ERROR_UNSPECIFIED; - - return err; - } - - err = wait_event_interruptible_timeout(hdev->req_wait_q, - hdev->req_status != HCI_REQ_PEND, timeout); - - if (err == -ERESTARTSYS) - return -EINTR; - - switch (hdev->req_status) { - case HCI_REQ_DONE: - err = -bt_to_errno(hdev->req_result); - if (hci_status) - *hci_status = hdev->req_result; - break; - - case HCI_REQ_CANCELED: - err = -hdev->req_result; - if (hci_status) - *hci_status = HCI_ERROR_UNSPECIFIED; - break; - - default: - err = -ETIMEDOUT; - if (hci_status) - *hci_status = HCI_ERROR_UNSPECIFIED; - break; - } - - kfree_skb(hdev->req_skb); - hdev->req_skb = NULL; - hdev->req_status = hdev->req_result = 0; - - bt_dev_dbg(hdev, "end: err %d", err); - - return err; -} - -int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, - unsigned long opt), - unsigned long opt, u32 timeout, u8 *hci_status) -{ - int ret; - - /* Serialize all requests */ - hci_req_sync_lock(hdev); - /* check the state after obtaing the lock to protect the HCI_UP - * against any races from hci_dev_do_close when the controller - * gets removed. 
- */ - if (test_bit(HCI_UP, &hdev->flags)) - ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); - else - ret = -ENETDOWN; - hci_req_sync_unlock(hdev); - - return ret; -} - -struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, - const void *param) -{ - int len = HCI_COMMAND_HDR_SIZE + plen; - struct hci_command_hdr *hdr; - struct sk_buff *skb; - - skb = bt_skb_alloc(len, GFP_ATOMIC); - if (!skb) - return NULL; - - hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); - hdr->opcode = cpu_to_le16(opcode); - hdr->plen = plen; - - if (plen) - skb_put_data(skb, param, plen); - - bt_dev_dbg(hdev, "skb len %d", skb->len); - - hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; - hci_skb_opcode(skb) = opcode; - - return skb; -} - -/* Queue a command to an asynchronous HCI request */ -void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, - const void *param, u8 event) -{ - struct hci_dev *hdev = req->hdev; - struct sk_buff *skb; - - bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); - - /* If an error occurred during request building, there is no point in - * queueing the HCI command. We can simply return. - */ - if (req->err) - return; - - skb = hci_prepare_cmd(hdev, opcode, plen, param); - if (!skb) { - bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", - opcode); - req->err = -ENOMEM; - return; - } - - if (skb_queue_empty(&req->cmd_q)) - bt_cb(skb)->hci.req_flags |= HCI_REQ_START; - - bt_cb(skb)->hci.req_event = event; - - skb_queue_tail(&req->cmd_q, skb); -} - -void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, - const void *param) -{ - hci_req_add_ev(req, opcode, plen, param, 0); -} - -void __hci_req_write_fast_connectable(struct hci_request *req, bool enable) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_page_scan_activity acp; - u8 type; - - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) - return; - - if (hdev->hci_ver < BLUETOOTH_VER_1_2) - return; - - if (enable) { - type = PAGE_SCAN_TYPE_INTERLACED; - - /* 160 msec page scan interval */ - acp.interval = cpu_to_le16(0x0100); - } else { - type = hdev->def_page_scan_type; - acp.interval = cpu_to_le16(hdev->def_page_scan_int); - } - - acp.window = cpu_to_le16(hdev->def_page_scan_window); - - if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval || - __cpu_to_le16(hdev->page_scan_window) != acp.window) - hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, - sizeof(acp), &acp); - - if (hdev->page_scan_type != type) - hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); -} - -static void start_interleave_scan(struct hci_dev *hdev) -{ - hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; - queue_delayed_work(hdev->req_workqueue, - &hdev->interleave_scan, 0); -} - -static bool is_interleave_scanning(struct hci_dev *hdev) -{ - return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; -} - -static void cancel_interleave_scan(struct hci_dev *hdev) -{ - bt_dev_dbg(hdev, "cancelling interleave scan"); - - cancel_delayed_work_sync(&hdev->interleave_scan); - - hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; -} - -/* Return true if interleave_scan wasn't started until exiting this function, - * otherwise, return false - */ -static bool __hci_update_interleaved_scan(struct hci_dev *hdev) -{ - /* Do interleaved scan only if all of the following are true: - * - There is at least one ADV monitor - * - At least one pending LE connection or one device to be scanned for - * - Monitor offloading is not supported - * If so, we should alternate between allowlist scan and one without 
- * any filters to save power. - */ - bool use_interleaving = hci_is_adv_monitoring(hdev) && - !(list_empty(&hdev->pend_le_conns) && - list_empty(&hdev->pend_le_reports)) && - hci_get_adv_monitor_offload_ext(hdev) == - HCI_ADV_MONITOR_EXT_NONE; - bool is_interleaving = is_interleave_scanning(hdev); - - if (use_interleaving && !is_interleaving) { - start_interleave_scan(hdev); - bt_dev_dbg(hdev, "starting interleave scan"); - return true; - } - - if (!use_interleaving && is_interleaving) - cancel_interleave_scan(hdev); - - return false; -} - -void __hci_req_update_name(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_local_name cp; - - memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); - - hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp); -} - -void __hci_req_update_eir(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_eir cp; - - if (!hdev_is_powered(hdev)) - return; - - if (!lmp_ext_inq_capable(hdev)) - return; - - if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) - return; - - if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) - return; - - memset(&cp, 0, sizeof(cp)); - - eir_create(hdev, cp.data); - - if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) - return; - - memcpy(hdev->eir, cp.data, sizeof(cp.data)); - - hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); -} - -void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) -{ - struct hci_dev *hdev = req->hdev; - - if (hdev->scanning_paused) { - bt_dev_dbg(hdev, "Scanning is paused for suspend"); - return; - } - - if (use_ext_scan(hdev)) { - struct hci_cp_le_set_ext_scan_enable cp; - - memset(&cp, 0, sizeof(cp)); - cp.enable = LE_SCAN_DISABLE; - hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp), - &cp); - } else { - struct hci_cp_le_set_scan_enable cp; - - memset(&cp, 0, sizeof(cp)); - cp.enable = LE_SCAN_DISABLE; - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); - } - - /* Disable address resolution */ - if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { - __u8 enable = 0x00; - - hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); - } -} - -static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr, - u8 bdaddr_type) -{ - struct hci_cp_le_del_from_accept_list cp; - - cp.bdaddr_type = bdaddr_type; - bacpy(&cp.bdaddr, bdaddr); - - bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr, - cp.bdaddr_type); - hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp); - - if (use_ll_privacy(req->hdev)) { - struct smp_irk *irk; - - irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); - if (irk) { - struct hci_cp_le_del_from_resolv_list cp; - - cp.bdaddr_type = bdaddr_type; - bacpy(&cp.bdaddr, bdaddr); - - hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST, - sizeof(cp), &cp); - } - } -} - -/* Adds connection to accept list if needed. On error, returns -1. 
*/ -static int add_to_accept_list(struct hci_request *req, - struct hci_conn_params *params, u8 *num_entries, - bool allow_rpa) -{ - struct hci_cp_le_add_to_accept_list cp; - struct hci_dev *hdev = req->hdev; - - /* Already in accept list */ - if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr, - params->addr_type)) - return 0; - - /* Select filter policy to accept all advertising */ - if (*num_entries >= hdev->le_accept_list_size) - return -1; - - /* Accept list can not be used with RPAs */ - if (!allow_rpa && - !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && - hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) { - return -1; - } - - /* During suspend, only wakeable devices can be in accept list */ - if (hdev->suspended && - !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags)) - return 0; - - *num_entries += 1; - cp.bdaddr_type = params->addr_type; - bacpy(&cp.bdaddr, &params->addr); - - bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr, - cp.bdaddr_type); - hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp); - - if (use_ll_privacy(hdev)) { - struct smp_irk *irk; - - irk = hci_find_irk_by_addr(hdev, &params->addr, - params->addr_type); - if (irk) { - struct hci_cp_le_add_to_resolv_list cp; - - cp.bdaddr_type = params->addr_type; - bacpy(&cp.bdaddr, &params->addr); - memcpy(cp.peer_irk, irk->val, 16); - - if (hci_dev_test_flag(hdev, HCI_PRIVACY)) - memcpy(cp.local_irk, hdev->irk, 16); - else - memset(cp.local_irk, 0, 16); - - hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST, - sizeof(cp), &cp); - } - } - - return 0; -} - -static u8 update_accept_list(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_conn_params *params; - struct bdaddr_list *b; - u8 num_entries = 0; - bool pend_conn, pend_report; - /* We allow usage of accept list even with RPAs in suspend. In the worst - * case, we won't be able to wake from devices that use the Privacy 1.2 - * features. Additionally, once we support Privacy 1.2 and IRK - * offloading, we can update this to also check for those conditions. - */ - bool allow_rpa = hdev->suspended; - - if (use_ll_privacy(hdev)) - allow_rpa = true; - - /* Go through the current accept list programmed into the - * controller one by one and check if that address is still - * in the list of pending connections or list of devices to - * report. If not present in either list, then queue the - * command to remove it from the controller. - */ - list_for_each_entry(b, &hdev->le_accept_list, list) { - pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, - &b->bdaddr, - b->bdaddr_type); - pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, - &b->bdaddr, - b->bdaddr_type); - - /* If the device is not likely to connect or report, - * remove it from the accept list. - */ - if (!pend_conn && !pend_report) { - del_from_accept_list(req, &b->bdaddr, b->bdaddr_type); - continue; - } - - /* Accept list can not be used with RPAs */ - if (!allow_rpa && - !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && - hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { - return 0x00; - } - - num_entries++; - } - - /* Since all no longer valid accept list entries have been - * removed, walk through the list of pending connections - * and ensure that any new device gets programmed into - * the controller. - * - * If the list of the devices is larger than the list of - * available accept list entries in the controller, then - * just abort and return the filter policy value to not use the - * accept list.
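update_accept_list() hands its return value straight to the controller as the scanning filter policy. For orientation, the full set of values involved here and in hci_req_add_le_passive_scan() further down (the enum name is illustrative; the values come from the LE Set Scan Parameters command):

enum le_scan_filter_policy {
        LE_FP_ACCEPT_ALL        = 0x00, /* process all advertisements */
        LE_FP_ACCEPT_LIST       = 0x01, /* accept list only */
        LE_FP_EXT_ACCEPT_ALL    = 0x02, /* 0x00 plus directed adv to an RPA */
        LE_FP_EXT_ACCEPT_LIST   = 0x03, /* 0x01 plus directed adv to an RPA */
};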
- */ - list_for_each_entry(params, &hdev->pend_le_conns, action) { - if (add_to_accept_list(req, params, &num_entries, allow_rpa)) - return 0x00; - } - - /* After adding all new pending connections, walk through - * the list of pending reports and also add these to the - * accept list if there is still space. Abort if space runs out. - */ - list_for_each_entry(params, &hdev->pend_le_reports, action) { - if (add_to_accept_list(req, params, &num_entries, allow_rpa)) - return 0x00; - } - - /* Use the allowlist unless the following conditions are all true: - * - We are not currently suspending - * - One or more ADV monitors are registered and offloading is not - * supported - * - Interleaved scanning is not currently using the allowlist - */ - if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && - hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE && - hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) - return 0x00; - - /* Select filter policy to use accept list */ - return 0x01; -} - -static bool scan_use_rpa(struct hci_dev *hdev) -{ - return hci_dev_test_flag(hdev, HCI_PRIVACY); -} - -static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, - u16 window, u8 own_addr_type, u8 filter_policy, - bool filter_dup, bool addr_resolv) -{ - struct hci_dev *hdev = req->hdev; - - if (hdev->scanning_paused) { - bt_dev_dbg(hdev, "Scanning is paused for suspend"); - return; - } - - if (use_ll_privacy(hdev) && addr_resolv) { - u8 enable = 0x01; - - hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); - } - - /* Use extended scanning if both the ext scan params and ext scan - * enable commands are supported - */ - if (use_ext_scan(hdev)) { - struct hci_cp_le_set_ext_scan_params *ext_param_cp; - struct hci_cp_le_set_ext_scan_enable ext_enable_cp; - struct hci_cp_le_scan_phy_params *phy_params; - u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2]; - u32 plen; - - ext_param_cp = (void *)data; - phy_params = (void *)ext_param_cp->data; - - memset(ext_param_cp, 0, sizeof(*ext_param_cp)); - ext_param_cp->own_addr_type = own_addr_type; - ext_param_cp->filter_policy = filter_policy; - - plen = sizeof(*ext_param_cp); - - if (scan_1m(hdev) || scan_2m(hdev)) { - ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M; - - memset(phy_params, 0, sizeof(*phy_params)); - phy_params->type = type; - phy_params->interval = cpu_to_le16(interval); - phy_params->window = cpu_to_le16(window); - - plen += sizeof(*phy_params); - phy_params++; - } - - if (scan_coded(hdev)) { - ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED; - - memset(phy_params, 0, sizeof(*phy_params)); - phy_params->type = type; - phy_params->interval = cpu_to_le16(interval); - phy_params->window = cpu_to_le16(window); - - plen += sizeof(*phy_params); - phy_params++; - } - - hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS, - plen, ext_param_cp); - - memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); - ext_enable_cp.enable = LE_SCAN_ENABLE; - ext_enable_cp.filter_dup = filter_dup; - - hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, - sizeof(ext_enable_cp), &ext_enable_cp); - } else { - struct hci_cp_le_set_scan_param param_cp; - struct hci_cp_le_set_scan_enable enable_cp; - - memset(&param_cp, 0, sizeof(param_cp)); - param_cp.type = type; - param_cp.interval = cpu_to_le16(interval); - param_cp.window = cpu_to_le16(window); - param_cp.own_address_type = own_addr_type; - param_cp.filter_policy = filter_policy; - hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), - &param_cp); - - memset(&enable_cp, 0, sizeof(enable_cp)); -
enable_cp.enable = LE_SCAN_ENABLE; - enable_cp.filter_dup = filter_dup; - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), - &enable_cp); - } -} - -/* Returns true if an LE connection is in the scanning state */ -static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) -{ - struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *c; - - rcu_read_lock(); - - list_for_each_entry_rcu(c, &h->list, list) { - if (c->type == LE_LINK && c->state == BT_CONNECT && - test_bit(HCI_CONN_SCANNING, &c->flags)) { - rcu_read_unlock(); - return true; - } - } - - rcu_read_unlock(); - - return false; -} - -/* Call hci_req_add_le_scan_disable() first to disable controller-based - * address resolution so that the resolving list can be reconfigured. - */ -void hci_req_add_le_passive_scan(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - u8 own_addr_type; - u8 filter_policy; - u16 window, interval; - /* Default is to enable duplicates filter */ - u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - /* Background scanning should run with address resolution */ - bool addr_resolv = true; - - if (hdev->scanning_paused) { - bt_dev_dbg(hdev, "Scanning is paused for suspend"); - return; - } - - /* Set require_privacy to false since no SCAN_REQ is sent - * during passive scanning. Not using a non-resolvable address - * here is important so that peer devices using direct - * advertising with our address will be correctly reported - * by the controller. - */ - if (hci_update_random_address(req, false, scan_use_rpa(hdev), - &own_addr_type)) - return; - - if (hdev->enable_advmon_interleave_scan && - __hci_update_interleaved_scan(hdev)) - return; - - bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); - /* Adding or removing entries from the accept list must - * happen before enabling scanning. The controller does - * not allow accept list modification while scanning. - */ - filter_policy = update_accept_list(req); - - /* When the controller is using resolvable random addresses with - * LE privacy enabled, controllers that support Extended Scanner - * Filter Policies can additionally handle directed advertising. - * - * So instead of using filter policies 0x00 (no accept list) - * and 0x01 (accept list enabled) use the new filter policies - * 0x02 (no accept list) and 0x03 (accept list enabled). - */ - if (hci_dev_test_flag(hdev, HCI_PRIVACY) && - (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) - filter_policy |= 0x02; - - if (hdev->suspended) { - window = hdev->le_scan_window_suspend; - interval = hdev->le_scan_int_suspend; - } else if (hci_is_le_conn_scanning(hdev)) { - window = hdev->le_scan_window_connect; - interval = hdev->le_scan_int_connect; - } else if (hci_is_adv_monitoring(hdev)) { - window = hdev->le_scan_window_adv_monitor; - interval = hdev->le_scan_int_adv_monitor; - - /* Disable duplicates filter when scanning for advertisement - * monitors, for the following reasons. - * - * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm - * controllers ignore RSSI_Sampling_Period when the duplicates - * filter is enabled. - * - * For SW pattern filtering, when we're not doing interleaved - * scanning, it is necessary to disable duplicates filter, - * otherwise hosts can only receive one advertisement and it's - * impossible to know if a peer is still in range.
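Each of these branches picks a different interval/window pair; the window/interval ratio is the scanning duty cycle, with both fields expressed in 0.625 ms units. A small sketch of the arithmetic (the values are illustrative, not the kernel defaults):

#include <stdint.h>
#include <stdio.h>

static double scan_duty_cycle(uint16_t interval, uint16_t window)
{
        return (double)window / interval;       /* both in 0.625 ms slots */
}

int main(void)
{
        /* e.g. interval 0x0100 (256 slots = 160 ms), window 0x0024 (22.5 ms) */
        printf("duty cycle: %.1f%%\n", 100 * scan_duty_cycle(0x0100, 0x0024));
        return 0;
}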
- */ - filter_dup = LE_SCAN_FILTER_DUP_DISABLE; - } else { - window = hdev->le_scan_window; - interval = hdev->le_scan_interval; - } - - bt_dev_dbg(hdev, "LE passive scan with accept list = %d", - filter_policy); - hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window, - own_addr_type, filter_policy, filter_dup, - addr_resolv); -} - -static void cancel_adv_timeout(struct hci_dev *hdev) -{ - if (hdev->adv_instance_timeout) { - hdev->adv_instance_timeout = 0; - cancel_delayed_work(&hdev->adv_instance_expire); - } -} - -static bool adv_cur_instance_is_scannable(struct hci_dev *hdev) -{ - return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance); -} - -void __hci_req_disable_advertising(struct hci_request *req) -{ - if (ext_adv_capable(req->hdev)) { - __hci_req_disable_ext_adv_instance(req, 0x00); - - } else { - u8 enable = 0x00; - - hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); - } -} - -static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) -{ - /* If privacy is not enabled don't use RPA */ - if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) - return false; - - /* If basic privacy mode is enabled use RPA */ - if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) - return true; - - /* If limited privacy mode is enabled don't use RPA if we're - * both discoverable and bondable. - */ - if ((flags & MGMT_ADV_FLAG_DISCOV) && - hci_dev_test_flag(hdev, HCI_BONDABLE)) - return false; - - /* We're neither bondable nor discoverable in the limited - * privacy mode, therefore use RPA. - */ - return true; -} - -static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) -{ - /* If there is no connection we are OK to advertise. */ - if (hci_conn_num(hdev, LE_LINK) == 0) - return true; - - /* Check le_states if there is any connection in peripheral role. */ - if (hdev->conn_hash.le_num_peripheral > 0) { - /* Peripheral connection state and non connectable mode bit 20. - */ - if (!connectable && !(hdev->le_states[2] & 0x10)) - return false; - - /* Peripheral connection state and connectable mode bit 38 - * and scannable bit 21. - */ - if (connectable && (!(hdev->le_states[4] & 0x40) || - !(hdev->le_states[2] & 0x20))) - return false; - } - - /* Check le_states if there is any connection in central role. */ - if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) { - /* Central connection state and non connectable mode bit 18. */ - if (!connectable && !(hdev->le_states[2] & 0x02)) - return false; - - /* Central connection state and connectable mode bit 35 and - * scannable 19. - */ - if (connectable && (!(hdev->le_states[4] & 0x08) || - !(hdev->le_states[2] & 0x08))) - return false; - } - - return true; -} - -void __hci_req_enable_advertising(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct adv_info *adv; - struct hci_cp_le_set_adv_param cp; - u8 own_addr_type, enable = 0x01; - bool connectable; - u16 adv_min_interval, adv_max_interval; - u32 flags; - - flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance); - adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance); - - /* If the "connectable" instance flag was not set, then choose between - * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. 
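The le_states masks used in is_advertising_allowed() above index into an 8-byte bit field, so "bit 20" means bit 4 of byte 2 (mask 0x10). A helper makes the byte/mask arithmetic explicit (illustrative sketch; note that under this 0-based numbering the 0x02 mask annotated as "bit 18" actually selects bit 17, so read those bit-number comments with care):

#include <stdbool.h>
#include <stdint.h>

static bool le_state_supported(const uint8_t le_states[8], unsigned int bit)
{
        return le_states[bit / 8] & (1u << (bit % 8));
}

/* Example: "peripheral connection state and non-connectable mode bit 20"
 * is le_states[2] & 0x10, i.e. le_state_supported(le_states, 20).
 */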
- */ - connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || - mgmt_get_connectable(hdev); - - if (!is_advertising_allowed(hdev, connectable)) - return; - - if (hci_dev_test_flag(hdev, HCI_LE_ADV)) - __hci_req_disable_advertising(req); - - /* Clear the HCI_LE_ADV bit temporarily so that the - * hci_update_random_address knows that it's safe to go ahead - * and write a new random address. The flag will be set back on - * as soon as the SET_ADV_ENABLE HCI command completes. - */ - hci_dev_clear_flag(hdev, HCI_LE_ADV); - - /* Set require_privacy to true only when non-connectable - * advertising is used. In that case it is fine to use a - * non-resolvable private address. - */ - if (hci_update_random_address(req, !connectable, - adv_use_rpa(hdev, flags), - &own_addr_type) < 0) - return; - - memset(&cp, 0, sizeof(cp)); - - if (adv) { - adv_min_interval = adv->min_interval; - adv_max_interval = adv->max_interval; - } else { - adv_min_interval = hdev->le_adv_min_interval; - adv_max_interval = hdev->le_adv_max_interval; - } - - if (connectable) { - cp.type = LE_ADV_IND; - } else { - if (adv_cur_instance_is_scannable(hdev)) - cp.type = LE_ADV_SCAN_IND; - else - cp.type = LE_ADV_NONCONN_IND; - - if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || - hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { - adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; - adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; - } - } - - cp.min_interval = cpu_to_le16(adv_min_interval); - cp.max_interval = cpu_to_le16(adv_max_interval); - cp.own_address_type = own_addr_type; - cp.channel_map = hdev->le_adv_channel_map; - - hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); - - hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); -} - -void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance) -{ - struct hci_dev *hdev = req->hdev; - u8 len; - - if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) - return; - - if (ext_adv_capable(hdev)) { - struct { - struct hci_cp_le_set_ext_scan_rsp_data cp; - u8 data[HCI_MAX_EXT_AD_LENGTH]; - } pdu; - - memset(&pdu, 0, sizeof(pdu)); - - len = eir_create_scan_rsp(hdev, instance, pdu.data); - - if (hdev->scan_rsp_data_len == len && - !memcmp(pdu.data, hdev->scan_rsp_data, len)) - return; - - memcpy(hdev->scan_rsp_data, pdu.data, len); - hdev->scan_rsp_data_len = len; - - pdu.cp.handle = instance; - pdu.cp.length = len; - pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; - pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; - - hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, - sizeof(pdu.cp) + len, &pdu.cp); - } else { - struct hci_cp_le_set_scan_rsp_data cp; - - memset(&cp, 0, sizeof(cp)); - - len = eir_create_scan_rsp(hdev, instance, cp.data); - - if (hdev->scan_rsp_data_len == len && - !memcmp(cp.data, hdev->scan_rsp_data, len)) - return; - - memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); - hdev->scan_rsp_data_len = len; - - cp.length = len; - - hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp); - } -} - -void __hci_req_update_adv_data(struct hci_request *req, u8 instance) -{ - struct hci_dev *hdev = req->hdev; - u8 len; - - if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) - return; - - if (ext_adv_capable(hdev)) { - struct { - struct hci_cp_le_set_ext_adv_data cp; - u8 data[HCI_MAX_EXT_AD_LENGTH]; - } pdu; - - memset(&pdu, 0, sizeof(pdu)); - - len = eir_create_adv_data(hdev, instance, pdu.data); - - /* There's nothing to do if the data hasn't changed */ - if (hdev->adv_data_len == len && - memcmp(pdu.data, hdev->adv_data, len) == 0) - return; - - 
memcpy(hdev->adv_data, pdu.data, len); - hdev->adv_data_len = len; - - pdu.cp.length = len; - pdu.cp.handle = instance; - pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; - pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; - - hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, - sizeof(pdu.cp) + len, &pdu.cp); - } else { - struct hci_cp_le_set_adv_data cp; - - memset(&cp, 0, sizeof(cp)); - - len = eir_create_adv_data(hdev, instance, cp.data); - - /* There's nothing to do if the data hasn't changed */ - if (hdev->adv_data_len == len && - memcmp(cp.data, hdev->adv_data, len) == 0) - return; - - memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); - hdev->adv_data_len = len; - - cp.length = len; - - hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); - } -} - -int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance) -{ - struct hci_request req; - - hci_req_init(&req, hdev); - __hci_req_update_adv_data(&req, instance); - - return hci_req_run(&req, NULL); -} - -static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status, - u16 opcode) -{ - BT_DBG("%s status %u", hdev->name, status); -} - -void hci_req_disable_address_resolution(struct hci_dev *hdev) -{ - struct hci_request req; - __u8 enable = 0x00; - - if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) - return; - - hci_req_init(&req, hdev); - - hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); - - hci_req_run(&req, enable_addr_resolution_complete); -} - -static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) -{ - bt_dev_dbg(hdev, "status %u", status); -} - -void hci_req_reenable_advertising(struct hci_dev *hdev) -{ - struct hci_request req; - - if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && - list_empty(&hdev->adv_instances)) - return; - - hci_req_init(&req, hdev); - - if (hdev->cur_adv_instance) { - __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance, - true); - } else { - if (ext_adv_capable(hdev)) { - __hci_req_start_ext_adv(&req, 0x00); - } else { - __hci_req_update_adv_data(&req, 0x00); - __hci_req_update_scan_rsp_data(&req, 0x00); - __hci_req_enable_advertising(&req); - } - } - - hci_req_run(&req, adv_enable_complete); -} - -static void adv_timeout_expire(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - adv_instance_expire.work); - - struct hci_request req; - u8 instance; - - bt_dev_dbg(hdev, ""); - - hci_dev_lock(hdev); - - hdev->adv_instance_timeout = 0; - - instance = hdev->cur_adv_instance; - if (instance == 0x00) - goto unlock; - - hci_req_init(&req, hdev); - - hci_req_clear_adv_instance(hdev, NULL, &req, instance, false); - - if (list_empty(&hdev->adv_instances)) - __hci_req_disable_advertising(&req); - - hci_req_run(&req, NULL); - -unlock: - hci_dev_unlock(hdev); -} - -static int hci_req_add_le_interleaved_scan(struct hci_request *req, - unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - int ret = 0; - - hci_dev_lock(hdev); - - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) - hci_req_add_le_scan_disable(req, false); - hci_req_add_le_passive_scan(req); - - switch (hdev->interleave_scan_state) { - case INTERLEAVE_SCAN_ALLOWLIST: - bt_dev_dbg(hdev, "next state: allowlist"); - hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; - break; - case INTERLEAVE_SCAN_NO_FILTER: - bt_dev_dbg(hdev, "next state: no filter"); - hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; - break; - case INTERLEAVE_SCAN_NONE: - BT_ERR("unexpected error"); - ret = -1; - } - - hci_dev_unlock(hdev); - - return ret; -} - -static void 
interleave_scan_work(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - interleave_scan.work); - u8 status; - unsigned long timeout; - - if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { - timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); - } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { - timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); - } else { - bt_dev_err(hdev, "unexpected error"); - return; - } - - hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0, - HCI_CMD_TIMEOUT, &status); - - /* Don't continue interleaving if it was canceled */ - if (is_interleave_scanning(hdev)) - queue_delayed_work(hdev->req_workqueue, - &hdev->interleave_scan, timeout); -} - -int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, - bool use_rpa, struct adv_info *adv_instance, - u8 *own_addr_type, bdaddr_t *rand_addr) -{ - int err; - - bacpy(rand_addr, BDADDR_ANY); - - /* If privacy is enabled use a resolvable private address. If - * current RPA has expired then generate a new one. - */ - if (use_rpa) { - /* If the controller supports LL Privacy, use own address - * type 0x03. - */ - if (use_ll_privacy(hdev)) - *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; - else - *own_addr_type = ADDR_LE_DEV_RANDOM; - - if (adv_instance) { - if (adv_rpa_valid(adv_instance)) - return 0; - } else { - if (rpa_valid(hdev)) - return 0; - } - - err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); - if (err < 0) { - bt_dev_err(hdev, "failed to generate new RPA"); - return err; - } - - bacpy(rand_addr, &hdev->rpa); - - return 0; - } - - /* In case of required privacy without resolvable private address, - * use a non-resolvable private address. This is useful for - * non-connectable advertising. - */ - if (require_privacy) { - bdaddr_t nrpa; - - while (true) { - /* The non-resolvable private address is generated - * from random six bytes with the two most significant - * bits cleared. - */ - get_random_bytes(&nrpa, 6); - nrpa.b[5] &= 0x3f; - - /* The non-resolvable private address shall not be - * equal to the public address. - */ - if (bacmp(&hdev->bdaddr, &nrpa)) - break; - } - - *own_addr_type = ADDR_LE_DEV_RANDOM; - bacpy(rand_addr, &nrpa); - - return 0; - } - - /* No privacy so use a public address. */ - *own_addr_type = ADDR_LE_DEV_PUBLIC; - - return 0; -} - -void __hci_req_clear_ext_adv_sets(struct hci_request *req) -{ - hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL); -} - -static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) -{ - struct hci_dev *hdev = req->hdev; - - /* If we're advertising or initiating an LE connection we can't - * go ahead and change the random address at this time. This is - * because the eventual initiator address used for the - * subsequently created connection will be undefined (some - * controllers use the new address and others the one we had - * when the operation started). - * - * In this kind of scenario skip the update and let the random - * address be updated at the next cycle.
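The NRPA loop in hci_get_random_address() above is compact enough to restate in isolation (userspace sketch: bdaddr_t is redefined locally and rand6() stands in for get_random_bytes()):

#include <stdint.h>
#include <string.h>

typedef struct { uint8_t b[6]; } bdaddr_t;      /* little-endian bytes */

static void make_nrpa(bdaddr_t *nrpa, const bdaddr_t *public_addr,
                      void (*rand6)(uint8_t buf[6]))
{
        do {
                rand6(nrpa->b);
                nrpa->b[5] &= 0x3f;     /* top two bits 00 => non-resolvable */
        } while (!memcmp(nrpa->b, public_addr->b, 6));
}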
- */ - if (hci_dev_test_flag(hdev, HCI_LE_ADV) || - hci_lookup_le_connect(hdev)) { - bt_dev_dbg(hdev, "Deferring random address update"); - hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); - return; - } - - hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa); -} - -int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) -{ - struct hci_cp_le_set_ext_adv_params cp; - struct hci_dev *hdev = req->hdev; - bool connectable; - u32 flags; - bdaddr_t random_addr; - u8 own_addr_type; - int err; - struct adv_info *adv_instance; - bool secondary_adv; - - if (instance > 0) { - adv_instance = hci_find_adv_instance(hdev, instance); - if (!adv_instance) - return -EINVAL; - } else { - adv_instance = NULL; - } - - flags = hci_adv_instance_flags(hdev, instance); - - /* If the "connectable" instance flag was not set, then choose between - * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. - */ - connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || - mgmt_get_connectable(hdev); - - if (!is_advertising_allowed(hdev, connectable)) - return -EPERM; - - /* Set require_privacy to true only when non-connectable - * advertising is used. In that case it is fine to use a - * non-resolvable private address. - */ - err = hci_get_random_address(hdev, !connectable, - adv_use_rpa(hdev, flags), adv_instance, - &own_addr_type, &random_addr); - if (err < 0) - return err; - - memset(&cp, 0, sizeof(cp)); - - if (adv_instance) { - hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval); - hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval); - cp.tx_power = adv_instance->tx_power; - } else { - hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); - hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); - cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; - } - - secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); - - if (connectable) { - if (secondary_adv) - cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); - else - cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); - } else if (hci_adv_instance_is_scannable(hdev, instance) || - (flags & MGMT_ADV_PARAM_SCAN_RSP)) { - if (secondary_adv) - cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); - else - cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); - } else { - if (secondary_adv) - cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); - else - cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); - } - - cp.own_addr_type = own_addr_type; - cp.channel_map = hdev->le_adv_channel_map; - cp.handle = instance; - - if (flags & MGMT_ADV_FLAG_SEC_2M) { - cp.primary_phy = HCI_ADV_PHY_1M; - cp.secondary_phy = HCI_ADV_PHY_2M; - } else if (flags & MGMT_ADV_FLAG_SEC_CODED) { - cp.primary_phy = HCI_ADV_PHY_CODED; - cp.secondary_phy = HCI_ADV_PHY_CODED; - } else { - /* In all other cases use 1M */ - cp.primary_phy = HCI_ADV_PHY_1M; - cp.secondary_phy = HCI_ADV_PHY_1M; - } - - hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); - - if ((own_addr_type == ADDR_LE_DEV_RANDOM || - own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) && - bacmp(&random_addr, BDADDR_ANY)) { - struct hci_cp_le_set_adv_set_rand_addr cp; - - /* Check if random address need to be updated */ - if (adv_instance) { - if (!bacmp(&random_addr, &adv_instance->random_addr)) - return 0; - } else { - if (!bacmp(&random_addr, &hdev->random_addr)) - return 0; - /* Instance 0x00 doesn't have an adv_info, instead it - * uses hdev->random_addr to track its address so - * whenever it needs to be updated this also set the - * random address since hdev->random_addr 
is shared with - * scan state machine. - */ - set_random_addr(req, &random_addr); - } - - memset(&cp, 0, sizeof(cp)); - - cp.handle = instance; - bacpy(&cp.bdaddr, &random_addr); - - hci_req_add(req, - HCI_OP_LE_SET_ADV_SET_RAND_ADDR, - sizeof(cp), &cp); - } - - return 0; -} - -int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_le_set_ext_adv_enable *cp; - struct hci_cp_ext_adv_set *adv_set; - u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; - struct adv_info *adv_instance; - - if (instance > 0) { - adv_instance = hci_find_adv_instance(hdev, instance); - if (!adv_instance) - return -EINVAL; - } else { - adv_instance = NULL; - } - - cp = (void *) data; - adv_set = (void *) cp->data; - - memset(cp, 0, sizeof(*cp)); - - cp->enable = 0x01; - cp->num_of_sets = 0x01; - - memset(adv_set, 0, sizeof(*adv_set)); - - adv_set->handle = instance; - - /* Set duration per instance since controller is responsible for - * scheduling it. - */ - if (adv_instance && adv_instance->duration) { - u16 duration = adv_instance->timeout * MSEC_PER_SEC; - - /* Time = N * 10 ms */ - adv_set->duration = cpu_to_le16(duration / 10); - } - - hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, - sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets, - data); - - return 0; -} - -int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_le_set_ext_adv_enable *cp; - struct hci_cp_ext_adv_set *adv_set; - u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; - u8 req_size; - - /* If request specifies an instance that doesn't exist, fail */ - if (instance > 0 && !hci_find_adv_instance(hdev, instance)) - return -EINVAL; - - memset(data, 0, sizeof(data)); - - cp = (void *)data; - adv_set = (void *)cp->data; - - /* Instance 0x00 indicates all advertising instances will be disabled */ - cp->num_of_sets = !!instance; - cp->enable = 0x00; - - adv_set->handle = instance; - - req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets; - hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data); - - return 0; -} - -int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance) -{ - struct hci_dev *hdev = req->hdev; - - /* If request specifies an instance that doesn't exist, fail */ - if (instance > 0 && !hci_find_adv_instance(hdev, instance)) - return -EINVAL; - - hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance); - - return 0; -} - -int __hci_req_start_ext_adv(struct hci_request *req, u8 instance) -{ - struct hci_dev *hdev = req->hdev; - struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance); - int err; - - /* If instance isn't pending, the chip knows about it, and it's safe to - * disable - */ - if (adv_instance && !adv_instance->pending) - __hci_req_disable_ext_adv_instance(req, instance); - - err = __hci_req_setup_ext_adv_instance(req, instance); - if (err < 0) - return err; - - __hci_req_update_scan_rsp_data(req, instance); - __hci_req_enable_ext_advertising(req, instance); - - return 0; -} - -int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, - bool force) -{ - struct hci_dev *hdev = req->hdev; - struct adv_info *adv_instance = NULL; - u16 timeout; - - if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || - list_empty(&hdev->adv_instances)) - return -EPERM; - - if (hdev->adv_instance_timeout) - return -EBUSY; - - adv_instance = hci_find_adv_instance(hdev, instance); - if (!adv_instance) - return -ENOENT; - - /* A zero timeout means 
unlimited advertising. As long as there is - * only one instance, duration should be ignored. We still set a timeout - * in case further instances are being added later on. - * - * If the remaining lifetime of the instance is more than the duration - * then the timeout corresponds to the duration, otherwise it will be - * reduced to the remaining instance lifetime. - */ - if (adv_instance->timeout == 0 || - adv_instance->duration <= adv_instance->remaining_time) - timeout = adv_instance->duration; - else - timeout = adv_instance->remaining_time; - - /* The remaining time is being reduced unless the instance is being - * advertised without time limit. - */ - if (adv_instance->timeout) - adv_instance->remaining_time = - adv_instance->remaining_time - timeout; - - /* Only use work for scheduling instances with legacy advertising */ - if (!ext_adv_capable(hdev)) { - hdev->adv_instance_timeout = timeout; - queue_delayed_work(hdev->req_workqueue, - &hdev->adv_instance_expire, - msecs_to_jiffies(timeout * 1000)); - } - - /* If we're just re-scheduling the same instance again then do not - * execute any HCI commands. This happens when a single instance is - * being advertised. - */ - if (!force && hdev->cur_adv_instance == instance && - hci_dev_test_flag(hdev, HCI_LE_ADV)) - return 0; - - hdev->cur_adv_instance = instance; - if (ext_adv_capable(hdev)) { - __hci_req_start_ext_adv(req, instance); - } else { - __hci_req_update_adv_data(req, instance); - __hci_req_update_scan_rsp_data(req, instance); - __hci_req_enable_advertising(req); - } - - return 0; -} - -/* For a single instance: - * - force == true: The instance will be removed even when its remaining - * lifetime is not zero. - * - force == false: the instance will be deactivated but kept stored unless - * the remaining lifetime is zero. - * - * For instance == 0x00: - * - force == true: All instances will be removed regardless of their timeout - * setting. - * - force == false: Only instances that have a timeout will be removed. - */ -void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, - struct hci_request *req, u8 instance, - bool force) -{ - struct adv_info *adv_instance, *n, *next_instance = NULL; - int err; - u8 rem_inst; - - /* Cancel any timeout concerning the removed instance(s). */ - if (!instance || hdev->cur_adv_instance == instance) - cancel_adv_timeout(hdev); - - /* Get the next instance to advertise BEFORE we remove - * the current one. This can be the same instance again - * if there is only one instance. - */ - if (instance && hdev->cur_adv_instance == instance) - next_instance = hci_get_next_instance(hdev, instance); - - if (instance == 0x00) { - list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, - list) { - if (!(force || adv_instance->timeout)) - continue; - - rem_inst = adv_instance->instance; - err = hci_remove_adv_instance(hdev, rem_inst); - if (!err) - mgmt_advertising_removed(sk, hdev, rem_inst); - } - } else { - adv_instance = hci_find_adv_instance(hdev, instance); - - if (force || (adv_instance && adv_instance->timeout && - !adv_instance->remaining_time)) { - /* Don't advertise a removed instance. 
*/ - if (next_instance && - next_instance->instance == instance) - next_instance = NULL; - - err = hci_remove_adv_instance(hdev, instance); - if (!err) - mgmt_advertising_removed(sk, hdev, instance); - } - } - - if (!req || !hdev_is_powered(hdev) || - hci_dev_test_flag(hdev, HCI_ADVERTISING)) - return; - - if (next_instance && !ext_adv_capable(hdev)) - __hci_req_schedule_adv_instance(req, next_instance->instance, - false); -} - -int hci_update_random_address(struct hci_request *req, bool require_privacy, - bool use_rpa, u8 *own_addr_type) -{ - struct hci_dev *hdev = req->hdev; - int err; - - /* If privacy is enabled use a resolvable private address. If - * the current RPA has expired or something other than the - * current RPA is in use, then generate a new one. - */ - if (use_rpa) { - /* If the controller supports LL Privacy, use own address - * type 0x03. - */ - if (use_ll_privacy(hdev)) - *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; - else - *own_addr_type = ADDR_LE_DEV_RANDOM; - - if (rpa_valid(hdev)) - return 0; - - err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); - if (err < 0) { - bt_dev_err(hdev, "failed to generate new RPA"); - return err; - } - - set_random_addr(req, &hdev->rpa); - - return 0; - } - - /* In case of required privacy without resolvable private address, - * use a non-resolvable private address. This is useful for active - * scanning and non-connectable advertising. - */ - if (require_privacy) { - bdaddr_t nrpa; - - while (true) { - /* The non-resolvable private address is generated - * from random six bytes with the two most significant - * bits cleared. - */ - get_random_bytes(&nrpa, 6); - nrpa.b[5] &= 0x3f; - - /* The non-resolvable private address shall not be - * equal to the public address. - */ - if (bacmp(&hdev->bdaddr, &nrpa)) - break; - } - - *own_addr_type = ADDR_LE_DEV_RANDOM; - set_random_addr(req, &nrpa); - return 0; - } - - /* If use of a static address is forced or there is no public - * address, use the static address as the random address (but skip - * the HCI command if the current random address is already the - * static one). - * - * In case BR/EDR has been disabled on a dual-mode controller - * and a static address has been configured, then use that - * address instead of the public BR/EDR address. - */ - if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || - !bacmp(&hdev->bdaddr, BDADDR_ANY) || - (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && - bacmp(&hdev->static_addr, BDADDR_ANY))) { - *own_addr_type = ADDR_LE_DEV_RANDOM; - if (bacmp(&hdev->static_addr, &hdev->random_addr)) - hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, - &hdev->static_addr); - return 0; - } - - /* Neither privacy nor static address is being used so use a - * public address.
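Condensing hci_update_random_address() to its decision ladder makes the fallback order explicit (illustrative sketch; want_static flattens the three static-address conditions above, and the constants match their hci.h values):

#include <stdbool.h>
#include <stdint.h>

#define ADDR_LE_DEV_PUBLIC              0x00
#define ADDR_LE_DEV_RANDOM              0x01
#define ADDR_LE_DEV_RANDOM_RESOLVED     0x03

static uint8_t pick_own_addr_type(bool use_rpa, bool ll_privacy,
                                  bool require_privacy, bool want_static)
{
        if (use_rpa)            /* resolvable private address */
                return ll_privacy ? ADDR_LE_DEV_RANDOM_RESOLVED
                                  : ADDR_LE_DEV_RANDOM;
        if (require_privacy)    /* fall back to an NRPA */
                return ADDR_LE_DEV_RANDOM;
        if (want_static)        /* forced static, no public, or LE-only */
                return ADDR_LE_DEV_RANDOM;
        return ADDR_LE_DEV_PUBLIC;
}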
- */ - *own_addr_type = ADDR_LE_DEV_PUBLIC; - - return 0; -} - -static bool disconnected_accept_list_entries(struct hci_dev *hdev) -{ - struct bdaddr_list *b; - - list_for_each_entry(b, &hdev->accept_list, list) { - struct hci_conn *conn; - - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); - if (!conn) - return true; - - if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) - return true; - } - - return false; -} - -void __hci_req_update_scan(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - u8 scan; - - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) - return; - - if (!hdev_is_powered(hdev)) - return; - - if (mgmt_powering_down(hdev)) - return; - - if (hdev->scanning_paused) - return; - - if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || - disconnected_accept_list_entries(hdev)) - scan = SCAN_PAGE; - else - scan = SCAN_DISABLED; - - if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) - scan |= SCAN_INQUIRY; - - if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && - test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) - return; - - hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); -} - -static int update_scan(struct hci_request *req, unsigned long opt) -{ - hci_dev_lock(req->hdev); - __hci_req_update_scan(req); - hci_dev_unlock(req->hdev); - return 0; -} - -static void scan_update_work(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update); - - hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL); -} - -static u8 get_service_classes(struct hci_dev *hdev) -{ - struct bt_uuid *uuid; - u8 val = 0; - - list_for_each_entry(uuid, &hdev->uuids, list) - val |= uuid->svc_hint; - - return val; -} - -void __hci_req_update_class(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - u8 cod[3]; - - bt_dev_dbg(hdev, ""); - - if (!hdev_is_powered(hdev)) - return; - - if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) - return; - - if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) - return; - - cod[0] = hdev->minor_class; - cod[1] = hdev->major_class; - cod[2] = get_service_classes(hdev); - - if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) - cod[1] |= 0x20; - - if (memcmp(cod, hdev->dev_class, 3) == 0) - return; - - hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); -} - -static void write_iac(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_write_current_iac_lap cp; - - if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) - return; - - if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { - /* Limited discoverable mode */ - cp.num_iac = min_t(u8, hdev->num_iac, 2); - cp.iac_lap[0] = 0x00; /* LIAC */ - cp.iac_lap[1] = 0x8b; - cp.iac_lap[2] = 0x9e; - cp.iac_lap[3] = 0x33; /* GIAC */ - cp.iac_lap[4] = 0x8b; - cp.iac_lap[5] = 0x9e; - } else { - /* General discoverable mode */ - cp.num_iac = 1; - cp.iac_lap[0] = 0x33; /* GIAC */ - cp.iac_lap[1] = 0x8b; - cp.iac_lap[2] = 0x9e; - } - - hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP, - (cp.num_iac * 3) + 1, &cp); -} - -static int discoverable_update(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - hci_dev_lock(hdev); - - if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { - write_iac(req); - __hci_req_update_scan(req); - __hci_req_update_class(req); - } - - /* Advertising instances don't use the global discoverable setting, so - * only update AD if advertising was enabled using Set Advertising. 
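write_iac() above stores each inquiry access code LAP least-significant byte first, so GIAC 0x9e8b33 becomes 0x33 0x8b 0x9e on the wire and LIAC 0x9e8b00 becomes 0x00 0x8b 0x9e. A helper showing the byte order (illustrative sketch):

#include <stdint.h>

static void lap_to_bytes(uint32_t lap, uint8_t out[3])
{
        out[0] = lap & 0xff;            /* 0x33 for the GIAC */
        out[1] = (lap >> 8) & 0xff;     /* 0x8b */
        out[2] = (lap >> 16) & 0xff;    /* 0x9e */
}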
- */ - if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { - __hci_req_update_adv_data(req, 0x00); - - /* Discoverable mode affects the local advertising - * address in limited privacy mode. - */ - if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { - if (ext_adv_capable(hdev)) - __hci_req_start_ext_adv(req, 0x00); - else - __hci_req_enable_advertising(req); - } - } - - hci_dev_unlock(hdev); - - return 0; -} - -void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, - u8 reason) -{ - switch (conn->state) { - case BT_CONNECTED: - case BT_CONFIG: - if (conn->type == AMP_LINK) { - struct hci_cp_disconn_phy_link cp; - - cp.phy_handle = HCI_PHY_HANDLE(conn->handle); - cp.reason = reason; - hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp), - &cp); - } else { - struct hci_cp_disconnect dc; - - dc.handle = cpu_to_le16(conn->handle); - dc.reason = reason; - hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc); - } - - conn->state = BT_DISCONN; - - break; - case BT_CONNECT: - if (conn->type == LE_LINK) { - if (test_bit(HCI_CONN_SCANNING, &conn->flags)) - break; - hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL, - 0, NULL); - } else if (conn->type == ACL_LINK) { - if (req->hdev->hci_ver < BLUETOOTH_VER_1_2) - break; - hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL, - 6, &conn->dst); - } - break; - case BT_CONNECT2: - if (conn->type == ACL_LINK) { - struct hci_cp_reject_conn_req rej; - - bacpy(&rej.bdaddr, &conn->dst); - rej.reason = reason; - - hci_req_add(req, HCI_OP_REJECT_CONN_REQ, - sizeof(rej), &rej); - } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { - struct hci_cp_reject_sync_conn_req rej; - - bacpy(&rej.bdaddr, &conn->dst); - - /* SCO rejection has its own limited set of - * allowed error values (0x0D-0x0F) which isn't - * compatible with most values passed to this - * function. To be safe hard-code one of the - * values that's suitable for SCO. 
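For reference, the limited range the comment refers to is 0x0D-0x0F (enumerator names are illustrative; only 0x0D, HCI_ERROR_REJ_LIMITED_RESOURCES, is used below):

enum sco_reject_reason {
        SCO_REJ_LIMITED_RESOURCES   = 0x0d,     /* hard-coded below */
        SCO_REJ_SECURITY_REASONS    = 0x0e,
        SCO_REJ_UNACCEPTABLE_BDADDR = 0x0f,
};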
- */ - rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; - - hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ, - sizeof(rej), &rej); - } - break; - default: - conn->state = BT_CLOSED; - break; - } -} - -static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) -{ - if (status) - bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status); -} - -int hci_abort_conn(struct hci_conn *conn, u8 reason) -{ - struct hci_request req; - int err; - - hci_req_init(&req, conn->hdev); - - __hci_abort_conn(&req, conn, reason); - - err = hci_req_run(&req, abort_conn_complete); - if (err && err != -ENODATA) { - bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err); - return err; - } - - return 0; -} - -static int le_scan_disable(struct hci_request *req, unsigned long opt) -{ - hci_req_add_le_scan_disable(req, false); - return 0; -} - -static int bredr_inquiry(struct hci_request *req, unsigned long opt) -{ - u8 length = opt; - const u8 giac[3] = { 0x33, 0x8b, 0x9e }; - const u8 liac[3] = { 0x00, 0x8b, 0x9e }; - struct hci_cp_inquiry cp; - - if (test_bit(HCI_INQUIRY, &req->hdev->flags)) - return 0; - - bt_dev_dbg(req->hdev, ""); - - hci_dev_lock(req->hdev); - hci_inquiry_cache_flush(req->hdev); - hci_dev_unlock(req->hdev); - - memset(&cp, 0, sizeof(cp)); - - if (req->hdev->discovery.limited) - memcpy(&cp.lap, liac, sizeof(cp.lap)); - else - memcpy(&cp.lap, giac, sizeof(cp.lap)); - - cp.length = length; - - hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); - - return 0; -} - -static void le_scan_disable_work(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - le_scan_disable.work); - u8 status; - - bt_dev_dbg(hdev, ""); - - if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) - return; - - cancel_delayed_work(&hdev->le_scan_restart); - - hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status); - if (status) { - bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x", - status); - return; - } - - hdev->discovery.scan_start = 0; - - /* If we were running LE only scan, change discovery state. If - * we were running both LE and BR/EDR inquiry simultaneously, - * and BR/EDR inquiry is already finished, stop discovery, - * otherwise BR/EDR inquiry will stop discovery when finished. - * If we will resolve remote device name, do not change - * discovery state. - */ - - if (hdev->discovery.type == DISCOV_TYPE_LE) - goto discov_stopped; - - if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) - return; - - if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { - if (!test_bit(HCI_INQUIRY, &hdev->flags) && - hdev->discovery.state != DISCOVERY_RESOLVING) - goto discov_stopped; - - return; - } - - hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN, - HCI_CMD_TIMEOUT, &status); - if (status) { - bt_dev_err(hdev, "inquiry failed: status 0x%02x", status); - goto discov_stopped; - } - - return; - -discov_stopped: - hci_dev_lock(hdev); - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - hci_dev_unlock(hdev); -} - -static int le_scan_restart(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - - /* If controller is not scanning we are done. 
*/ - if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) - return 0; - - if (hdev->scanning_paused) { - bt_dev_dbg(hdev, "Scanning is paused for suspend"); - return 0; - } - - hci_req_add_le_scan_disable(req, false); - - if (use_ext_scan(hdev)) { - struct hci_cp_le_set_ext_scan_enable ext_enable_cp; - - memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); - ext_enable_cp.enable = LE_SCAN_ENABLE; - ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - - hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, - sizeof(ext_enable_cp), &ext_enable_cp); - } else { - struct hci_cp_le_set_scan_enable cp; - - memset(&cp, 0, sizeof(cp)); - cp.enable = LE_SCAN_ENABLE; - cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); - } - - return 0; -} - -static void le_scan_restart_work(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - le_scan_restart.work); - unsigned long timeout, duration, scan_start, now; - u8 status; - - bt_dev_dbg(hdev, ""); - - hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status); - if (status) { - bt_dev_err(hdev, "failed to restart LE scan: status %d", - status); - return; - } - - hci_dev_lock(hdev); - - if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || - !hdev->discovery.scan_start) - goto unlock; - - /* When the scan was started, hdev->le_scan_disable has been queued - * after duration from scan_start. During scan restart this job - * has been canceled, and we need to queue it again after proper - * timeout, to make sure that scan does not run indefinitely. - */ - duration = hdev->discovery.scan_duration; - scan_start = hdev->discovery.scan_start; - now = jiffies; - if (now - scan_start <= duration) { - int elapsed; - - if (now >= scan_start) - elapsed = now - scan_start; - else - elapsed = ULONG_MAX - scan_start + now; - - timeout = duration - elapsed; - } else { - timeout = 0; - } - - queue_delayed_work(hdev->req_workqueue, - &hdev->le_scan_disable, timeout); - -unlock: - hci_dev_unlock(hdev); -} - -static int active_scan(struct hci_request *req, unsigned long opt) -{ - uint16_t interval = opt; - struct hci_dev *hdev = req->hdev; - u8 own_addr_type; - /* Accept list is not used for discovery */ - u8 filter_policy = 0x00; - /* Default is to enable duplicates filter */ - u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - /* Discovery doesn't require controller address resolution */ - bool addr_resolv = false; - int err; - - bt_dev_dbg(hdev, ""); - - /* If controller is scanning, it means the background scanning is - * running. Thus, we should temporarily stop it in order to set the - * discovery scanning parameters. - */ - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { - hci_req_add_le_scan_disable(req, false); - cancel_interleave_scan(hdev); - } - - /* All active scans will be done with either a resolvable private - * address (when privacy feature has been enabled) or non-resolvable - * private address. - */ - err = hci_update_random_address(req, true, scan_use_rpa(hdev), - &own_addr_type); - if (err < 0) - own_addr_type = ADDR_LE_DEV_PUBLIC; - - if (hci_is_adv_monitoring(hdev)) { - /* Duplicate filter should be disabled when some advertisement - * monitor is activated, otherwise AdvMon can only receive one - * advertisement for one peer(*) during active scanning, and - * might report loss to these peers. - * - * Note that different controllers have different meanings of - * |duplicate|. 
Some of them consider packets with the same - * address as duplicate, and others consider packets with the - * same address and the same RSSI as duplicate. Although the - * duplicate filter would not need to be disabled in the latter - * case, active scanning usually runs only for a short period - * of time, so the power impact should be negligible. - */ - filter_dup = LE_SCAN_FILTER_DUP_DISABLE; - } - - hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, - hdev->le_scan_window_discovery, own_addr_type, - filter_policy, filter_dup, addr_resolv); - return 0; -} - -static int interleaved_discov(struct hci_request *req, unsigned long opt) -{ - int err; - - bt_dev_dbg(req->hdev, ""); - - err = active_scan(req, opt); - if (err) - return err; - - return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN); -} - -static void start_discovery(struct hci_dev *hdev, u8 *status) -{ - unsigned long timeout; - - bt_dev_dbg(hdev, "type %u", hdev->discovery.type); - - switch (hdev->discovery.type) { - case DISCOV_TYPE_BREDR: - if (!hci_dev_test_flag(hdev, HCI_INQUIRY)) - hci_req_sync(hdev, bredr_inquiry, - DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT, - status); - return; - case DISCOV_TYPE_INTERLEAVED: - /* When running simultaneous discovery, the LE scanning time - * should occupy the whole discovery time since BR/EDR inquiry - * and LE scanning are scheduled by the controller. - * - * For interleaved discovery, in comparison, BR/EDR inquiry - * and LE scanning are done sequentially with separate - * timeouts. - */ - if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, - &hdev->quirks)) { - timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); - /* During simultaneous discovery, we double the LE scan - * interval. We must leave some time for the controller - * to do BR/EDR inquiry. - */ - hci_req_sync(hdev, interleaved_discov, - hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT, - status); - break; - } - - timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); - hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, - HCI_CMD_TIMEOUT, status); - break; - case DISCOV_TYPE_LE: - timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); - hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, - HCI_CMD_TIMEOUT, status); - break; - default: - *status = HCI_ERROR_UNSPECIFIED; - return; - } - - if (*status) - return; - - bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); - - /* When service discovery is used and the controller has a - * strict duplicate filter, it is important to remember the - * start and duration of the scan. This is required for - * restarting scanning during the discovery phase.
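le_scan_restart_work() earlier re-arms le_scan_disable with whatever remains of this recorded duration; because jiffies is an unsigned counter, the subtraction yields the elapsed time even across a wrap. Condensed (illustrative sketch; the kernel spells the wrap case out explicitly with ULONG_MAX instead):

static unsigned long remaining_scan_timeout(unsigned long now,
                                            unsigned long scan_start,
                                            unsigned long duration)
{
        /* Unsigned subtraction is modular, so this is correct even if
         * the jiffies counter wrapped between scan_start and now.
         */
        unsigned long elapsed = now - scan_start;

        return elapsed <= duration ? duration - elapsed : 0;
}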
- */ - if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && - hdev->discovery.result_filtering) { - hdev->discovery.scan_start = jiffies; - hdev->discovery.scan_duration = timeout; - } - - queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, - timeout); -} - -bool hci_req_stop_discovery(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct discovery_state *d = &hdev->discovery; - struct hci_cp_remote_name_req_cancel cp; - struct inquiry_entry *e; - bool ret = false; - - bt_dev_dbg(hdev, "state %u", hdev->discovery.state); - - if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { - if (test_bit(HCI_INQUIRY, &hdev->flags)) - hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL); - - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { - cancel_delayed_work(&hdev->le_scan_disable); - cancel_delayed_work(&hdev->le_scan_restart); - hci_req_add_le_scan_disable(req, false); - } - - ret = true; - } else { - /* Passive scanning */ - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { - hci_req_add_le_scan_disable(req, false); - ret = true; - } - } - - /* No further actions needed for LE-only discovery */ - if (d->type == DISCOV_TYPE_LE) - return ret; - - if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { - e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, - NAME_PENDING); - if (!e) - return ret; - - bacpy(&cp.bdaddr, &e->data.bdaddr); - hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), - &cp); - ret = true; - } - - return ret; -} - -static void config_data_path_complete(struct hci_dev *hdev, u8 status, - u16 opcode) -{ - bt_dev_dbg(hdev, "status %u", status); -} - -int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec) -{ - struct hci_request req; - int err; - __u8 vnd_len, *vnd_data = NULL; - struct hci_op_configure_data_path *cmd = NULL; - - hci_req_init(&req, hdev); - - err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len, - &vnd_data); - if (err < 0) - goto error; - - cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL); - if (!cmd) { - err = -ENOMEM; - goto error; - } - - err = hdev->get_data_path_id(hdev, &cmd->data_path_id); - if (err < 0) - goto error; - - cmd->vnd_len = vnd_len; - memcpy(cmd->vnd_data, vnd_data, vnd_len); - - cmd->direction = 0x00; - hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd); - - cmd->direction = 0x01; - hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd); - - err = hci_req_run(&req, config_data_path_complete); -error: - - kfree(cmd); - kfree(vnd_data); - return err; -} - -static int stop_discovery(struct hci_request *req, unsigned long opt) -{ - hci_dev_lock(req->hdev); - hci_req_stop_discovery(req); - hci_dev_unlock(req->hdev); - - return 0; -} - -static void discov_update(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - discov_update); - u8 status = 0; - - switch (hdev->discovery.state) { - case DISCOVERY_STARTING: - start_discovery(hdev, &status); - mgmt_start_discovery_complete(hdev, status); - if (status) - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - else - hci_discovery_set_state(hdev, DISCOVERY_FINDING); - break; - case DISCOVERY_STOPPING: - hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status); - mgmt_stop_discovery_complete(hdev, status); - if (!status) - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - break; - case DISCOVERY_STOPPED: - default: - return; - } -} - -static void discov_off(struct work_struct *work) -{ - struct hci_dev *hdev = 
container_of(work, struct hci_dev, - discov_off.work); - - bt_dev_dbg(hdev, ""); - - hci_dev_lock(hdev); - - /* When discoverable timeout triggers, then just make sure - * the limited discoverable flag is cleared. Even in the case - * of a timeout triggered from general discoverable, it is - * safe to unconditionally clear the flag. - */ - hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); - hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); - hdev->discov_timeout = 0; - - hci_dev_unlock(hdev); - - hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL); - mgmt_new_settings(hdev); -} - -static int powered_update_hci(struct hci_request *req, unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - u8 link_sec; - - hci_dev_lock(hdev); - - if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && - !lmp_host_ssp_capable(hdev)) { - u8 mode = 0x01; - - hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode); - - if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) { - u8 support = 0x01; - - hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, - sizeof(support), &support); - } - } - - if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) && - lmp_bredr_capable(hdev)) { - struct hci_cp_write_le_host_supported cp; - - cp.le = 0x01; - cp.simul = 0x00; - - /* Check first if we already have the right - * host state (host features set) - */ - if (cp.le != lmp_host_le_capable(hdev) || - cp.simul != lmp_host_le_br_capable(hdev)) - hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, - sizeof(cp), &cp); - } - - if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { - /* Make sure the controller has a good default for - * advertising data. This also applies to the case - * where BR/EDR was toggled during the AUTO_OFF phase. - */ - if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || - list_empty(&hdev->adv_instances)) { - int err; - - if (ext_adv_capable(hdev)) { - err = __hci_req_setup_ext_adv_instance(req, - 0x00); - if (!err) - __hci_req_update_scan_rsp_data(req, - 0x00); - } else { - err = 0; - __hci_req_update_adv_data(req, 0x00); - __hci_req_update_scan_rsp_data(req, 0x00); - } - - if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { - if (!ext_adv_capable(hdev)) - __hci_req_enable_advertising(req); - else if (!err) - __hci_req_enable_ext_advertising(req, - 0x00); - } - } else if (!list_empty(&hdev->adv_instances)) { - struct adv_info *adv_instance; - - adv_instance = list_first_entry(&hdev->adv_instances, - struct adv_info, list); - __hci_req_schedule_adv_instance(req, - adv_instance->instance, - true); - } - } - - link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); - if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) - hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, - sizeof(link_sec), &link_sec); - - if (lmp_bredr_capable(hdev)) { - if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) - __hci_req_write_fast_connectable(req, true); - else - __hci_req_write_fast_connectable(req, false); - __hci_req_update_scan(req); - __hci_req_update_class(req); - __hci_req_update_name(req); - __hci_req_update_eir(req); - } - - hci_dev_unlock(hdev); - return 0; -} - -int __hci_req_hci_power_on(struct hci_dev *hdev) -{ - /* Register the available SMP channels (BR/EDR and LE) only when - * successfully powering on the controller. This late - * registration is required so that LE SMP can clearly decide if - * the public address or static address is used. 
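powered_update_hci() below is driven through __hci_req_sync(); the same build-and-run shape applies to any one-off command batch using the request API this file implements. A minimal sketch (the wrapper function name is illustrative):

static int write_scan_enable(struct hci_dev *hdev, u8 scan)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

        /* hci_req_run() returns -ENODATA when nothing was queued; an
         * error hit while building is recorded in req.err and reported
         * here instead of queueing a partial request.
         */
        return hci_req_run(&req, NULL);
}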
- */ - smp_register(hdev); - - return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT, - NULL); -} - -void hci_request_setup(struct hci_dev *hdev) -{ - INIT_WORK(&hdev->discov_update, discov_update); - INIT_WORK(&hdev->scan_update, scan_update_work); - INIT_DELAYED_WORK(&hdev->discov_off, discov_off); - INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); - INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); - INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); - INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); -} - -void hci_request_cancel_all(struct hci_dev *hdev) -{ - __hci_cmd_sync_cancel(hdev, ENODEV); - - cancel_work_sync(&hdev->discov_update); - cancel_work_sync(&hdev->scan_update); - cancel_delayed_work_sync(&hdev->discov_off); - cancel_delayed_work_sync(&hdev->le_scan_disable); - cancel_delayed_work_sync(&hdev->le_scan_restart); - - if (hdev->adv_instance_timeout) { - cancel_delayed_work_sync(&hdev->adv_instance_expire); - hdev->adv_instance_timeout = 0; - } - - cancel_interleave_scan(hdev); -} diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h deleted file mode 100644 index 7f8df258e295..000000000000 --- a/net/bluetooth/hci_request.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - BlueZ - Bluetooth protocol stack for Linux - Copyright (C) 2014 Intel Corporation - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License version 2 as - published by the Free Software Foundation; - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. - IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS - SOFTWARE IS DISCLAIMED. -*/ - -#include <asm/unaligned.h> - -#define HCI_REQ_DONE 0 -#define HCI_REQ_PEND 1 -#define HCI_REQ_CANCELED 2 - -#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock) -#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock) - -#define HCI_REQ_DONE 0 -#define HCI_REQ_PEND 1 -#define HCI_REQ_CANCELED 2 - -struct hci_request { - struct hci_dev *hdev; - struct sk_buff_head cmd_q; - - /* If something goes wrong when building the HCI request, the error - * value is stored in this field. 
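The header being deleted here declares the batch-request API that the sync machinery replaces. For readers without the pre-removal tree, a hedged sketch of how callers used it, built only from the signatures declared in this header; the demo_ names are illustrative:

static void demo_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "opcode 0x%4.4x status %u", opcode, status);
}

static int demo_write_ssp_mode(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 mode = 0x01;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

	/* hci_req_run() reports req.err if building the request failed */
	return hci_req_run(&req, demo_complete);
}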
- */ - int err; -}; - -void hci_req_init(struct hci_request *req, struct hci_dev *hdev); -void hci_req_purge(struct hci_request *req); -bool hci_req_status_pend(struct hci_dev *hdev); -int hci_req_run(struct hci_request *req, hci_req_complete_t complete); -int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete); -void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, - struct sk_buff *skb); -void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, - const void *param); -void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, - const void *param, u8 event); -void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, - hci_req_complete_t *req_complete, - hci_req_complete_skb_t *req_complete_skb); - -int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, - unsigned long opt), - unsigned long opt, u32 timeout, u8 *hci_status); -int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, - unsigned long opt), - unsigned long opt, u32 timeout, u8 *hci_status); - -struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, - const void *param); - -int __hci_req_hci_power_on(struct hci_dev *hdev); - -void __hci_req_write_fast_connectable(struct hci_request *req, bool enable); -void __hci_req_update_name(struct hci_request *req); -void __hci_req_update_eir(struct hci_request *req); - -void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn); -void hci_req_add_le_passive_scan(struct hci_request *req); - -void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next); - -void hci_req_disable_address_resolution(struct hci_dev *hdev); -void hci_req_reenable_advertising(struct hci_dev *hdev); -void __hci_req_enable_advertising(struct hci_request *req); -void __hci_req_disable_advertising(struct hci_request *req); -void __hci_req_update_adv_data(struct hci_request *req, u8 instance); -int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance); -void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance); - -int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, - bool force); -void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, - struct hci_request *req, u8 instance, - bool force); - -int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance); -int __hci_req_start_ext_adv(struct hci_request *req, u8 instance); -int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance); -int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance); -int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance); -void __hci_req_clear_ext_adv_sets(struct hci_request *req); -int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, - bool use_rpa, struct adv_info *adv_instance, - u8 *own_addr_type, bdaddr_t *rand_addr); - -void __hci_req_update_class(struct hci_request *req); - -/* Returns true if HCI commands were queued */ -bool hci_req_stop_discovery(struct hci_request *req); - -int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec); - -static inline void hci_req_update_scan(struct hci_dev *hdev) -{ - queue_work(hdev->req_workqueue, &hdev->scan_update); -} - -void __hci_req_update_scan(struct hci_request *req); - -int hci_update_random_address(struct hci_request *req, bool require_privacy, - bool use_rpa, u8 *own_addr_type); - -int hci_abort_conn(struct hci_conn *conn, u8 reason); -void __hci_abort_conn(struct 
hci_request *req, struct hci_conn *conn, - u8 reason); - -void hci_request_setup(struct hci_dev *hdev); -void hci_request_cancel_all(struct hci_dev *hdev); diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 33b3c0ffc339..4e7bf63af9c5 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -27,7 +27,7 @@ #include <linux/export.h> #include <linux/utsname.h> #include <linux/sched.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> @@ -101,7 +101,7 @@ static bool hci_sock_gen_cookie(struct sock *sk) int id = hci_pi(sk)->cookie; if (!id) { - id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL); + id = ida_alloc_min(&sock_cookie_ida, 1, GFP_KERNEL); if (id < 0) id = 0xffffffff; @@ -118,8 +118,8 @@ static void hci_sock_free_cookie(struct sock *sk) int id = hci_pi(sk)->cookie; if (id) { - hci_pi(sk)->cookie = 0xffffffff; - ida_simple_remove(&sock_cookie_ida, id); + hci_pi(sk)->cookie = 0; + ida_free(&sock_cookie_ida, id); } } @@ -234,7 +234,8 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT && hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && - hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) + hci_skb_pkt_type(skb) != HCI_ISODATA_PKT && + hci_skb_pkt_type(skb) != HCI_DRV_PKT) continue; } else { /* Don't send frame to other channel types */ @@ -264,6 +265,53 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) kfree_skb(skb_copy); } +static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb) +{ + struct scm_creds *creds; + + if (!sk || WARN_ON(!skb)) + return; + + creds = &bt_cb(skb)->creds; + + /* Check if peer credentials is set */ + if (!sk->sk_peer_pid) { + /* Check if parent peer credentials is set */ + if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid) + sk = bt_sk(sk)->parent; + else + return; + } + + /* Check if scm_creds already set */ + if (creds->pid == pid_vnr(sk->sk_peer_pid)) + return; + + memset(creds, 0, sizeof(*creds)); + + creds->pid = pid_vnr(sk->sk_peer_pid); + if (sk->sk_peer_cred) { + creds->uid = sk->sk_peer_cred->uid; + creds->gid = sk->sk_peer_cred->gid; + } +} + +static struct sk_buff *hci_skb_clone(struct sk_buff *skb) +{ + struct sk_buff *nskb; + + if (!skb) + return NULL; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return NULL; + + hci_sock_copy_creds(skb->sk, nskb); + + return nskb; +} + /* Send frame to sockets with specific channel */ static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb, int flag, struct sock *skip_sk) @@ -289,7 +337,7 @@ static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb, if (hci_pi(sk)->channel != channel) continue; - nskb = skb_clone(skb, GFP_ATOMIC); + nskb = hci_skb_clone(skb); if (!nskb) continue; @@ -344,6 +392,12 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) else opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT); break; + case HCI_DRV_PKT: + if (bt_cb(skb)->incoming) + opcode = cpu_to_le16(HCI_MON_DRV_RX_PKT); + else + opcode = cpu_to_le16(HCI_MON_DRV_TX_PKT); + break; case HCI_DIAG_PKT: opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG); break; @@ -356,6 +410,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) if (!skb_copy) return; + hci_sock_copy_creds(skb->sk, skb_copy); + /* Put header before the data */ hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE); hdr->opcode = opcode; @@ -436,10 +492,11 @@ static struct 
sk_buff *create_monitor_event(struct hci_dev *hdev, int event) return NULL; ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE); - ni->type = hdev->dev_type; + ni->type = 0x00; /* Old hdev->dev_type */ ni->bus = hdev->bus; bacpy(&ni->bdaddr, &hdev->bdaddr); - memcpy(ni->name, hdev->name, 8); + memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name, + strnlen(hdev->name, sizeof(ni->name)), '\0'); opcode = cpu_to_le16(HCI_MON_NEW_INDEX); break; @@ -531,10 +588,12 @@ static struct sk_buff *create_monitor_ctrl_open(struct sock *sk) return NULL; } - skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC); + skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC); if (!skb) return NULL; + hci_sock_copy_creds(sk, skb); + flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0; put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); @@ -580,6 +639,8 @@ static struct sk_buff *create_monitor_ctrl_close(struct sock *sk) if (!skb) return NULL; + hci_sock_copy_creds(sk, skb); + put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); __net_timestamp(skb); @@ -606,6 +667,8 @@ static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index, if (!skb) return NULL; + hci_sock_copy_creds(sk, skb); + put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); put_unaligned_le16(opcode, skb_put(skb, 2)); @@ -638,6 +701,8 @@ send_monitor_note(struct sock *sk, const char *fmt, ...) if (!skb) return; + hci_sock_copy_creds(sk, skb); + va_start(args, fmt); vsprintf(skb_put(skb, len), fmt, args); *(u8 *)skb_put(skb, 1) = 0; @@ -869,7 +934,8 @@ static int hci_sock_release(struct socket *sock) hdev = hci_pi(sk)->hdev; if (hdev) { - if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { + if (hci_pi(sk)->channel == HCI_CHANNEL_USER && + !hci_dev_test_flag(hdev, HCI_UNREGISTER)) { /* When releasing a user channel exclusive access, * call hci_dev_do_close directly instead of calling * hci_dev_close to ensure the exclusive access will @@ -878,6 +944,11 @@ static int hci_sock_release(struct socket *sock) * The checking of HCI_AUTO_OFF is not needed in this * case since it will have been cleared already when * opening the user channel. + * + * Make sure to also check that we haven't already + * unregistered since all the cleanup will have already + * been complete and hdev will get released when we put + * below. 
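The hci_sock_copy_creds()/scm_recv() plumbing added in this file lets monitor clients see which process originated a packet. A hedged userspace sketch of reading those credentials, using only the standard socket API, with most error handling elided:

#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void demo_read_creds(int fd, struct ucred *out)
{
	int one = 1;
	char buf[512], ctrl[CMSG_SPACE(sizeof(struct ucred))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctrl,
		.msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;

	/* ask the kernel to attach SCM_CREDENTIALS ancillary data */
	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_CREDENTIALS)
			memcpy(out, CMSG_DATA(cmsg), sizeof(*out));
}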
*/ hci_dev_do_close(hdev); hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); @@ -943,9 +1014,6 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) return -EOPNOTSUPP; - if (hdev->dev_type != HCI_PRIMARY) - return -EOPNOTSUPP; - switch (cmd) { case HCISETRAW: if (!capable(CAP_NET_ADMIN)) @@ -981,6 +1049,34 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, BT_DBG("cmd %x arg %lx", cmd, arg); + /* Make sure the cmd is valid before doing anything */ + switch (cmd) { + case HCIGETDEVLIST: + case HCIGETDEVINFO: + case HCIGETCONNLIST: + case HCIDEVUP: + case HCIDEVDOWN: + case HCIDEVRESET: + case HCIDEVRESTAT: + case HCISETSCAN: + case HCISETAUTH: + case HCISETENCRYPT: + case HCISETPTYPE: + case HCISETLINKPOL: + case HCISETLINKMODE: + case HCISETACLMTU: + case HCISETSCOMTU: + case HCIINQUIRY: + case HCISETRAW: + case HCIGETCONNINFO: + case HCIGETAUTHINFO: + case HCIBLOCKADDR: + case HCIUNBLOCKADDR: + break; + default: + return -ENOIOCTLCMD; + } + lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { @@ -997,7 +1093,14 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, if (hci_sock_gen_cookie(sk)) { struct sk_buff *skb; - if (capable(CAP_NET_ADMIN)) + /* Perform careful checks before setting the HCI_SOCK_TRUSTED + * flag. Make sure that not only the current task but also + * the socket opener has the required capability, since + * privileged programs can be tricked into making ioctl calls + * on HCI sockets, and the socket should not be marked as + * trusted simply because the ioctl caller is privileged. + */ + if (sk_capable(sk, CAP_NET_ADMIN)) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); /* Send event to monitor */ @@ -1082,7 +1185,7 @@ static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd, } #endif -static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, +static int hci_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len) { struct sockaddr_hci haddr; @@ -1208,7 +1311,9 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, goto done; } + hci_dev_lock(hdev); mgmt_index_removed(hdev); + hci_dev_unlock(hdev); err = hci_dev_open(hdev->id); if (err) { @@ -1453,7 +1558,7 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { - int noblock = flags & MSG_DONTWAIT; + struct scm_cookie scm; struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; @@ -1470,7 +1575,7 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, if (sk->sk_state == BT_CLOSED) return 0; - skb = skb_recv_datagram(sk, flags, noblock, &err); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) return err; @@ -1498,11 +1603,16 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, break; } + memset(&scm, 0, sizeof(scm)); + scm.creds = bt_cb(skb)->creds; + skb_free_datagram(sk, skb); if (flags & MSG_TRUNC) copied = skblen; + scm_recv(sock, msg, &scm, flags); + return err ? 
: copied; } @@ -1759,7 +1869,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT && hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && - hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) { + hci_skb_pkt_type(skb) != HCI_ISODATA_PKT && + hci_skb_pkt_type(skb) != HCI_DRV_PKT) { err = -EINVAL; goto drop; } @@ -1825,7 +1936,7 @@ drop: } static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname, - sockptr_t optval, unsigned int len) + sockptr_t optval, unsigned int optlen) { struct hci_ufilter uf = { .opcode = 0 }; struct sock *sk = sock->sk; @@ -1842,10 +1953,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname, switch (optname) { case HCI_DATA_DIR: - if (copy_from_sockptr(&opt, optval, sizeof(opt))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) break; - } if (opt) hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR; @@ -1854,10 +1964,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname, break; case HCI_TIME_STAMP: - if (copy_from_sockptr(&opt, optval, sizeof(opt))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) break; - } if (opt) hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP; @@ -1875,11 +1984,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname, uf.event_mask[1] = *((u32 *) f->event_mask + 1); } - len = min_t(unsigned int, len, sizeof(uf)); - if (copy_from_sockptr(&uf, optval, len)) { - err = -EFAULT; + err = copy_safe_from_sockptr(&uf, sizeof(uf), optval, optlen); + if (err) break; - } if (!capable(CAP_NET_RAW)) { uf.type_mask &= hci_sec_filter.type_mask; @@ -1908,7 +2015,7 @@ done: } static int hci_sock_setsockopt(struct socket *sock, int level, int optname, - sockptr_t optval, unsigned int len) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int err = 0; @@ -1918,7 +2025,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, if (level == SOL_HCI) return hci_sock_setsockopt_old(sock, level, optname, optval, - len); + optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; @@ -1938,10 +2045,9 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, goto done; } - if (copy_from_sockptr(&opt, optval, sizeof(opt))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) break; - } hci_pi(sk)->mtu = opt; break; @@ -2057,6 +2163,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, static void hci_sock_destruct(struct sock *sk) { + mgmt_cleanup(sk); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } @@ -2102,18 +2209,12 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol, sock->ops = &hci_sock_ops; - sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern); + sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC, + kern); if (!sk) return -ENOMEM; - sock_init_data(sock, sk); - - sock_reset_flag(sk, SOCK_ZAPPED); - - sk->sk_protocol = protocol; - sock->state = SS_UNCONNECTED; - sk->sk_state = BT_OPEN; sk->sk_destruct = hci_sock_destruct; bt_sock_link(&hci_sk_list, sk); diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 0feb68f12545..a9f5b1a68356 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -3,6 +3,7 @@ * BlueZ - Bluetooth protocol stack for Linux * 
* Copyright (C) 2021 Intel Corporation + * Copyright 2023 NXP */ #include <linux/property.h> @@ -11,7 +12,7 @@ #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> -#include "hci_request.h" +#include "hci_codec.h" #include "hci_debugfs.h" #include "smp.h" #include "eir.h" @@ -30,6 +31,10 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, hdev->req_result = result; hdev->req_status = HCI_REQ_DONE; + /* Free the request command so it is not used as response */ + kfree_skb(hdev->req_skb); + hdev->req_skb = NULL; + if (skb) { struct sock *sk = hci_skb_sk(skb); @@ -37,15 +42,14 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, if (sk) sock_put(sk); - hdev->req_skb = skb_get(skb); + hdev->req_rsp = skb_get(skb); } wake_up_interruptible(&hdev->req_wait_q); } -static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, - u32 plen, const void *param, - struct sock *sk) +struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, struct sock *sk) { int len = HCI_COMMAND_HDR_SIZE + plen; struct hci_command_hdr *hdr; @@ -108,7 +112,7 @@ static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen, skb_queue_tail(&req->cmd_q, skb); } -static int hci_cmd_sync_run(struct hci_request *req) +static int hci_req_sync_run(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct sk_buff *skb; @@ -141,6 +145,13 @@ static int hci_cmd_sync_run(struct hci_request *req) return 0; } +static void hci_request_init(struct hci_request *req, struct hci_dev *hdev) +{ + skb_queue_head_init(&req->cmd_q); + req->hdev = hdev; + req->err = 0; +} + /* This function requires the caller holds hdev->req_lock. */ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u8 event, u32 timeout, @@ -150,15 +161,15 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, struct sk_buff *skb; int err = 0; - bt_dev_dbg(hdev, "Opcode 0x%4x", opcode); + bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode); - hci_req_init(&req, hdev); + hci_request_init(&req, hdev); hci_cmd_sync_add(&req, opcode, plen, param, event, sk); hdev->req_status = HCI_REQ_PEND; - err = hci_cmd_sync_run(&req); + err = hci_req_sync_run(&req); if (err < 0) return ERR_PTR(err); @@ -185,8 +196,8 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, hdev->req_status = 0; hdev->req_result = 0; - skb = hdev->req_skb; - hdev->req_skb = NULL; + skb = hdev->req_rsp; + hdev->req_rsp = NULL; bt_dev_dbg(hdev, "end: err %d", err); @@ -195,6 +206,12 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, return ERR_PTR(err); } + /* If command return a status event skb will be set to NULL as there are + * no parameters. 
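With the change below, __hci_cmd_sync_sk() and its wrappers return ERR_PTR(-ENODATA) rather than NULL when the controller answers with a bare status event, so every caller needs the pattern sketched here; the demo_ name is illustrative:

static int demo_issue_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
			  const void *param)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT);

	/* status-only completion: success, but nothing to parse */
	if (skb == ERR_PTR(-ENODATA))
		return 0;

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Command Complete parameters live in skb->data */
	kfree_skb(skb);
	return 0;
}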
+ */ + if (!skb) + return ERR_PTR(-ENODATA); + return skb; } EXPORT_SYMBOL(__hci_cmd_sync_sk); @@ -244,19 +261,18 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen, u8 status; skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk); + + /* If command return a status event, skb will be set to -ENODATA */ + if (skb == ERR_PTR(-ENODATA)) + return 0; + if (IS_ERR(skb)) { - bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode, - PTR_ERR(skb)); + if (!event) + bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode, + PTR_ERR(skb)); return PTR_ERR(skb); } - /* If command return a status event skb will be set to NULL as there are - * no parameters, in case of failure IS_ERR(skb) would have be set to - * the actual error would be found with PTR_ERR(skb). - */ - if (!skb) - return 0; - status = skb->data[0]; kfree_skb(skb); @@ -273,43 +289,53 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen, } EXPORT_SYMBOL(__hci_cmd_sync_status); +int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout) +{ + int err; + + hci_req_sync_lock(hdev); + err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout); + hci_req_sync_unlock(hdev); + + return err; +} +EXPORT_SYMBOL(hci_cmd_sync_status); + static void hci_cmd_sync_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work); - struct hci_cmd_sync_work_entry *entry; - hci_cmd_sync_work_func_t func; - hci_cmd_sync_work_destroy_t destroy; - void *data; bt_dev_dbg(hdev, ""); - mutex_lock(&hdev->cmd_sync_work_lock); - entry = list_first_entry(&hdev->cmd_sync_work_list, - struct hci_cmd_sync_work_entry, list); - if (entry) { - list_del(&entry->list); - func = entry->func; - data = entry->data; - destroy = entry->destroy; - kfree(entry); - } else { - func = NULL; - data = NULL; - destroy = NULL; - } - mutex_unlock(&hdev->cmd_sync_work_lock); + /* Dequeue all entries and run them */ + while (1) { + struct hci_cmd_sync_work_entry *entry; - if (func) { - int err; + mutex_lock(&hdev->cmd_sync_work_lock); + entry = list_first_entry_or_null(&hdev->cmd_sync_work_list, + struct hci_cmd_sync_work_entry, + list); + if (entry) + list_del(&entry->list); + mutex_unlock(&hdev->cmd_sync_work_lock); + + if (!entry) + break; - hci_req_sync_lock(hdev); + bt_dev_dbg(hdev, "entry %p", entry); - err = func(hdev, data); + if (entry->func) { + int err; - if (destroy) - destroy(hdev, data, err); + hci_req_sync_lock(hdev); + err = entry->func(hdev, entry->data); + if (entry->destroy) + entry->destroy(hdev, entry->data, err); + hci_req_sync_unlock(hdev); + } - hci_req_sync_unlock(hdev); + kfree(entry); } } @@ -324,13 +350,302 @@ static void hci_cmd_sync_cancel_work(struct work_struct *work) wake_up_interruptible(&hdev->req_wait_q); } +static int hci_scan_disable_sync(struct hci_dev *hdev); +static int scan_disable_sync(struct hci_dev *hdev, void *data) +{ + return hci_scan_disable_sync(hdev); +} + +static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data) +{ + return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0); +} + +static void le_scan_disable(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + le_scan_disable.work); + int status; + + bt_dev_dbg(hdev, ""); + hci_dev_lock(hdev); + + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) + goto _return; + + status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL); + if (status) { + bt_dev_err(hdev, "failed to disable LE scan: %d", 
status); + goto _return; + } + + /* If we were running LE only scan, change discovery state. If + * we were running both LE and BR/EDR inquiry simultaneously, + * and BR/EDR inquiry is already finished, stop discovery, + * otherwise BR/EDR inquiry will stop discovery when finished. + * If we will resolve remote device name, do not change + * discovery state. + */ + + if (hdev->discovery.type == DISCOV_TYPE_LE) + goto discov_stopped; + + if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) + goto _return; + + if (hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) { + if (!test_bit(HCI_INQUIRY, &hdev->flags) && + hdev->discovery.state != DISCOVERY_RESOLVING) + goto discov_stopped; + + goto _return; + } + + status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL); + if (status) { + bt_dev_err(hdev, "inquiry failed: status %d", status); + goto discov_stopped; + } + + goto _return; + +discov_stopped: + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + +_return: + hci_dev_unlock(hdev); +} + +static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, + u8 filter_dup); + +static int reenable_adv_sync(struct hci_dev *hdev, void *data) +{ + bt_dev_dbg(hdev, ""); + + if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && + list_empty(&hdev->adv_instances)) + return 0; + + if (hdev->cur_adv_instance) { + return hci_schedule_adv_instance_sync(hdev, + hdev->cur_adv_instance, + true); + } else { + if (ext_adv_capable(hdev)) { + hci_start_ext_adv_sync(hdev, 0x00); + } else { + hci_update_adv_data_sync(hdev, 0x00); + hci_update_scan_rsp_data_sync(hdev, 0x00); + hci_enable_advertising_sync(hdev); + } + } + + return 0; +} + +static void reenable_adv(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + reenable_adv_work); + int status; + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL); + if (status) + bt_dev_err(hdev, "failed to reenable ADV: %d", status); + + hci_dev_unlock(hdev); +} + +static void cancel_adv_timeout(struct hci_dev *hdev) +{ + if (hdev->adv_instance_timeout) { + hdev->adv_instance_timeout = 0; + cancel_delayed_work(&hdev->adv_instance_expire); + } +} + +/* For a single instance: + * - force == true: The instance will be removed even when its remaining + * lifetime is not zero. + * - force == false: the instance will be deactivated but kept stored unless + * the remaining lifetime is zero. + * + * For instance == 0x00: + * - force == true: All instances will be removed regardless of their timeout + * setting. + * - force == false: Only instances that have a timeout will be removed. + */ +int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk, + u8 instance, bool force) +{ + struct adv_info *adv_instance, *n, *next_instance = NULL; + int err; + u8 rem_inst; + + /* Cancel any timeout concerning the removed instance(s). */ + if (!instance || hdev->cur_adv_instance == instance) + cancel_adv_timeout(hdev); + + /* Get the next instance to advertise BEFORE we remove + * the current one. This can be the same instance again + * if there is only one instance. 
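The force/instance matrix documented above is easiest to read as call sites; a hedged illustration, with illustrative demo_ names:

static int demo_remove_all_adv(struct hci_dev *hdev)
{
	/* instance 0x00 + force: remove every instance unconditionally */
	return hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
}

static int demo_deactivate_adv(struct hci_dev *hdev, u8 instance)
{
	/* no force: deactivate but keep the instance stored unless its
	 * remaining lifetime has already reached zero
	 */
	return hci_clear_adv_instance_sync(hdev, NULL, instance, false);
}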
+ */ + if (instance && hdev->cur_adv_instance == instance) + next_instance = hci_get_next_instance(hdev, instance); + + if (instance == 0x00) { + list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, + list) { + if (!(force || adv_instance->timeout)) + continue; + + rem_inst = adv_instance->instance; + err = hci_remove_adv_instance(hdev, rem_inst); + if (!err) + mgmt_advertising_removed(sk, hdev, rem_inst); + } + } else { + adv_instance = hci_find_adv_instance(hdev, instance); + + if (force || (adv_instance && adv_instance->timeout && + !adv_instance->remaining_time)) { + /* Don't advertise a removed instance. */ + if (next_instance && + next_instance->instance == instance) + next_instance = NULL; + + err = hci_remove_adv_instance(hdev, instance); + if (!err) + mgmt_advertising_removed(sk, hdev, instance); + } + } + + if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) + return 0; + + if (next_instance && !ext_adv_capable(hdev)) + return hci_schedule_adv_instance_sync(hdev, + next_instance->instance, + false); + + return 0; +} + +static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data) +{ + u8 instance = *(u8 *)data; + + kfree(data); + + hci_clear_adv_instance_sync(hdev, NULL, instance, false); + + if (list_empty(&hdev->adv_instances)) + return hci_disable_advertising_sync(hdev); + + return 0; +} + +static void adv_timeout_expire(struct work_struct *work) +{ + u8 *inst_ptr; + struct hci_dev *hdev = container_of(work, struct hci_dev, + adv_instance_expire.work); + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + hdev->adv_instance_timeout = 0; + + if (hdev->cur_adv_instance == 0x00) + goto unlock; + + inst_ptr = kmalloc(1, GFP_KERNEL); + if (!inst_ptr) + goto unlock; + + *inst_ptr = hdev->cur_adv_instance; + hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL); + +unlock: + hci_dev_unlock(hdev); +} + +static bool is_interleave_scanning(struct hci_dev *hdev) +{ + return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; +} + +static int hci_passive_scan_sync(struct hci_dev *hdev); + +static void interleave_scan_work(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + interleave_scan.work); + unsigned long timeout; + + if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { + timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); + } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { + timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); + } else { + bt_dev_err(hdev, "unexpected error"); + return; + } + + hci_passive_scan_sync(hdev); + + hci_dev_lock(hdev); + + switch (hdev->interleave_scan_state) { + case INTERLEAVE_SCAN_ALLOWLIST: + bt_dev_dbg(hdev, "next state: allowlist"); + hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; + break; + case INTERLEAVE_SCAN_NO_FILTER: + bt_dev_dbg(hdev, "next state: no filter"); + hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; + break; + case INTERLEAVE_SCAN_NONE: + bt_dev_err(hdev, "unexpected error"); + } + + hci_dev_unlock(hdev); + + /* Don't continue interleaving if it was canceled */ + if (is_interleave_scanning(hdev)) + queue_delayed_work(hdev->req_workqueue, + &hdev->interleave_scan, timeout); +} + void hci_cmd_sync_init(struct hci_dev *hdev) { INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); INIT_LIST_HEAD(&hdev->cmd_sync_work_list); mutex_init(&hdev->cmd_sync_work_lock); + mutex_init(&hdev->unregister_lock); INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); + 
INIT_WORK(&hdev->reenable_adv_work, reenable_adv); + INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable); + INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); + INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); +} + +static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev, + struct hci_cmd_sync_work_entry *entry, + int err) +{ + if (entry->destroy) + entry->destroy(hdev, entry->data, err); + + list_del(&entry->list); + kfree(entry); } void hci_cmd_sync_clear(struct hci_dev *hdev) @@ -338,17 +653,15 @@ void hci_cmd_sync_clear(struct hci_dev *hdev) struct hci_cmd_sync_work_entry *entry, *tmp; cancel_work_sync(&hdev->cmd_sync_work); + cancel_work_sync(&hdev->reenable_adv_work); - list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { - if (entry->destroy) - entry->destroy(hdev, entry->data, -ECANCELED); - - list_del(&entry->list); - kfree(entry); - } + mutex_lock(&hdev->cmd_sync_work_lock); + list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); + mutex_unlock(&hdev->cmd_sync_work_lock); } -void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err) +void hci_cmd_sync_cancel(struct hci_dev *hdev, int err) { bt_dev_dbg(hdev, "err 0x%2.2x", err); @@ -356,36 +669,53 @@ void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err) hdev->req_result = err; hdev->req_status = HCI_REQ_CANCELED; - cancel_delayed_work_sync(&hdev->cmd_timer); - cancel_delayed_work_sync(&hdev->ncmd_timer); - atomic_set(&hdev->cmd_cnt, 1); - - wake_up_interruptible(&hdev->req_wait_q); + queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work); } } +EXPORT_SYMBOL(hci_cmd_sync_cancel); -void hci_cmd_sync_cancel(struct hci_dev *hdev, int err) +/* Cancel ongoing command request synchronously: + * + * - Set result and mark status to HCI_REQ_CANCELED + * - Wakeup command sync thread + */ +void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err) { bt_dev_dbg(hdev, "err 0x%2.2x", err); if (hdev->req_status == HCI_REQ_PEND) { - hdev->req_result = err; + /* req_result is __u32 so error must be positive to be properly + * propagated. + */ + hdev->req_result = err < 0 ? 
-err : err; hdev->req_status = HCI_REQ_CANCELED; - queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work); + wake_up_interruptible(&hdev->req_wait_q); } } -EXPORT_SYMBOL(hci_cmd_sync_cancel); +EXPORT_SYMBOL(hci_cmd_sync_cancel_sync); -int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, - void *data, hci_cmd_sync_work_destroy_t destroy) +/* Submit HCI command to be run as cmd_sync_work: + * + * - hdev must _not_ be unregistered + */ +int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) { struct hci_cmd_sync_work_entry *entry; + int err = 0; - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; + mutex_lock(&hdev->unregister_lock); + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + err = -ENODEV; + goto unlock; + } + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + err = -ENOMEM; + goto unlock; + } entry->func = func; entry->data = data; entry->destroy = destroy; @@ -396,10 +726,182 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); - return 0; +unlock: + mutex_unlock(&hdev->unregister_lock); + return err; +} +EXPORT_SYMBOL(hci_cmd_sync_submit); + +/* Queue HCI command: + * + * - hdev must be running + */ +int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + /* Only queue the command if hdev is running, which means it has been + * opened and is either in its init phase or already up. + */ + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -ENETDOWN; + + return hci_cmd_sync_submit(hdev, func, data, destroy); } EXPORT_SYMBOL(hci_cmd_sync_queue); + +static struct hci_cmd_sync_work_entry * +_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { + if (func && entry->func != func) + continue; + + if (data && entry->data != data) + continue; + + if (destroy && entry->destroy != destroy) + continue; + + return entry; + } + + return NULL; +} + +/* Queue HCI command entry once: + * + * - Look up if an entry already exists and, only if it doesn't, create a new + * entry and queue it. + */ +int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy)) + return 0; + + return hci_cmd_sync_queue(hdev, func, data, destroy); +} +EXPORT_SYMBOL(hci_cmd_sync_queue_once); + +/* Run HCI command: + * + * - hdev must be running + * - if on cmd_sync_work then run immediately otherwise queue + */ +int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + /* Only queue the command if hdev is running, which means it has been + * opened and is either in its init phase or already up. + */ + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -ENETDOWN; + + /* If on cmd_sync_work then run immediately otherwise queue */ + if (current_work() == &hdev->cmd_sync_work) + return func(hdev, data); + + return hci_cmd_sync_submit(hdev, func, data, destroy); +} +EXPORT_SYMBOL(hci_cmd_sync_run); + +/* Run HCI command entry once: + * + * - Look up if an entry already exists and, only if it doesn't, create a new + * entry and run it.
+ * - if on cmd_sync_work then run immediately otherwise queue + */ +int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy)) + return 0; + + return hci_cmd_sync_run(hdev, func, data, destroy); +} +EXPORT_SYMBOL(hci_cmd_sync_run_once); + +/* Lookup HCI command entry: + * + * - Return first entry that matches by function callback or data or + * destroy callback. + */ +struct hci_cmd_sync_work_entry * +hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry; + + mutex_lock(&hdev->cmd_sync_work_lock); + entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy); + mutex_unlock(&hdev->cmd_sync_work_lock); + + return entry; +} +EXPORT_SYMBOL(hci_cmd_sync_lookup_entry); + +/* Cancel HCI command entry */ +void hci_cmd_sync_cancel_entry(struct hci_dev *hdev, + struct hci_cmd_sync_work_entry *entry) +{ + mutex_lock(&hdev->cmd_sync_work_lock); + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); + mutex_unlock(&hdev->cmd_sync_work_lock); +} +EXPORT_SYMBOL(hci_cmd_sync_cancel_entry); + +/* Dequeue one HCI command entry: + * + * - Lookup and cancel first entry that matches. + */ +bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev, + hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry; + + mutex_lock(&hdev->cmd_sync_work_lock); + + entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy); + if (!entry) { + mutex_unlock(&hdev->cmd_sync_work_lock); + return false; + } + + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); + + mutex_unlock(&hdev->cmd_sync_work_lock); + + return true; +} +EXPORT_SYMBOL(hci_cmd_sync_dequeue_once); + +/* Dequeue HCI command entry: + * + * - Lookup and cancel any entry that matches by function callback or data or + * destroy callback. + */ +bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry; + bool ret = false; + + mutex_lock(&hdev->cmd_sync_work_lock); + while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data, + destroy))) { + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); + ret = true; + } + mutex_unlock(&hdev->cmd_sync_work_lock); + + return ret; +} +EXPORT_SYMBOL(hci_cmd_sync_dequeue); + int hci_update_eir_sync(struct hci_dev *hdev) { struct hci_cp_write_eir cp; @@ -535,9 +1037,9 @@ static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa) { - /* If we're advertising or initiating an LE connection we can't - * go ahead and change the random address at this time. This is - * because the eventual initiator address used for the + /* If a random_addr has been set we're advertising or initiating an LE + * connection we can't go ahead and change the random address at this + * time. This is because the eventual initiator address used for the * subsequently created connection will be undefined (some * controllers use the new address and others the one we had * when the operation started). @@ -545,8 +1047,9 @@ static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa) * In this kind of scenario skip the update and let the random * address be updated at the next cycle. 
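Taken together, the entry helpers introduced above give an idempotent scheduling API; a hedged usage sketch, with illustrative demo_ names:

static int demo_sync_func(struct hci_dev *hdev, void *data)
{
	/* would issue __hci_cmd_sync_status() calls here */
	return 0;
}

static void demo_schedule_once_then_drop(struct hci_dev *hdev)
{
	/* a second identical call is a no-op while the first is pending */
	hci_cmd_sync_queue_once(hdev, demo_sync_func, NULL, NULL);

	/* cancel every matching pending entry; destroy callbacks, when
	 * set, are invoked with -ECANCELED
	 */
	hci_cmd_sync_dequeue(hdev, demo_sync_func, NULL, NULL);
}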
*/ - if (hci_dev_test_flag(hdev, HCI_LE_ADV) || - hci_lookup_le_connect(hdev)) { + if (bacmp(&hdev->random_addr, BDADDR_ANY) && + (hci_dev_test_flag(hdev, HCI_LE_ADV) || + hci_lookup_le_connect(hdev))) { bt_dev_dbg(hdev, "Deferring random address update"); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); return 0; @@ -569,7 +1072,7 @@ int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy, /* If Controller supports LL Privacy use own address type is * 0x03 */ - if (use_ll_privacy(hdev)) + if (ll_privacy_capable(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; @@ -652,11 +1155,10 @@ static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) struct hci_cp_ext_adv_set *set; u8 data[sizeof(*cp) + sizeof(*set) * 1]; u8 size; + struct adv_info *adv = NULL; /* If request specifies an instance that doesn't exist, fail */ if (instance > 0) { - struct adv_info *adv; - adv = hci_find_adv_instance(hdev, instance); if (!adv) return -EINVAL; @@ -675,7 +1177,7 @@ static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) cp->num_of_sets = !!instance; cp->enable = 0x00; - set->handle = instance; + set->handle = adv ? adv->handle : instance; size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets; @@ -709,10 +1211,127 @@ static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } +static int +hci_set_ext_adv_params_sync(struct hci_dev *hdev, struct adv_info *adv, + const struct hci_cp_le_set_ext_adv_params *cp, + struct hci_rp_le_set_ext_adv_params *rp) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(*cp), + cp, HCI_CMD_TIMEOUT); + + /* If command return a status event, skb will be set to -ENODATA */ + if (skb == ERR_PTR(-ENODATA)) + return 0; + + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", + HCI_OP_LE_SET_EXT_ADV_PARAMS, PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*rp)) { + bt_dev_err(hdev, "Invalid response length for 0x%4.4x: %u", + HCI_OP_LE_SET_EXT_ADV_PARAMS, skb->len); + kfree_skb(skb); + return -EIO; + } + + memcpy(rp, skb->data, sizeof(*rp)); + kfree_skb(skb); + + if (!rp->status) { + hdev->adv_addr_type = cp->own_addr_type; + if (!cp->handle) { + /* Store in hdev for instance 0 */ + hdev->adv_tx_power = rp->tx_power; + } else if (adv) { + adv->tx_power = rp->tx_power; + } + } + + return rp->status; +} + +static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length, + HCI_MAX_EXT_AD_LENGTH); + u8 len; + struct adv_info *adv = NULL; + int err; + + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv || !adv->adv_data_changed) + return 0; + } + + len = eir_create_adv_data(hdev, instance, pdu->data, + HCI_MAX_EXT_AD_LENGTH); + + pdu->length = len; + pdu->handle = adv ? 
adv->handle : instance; + pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; + pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG; + + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, + struct_size(pdu, data, len), pdu, + HCI_CMD_TIMEOUT); + if (err) + return err; + + /* Update data if the command succeed */ + if (adv) { + adv->adv_data_changed = false; + } else { + memcpy(hdev->adv_data, pdu->data, len); + hdev->adv_data_len = len; + } + + return 0; +} + +static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_adv_data cp; + u8 len; + + memset(&cp, 0, sizeof(cp)); + + len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data)); + + /* There's nothing to do if the data hasn't changed */ + if (hdev->adv_data_len == len && + memcmp(cp.data, hdev->adv_data, len) == 0) + return 0; + + memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); + hdev->adv_data_len = len; + + cp.length = len; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return 0; + + if (ext_adv_capable(hdev)) + return hci_set_ext_adv_data_sync(hdev, instance); + + return hci_set_adv_data_sync(hdev, instance); +} + int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_ext_adv_params cp; - bool connectable; + struct hci_rp_le_set_ext_adv_params rp; + bool connectable, require_privacy; u32 flags; bdaddr_t random_addr; u8 own_addr_type; @@ -732,7 +1351,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) * Command Disallowed error, so we must first disable the * instance if it is active. */ - if (adv && !adv->pending) { + if (adv) { err = hci_disable_ext_adv_instance_sync(hdev, instance); if (err) return err; @@ -750,10 +1369,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) return -EPERM; /* Set require_privacy to true only when non-connectable - * advertising is used. In that case it is fine to use a - * non-resolvable private address. + * advertising is used and it is not periodic. + * In that case it is fine to use a non-resolvable private address. */ - err = hci_get_random_address(hdev, !connectable, + require_privacy = !connectable && !(adv && adv->periodic); + + err = hci_get_random_address(hdev, require_privacy, adv_use_rpa(hdev, flags), adv, &own_addr_type, &random_addr); if (err < 0) @@ -765,10 +1386,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) hci_cpu_to_le24(adv->min_interval, cp.min_interval); hci_cpu_to_le24(adv->max_interval, cp.max_interval); cp.tx_power = adv->tx_power; + cp.sid = adv->sid; } else { hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; + cp.sid = 0x00; } secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); @@ -804,7 +1427,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) cp.own_addr_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; - cp.handle = instance; + cp.handle = adv ? 
adv->handle : instance; if (flags & MGMT_ADV_FLAG_SEC_2M) { cp.primary_phy = HCI_ADV_PHY_1M; @@ -818,8 +1441,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) cp.secondary_phy = HCI_ADV_PHY_1M; } - err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, - sizeof(cp), &cp, HCI_CMD_TIMEOUT); + err = hci_set_ext_adv_params_sync(hdev, adv, &cp, &rp); + if (err) + return err; + + /* Update adv data as tx power is known now */ + err = hci_set_ext_adv_data_sync(hdev, cp.handle); if (err) return err; @@ -844,31 +1471,39 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) { - struct { - struct hci_cp_le_set_ext_scan_rsp_data cp; - u8 data[HCI_MAX_EXT_AD_LENGTH]; - } pdu; + DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length, + HCI_MAX_EXT_AD_LENGTH); u8 len; + struct adv_info *adv = NULL; + int err; - memset(&pdu, 0, sizeof(pdu)); + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv || !adv->scan_rsp_changed) + return 0; + } - len = eir_create_scan_rsp(hdev, instance, pdu.data); + len = eir_create_scan_rsp(hdev, instance, pdu->data); - if (hdev->scan_rsp_data_len == len && - !memcmp(pdu.data, hdev->scan_rsp_data, len)) - return 0; + pdu->handle = adv ? adv->handle : instance; + pdu->length = len; + pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; + pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG; - memcpy(hdev->scan_rsp_data, pdu.data, len); - hdev->scan_rsp_data_len = len; + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, + struct_size(pdu, data, len), pdu, + HCI_CMD_TIMEOUT); + if (err) + return err; - pdu.cp.handle = instance; - pdu.cp.length = len; - pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; - pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; + if (adv) { + adv->scan_rsp_changed = false; + } else { + memcpy(hdev->scan_rsp_data, pdu->data, len); + hdev->scan_rsp_data_len = len; + } - return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, - sizeof(pdu.cp) + len, &pdu.cp, - HCI_CMD_TIMEOUT); + return 0; } static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) @@ -932,7 +1567,7 @@ int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance) memset(set, 0, sizeof(*set)); - set->handle = instance; + set->handle = adv ? adv->handle : instance; /* Set duration per instance since controller is responsible for * scheduling it. @@ -965,6 +1600,206 @@ int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance) return hci_enable_ext_advertising_sync(hdev, instance); } +int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_per_adv_enable cp; + struct adv_info *adv = NULL; + + /* If periodic advertising already disabled there is nothing to do. 
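Several hunks in this area replace a bare cp.handle = instance with adv ? adv->handle : instance; the point is that the controller-side advertising set handle need not equal the mgmt instance number. A hedged helper expressing that mapping, with an illustrative demo_ name:

static u8 demo_adv_handle(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv = hci_find_adv_instance(hdev, instance);

	/* fall back to the raw instance number when no adv_info exists,
	 * e.g. for the general-purpose instance 0x00
	 */
	return adv ? adv->handle : instance;
}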
*/ + adv = hci_find_adv_instance(hdev, instance); + if (!adv || !adv->periodic_enabled) + return 0; + + memset(&cp, 0, sizeof(cp)); + + cp.enable = 0x00; + cp.handle = instance; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance, + u16 min_interval, u16 max_interval) +{ + struct hci_cp_le_set_per_adv_params cp; + + memset(&cp, 0, sizeof(cp)); + + if (!min_interval) + min_interval = DISCOV_LE_PER_ADV_INT_MIN; + + if (!max_interval) + max_interval = DISCOV_LE_PER_ADV_INT_MAX; + + cp.handle = instance; + cp.min_interval = cpu_to_le16(min_interval); + cp.max_interval = cpu_to_le16(max_interval); + cp.periodic_properties = 0x0000; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length, + HCI_MAX_PER_AD_LENGTH); + u8 len; + struct adv_info *adv = NULL; + + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv || !adv->periodic) + return 0; + } + + len = eir_create_per_adv_data(hdev, instance, pdu->data); + + pdu->length = len; + pdu->handle = adv ? adv->handle : instance; + pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA, + struct_size(pdu, data, len), pdu, + HCI_CMD_TIMEOUT); +} + +static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_per_adv_enable cp; + struct adv_info *adv = NULL; + + /* If periodic advertising already enabled there is nothing to do. */ + adv = hci_find_adv_instance(hdev, instance); + if (adv && adv->periodic_enabled) + return 0; + + memset(&cp, 0, sizeof(cp)); + + cp.enable = 0x01; + cp.handle = instance; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +/* Checks if periodic advertising data contains a Basic Announcement and if it + * does generates a Broadcast ID and add Broadcast Announcement. + */ +static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv) +{ + u8 bid[3]; + u8 ad[HCI_MAX_EXT_AD_LENGTH]; + u8 len; + + /* Skip if NULL adv as instance 0x00 is used for general purpose + * advertising so it cannot used for the likes of Broadcast Announcement + * as it can be overwritten at any point. + */ + if (!adv) + return 0; + + /* Check if PA data doesn't contains a Basic Audio Announcement then + * there is nothing to do. + */ + if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len, + 0x1851, NULL)) + return 0; + + /* Check if advertising data already has a Broadcast Announcement since + * the process may want to control the Broadcast ID directly and in that + * case the kernel shall no interfere. 
+ */ + if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852, + NULL)) + return 0; + + /* Generate Broadcast ID */ + get_random_bytes(bid, sizeof(bid)); + len = eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid)); + memcpy(ad + len, adv->adv_data, adv->adv_data_len); + hci_set_adv_instance_data(hdev, adv->instance, len + adv->adv_data_len, + ad, 0, NULL); + + return hci_update_adv_data_sync(hdev, adv->instance); +} + +int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 sid, + u8 data_len, u8 *data, u32 flags, u16 min_interval, + u16 max_interval, u16 sync_interval) +{ + struct adv_info *adv = NULL; + int err; + bool added = false; + + hci_disable_per_advertising_sync(hdev, instance); + + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (adv) { + if (sid != HCI_SID_INVALID && adv->sid != sid) { + /* If the SID don't match attempt to find by + * SID. + */ + adv = hci_find_adv_sid(hdev, sid); + if (!adv) { + bt_dev_err(hdev, + "Unable to find adv_info"); + return -EINVAL; + } + } + + /* Turn it into periodic advertising */ + adv->periodic = true; + adv->per_adv_data_len = data_len; + if (data) + memcpy(adv->per_adv_data, data, data_len); + adv->flags = flags; + } else if (!adv) { + /* Create an instance if that could not be found */ + adv = hci_add_per_instance(hdev, instance, sid, flags, + data_len, data, + sync_interval, + sync_interval); + if (IS_ERR(adv)) + return PTR_ERR(adv); + adv->pending = false; + added = true; + } + } + + /* Start advertising */ + err = hci_start_ext_adv_sync(hdev, instance); + if (err < 0) + goto fail; + + err = hci_adv_bcast_annoucement(hdev, adv); + if (err < 0) + goto fail; + + err = hci_set_per_adv_params_sync(hdev, instance, min_interval, + max_interval); + if (err < 0) + goto fail; + + err = hci_set_per_adv_data_sync(hdev, instance); + if (err < 0) + goto fail; + + err = hci_enable_per_advertising_sync(hdev, instance); + if (err < 0) + goto fail; + + return 0; + +fail: + if (added) + hci_remove_adv_instance(hdev, instance); + + return err; +} + static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance) { int err; @@ -1104,78 +1939,18 @@ int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, HCI_CMD_TIMEOUT, sk); } -static void cancel_adv_timeout(struct hci_dev *hdev) -{ - if (hdev->adv_instance_timeout) { - hdev->adv_instance_timeout = 0; - cancel_delayed_work(&hdev->adv_instance_expire); - } -} - -static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) +int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason) { - struct { - struct hci_cp_le_set_ext_adv_data cp; - u8 data[HCI_MAX_EXT_AD_LENGTH]; - } pdu; - u8 len; - - memset(&pdu, 0, sizeof(pdu)); - - len = eir_create_adv_data(hdev, instance, pdu.data); - - /* There's nothing to do if the data hasn't changed */ - if (hdev->adv_data_len == len && - memcmp(pdu.data, hdev->adv_data, len) == 0) - return 0; - - memcpy(hdev->adv_data, pdu.data, len); - hdev->adv_data_len = len; - - pdu.cp.length = len; - pdu.cp.handle = instance; - pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; - pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; - - return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, - sizeof(pdu.cp) + len, &pdu.cp, - HCI_CMD_TIMEOUT); -} - -static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) -{ - struct hci_cp_le_set_adv_data cp; - u8 len; + struct hci_cp_le_term_big cp; memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + cp.reason = reason; - len = eir_create_adv_data(hdev, instance, 
cp.data); - - /* There's nothing to do if the data hasn't changed */ - if (hdev->adv_data_len == len && - memcmp(cp.data, hdev->adv_data, len) == 0) - return 0; - - memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); - hdev->adv_data_len = len; - - cp.length = len; - - return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, + return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } -int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) -{ - if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) - return 0; - - if (ext_adv_capable(hdev)) - return hci_set_ext_adv_data_sync(hdev, instance); - - return hci_set_adv_data_sync(hdev, instance); -} - int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, bool force) { @@ -1216,7 +1991,7 @@ int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, hdev->adv_instance_timeout = timeout; queue_delayed_work(hdev->req_workqueue, &hdev->adv_instance_expire, - msecs_to_jiffies(timeout * 1000)); + secs_to_jiffies(timeout)); } /* If we're just re-scheduling the same instance again then do not @@ -1404,7 +2179,11 @@ static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val, memset(&cp, 0, sizeof(cp)); cp.enable = val; - cp.filter_dup = filter_dup; + + if (hci_dev_test_flag(hdev, HCI_MESH)) + cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; + else + cp.filter_dup = filter_dup; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); @@ -1420,7 +2199,11 @@ static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, memset(&cp, 0, sizeof(cp)); cp.enable = val; - cp.filter_dup = filter_dup; + + if (val && hci_dev_test_flag(hdev, HCI_MESH)) + cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; + else + cp.filter_dup = filter_dup; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); @@ -1428,7 +2211,7 @@ static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val) { - if (!use_ll_privacy(hdev)) + if (!ll_privacy_capable(hdev)) return 0; /* If controller is not/already resolving we are done. */ @@ -1473,11 +2256,6 @@ static void hci_start_interleave_scan(struct hci_dev *hdev) &hdev->interleave_scan, 0); } -static bool is_interleave_scanning(struct hci_dev *hdev) -{ - return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; -} - static void cancel_interleave_scan(struct hci_dev *hdev) { bt_dev_dbg(hdev, "cancelling interleave scan"); @@ -1525,7 +2303,7 @@ static int hci_le_del_resolve_list_sync(struct hci_dev *hdev, struct hci_cp_le_del_from_resolv_list cp; struct bdaddr_list_with_irk *entry; - if (!use_ll_privacy(hdev)) + if (!ll_privacy_capable(hdev)) return 0; /* Check if the IRK has been programmed */ @@ -1572,17 +2350,25 @@ static int hci_le_del_accept_list_sync(struct hci_dev *hdev, return 0; } +struct conn_params { + bdaddr_t addr; + u8 addr_type; + hci_conn_flags_t flags; + u8 privacy_mode; +}; + /* Adds connection to resolve list if needed. 
* Setting params to NULL programs local hdev->irk */ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, - struct hci_conn_params *params) + struct conn_params *params) { struct hci_cp_le_add_to_resolv_list cp; struct smp_irk *irk; struct bdaddr_list_with_irk *entry; + struct hci_conn_params *p; - if (!use_ll_privacy(hdev)) + if (!ll_privacy_capable(hdev)) return 0; /* Attempt to program local identity address, type and irk if params is @@ -1595,7 +2381,8 @@ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type); memcpy(cp.peer_irk, hdev->irk, 16); goto done; - } + } else if (!(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION)) + return 0; irk = hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type); if (!irk) @@ -1612,6 +2399,19 @@ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, bacpy(&cp.bdaddr, ¶ms->addr); memcpy(cp.peer_irk, irk->val, 16); + /* Default privacy mode is always Network */ + params->privacy_mode = HCI_NETWORK_PRIVACY; + + rcu_read_lock(); + p = hci_pend_le_action_lookup(&hdev->pend_le_conns, + ¶ms->addr, params->addr_type); + if (!p) + p = hci_pend_le_action_lookup(&hdev->pend_le_reports, + ¶ms->addr, params->addr_type); + if (p) + WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY); + rcu_read_unlock(); + done: if (hci_dev_test_flag(hdev, HCI_PRIVACY)) memcpy(cp.local_irk, hdev->irk, 16); @@ -1624,11 +2424,15 @@ done: /* Set Device Privacy Mode. */ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, - struct hci_conn_params *params) + struct conn_params *params) { struct hci_cp_le_set_privacy_mode cp; struct smp_irk *irk; + if (!ll_privacy_capable(hdev) || + !(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION)) + return 0; + /* If device privacy mode has already been set there is nothing to do */ if (params->privacy_mode == HCI_DEVICE_PRIVACY) return 0; @@ -1637,7 +2441,7 @@ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, * indicates that LL Privacy has been enabled and * HCI_OP_LE_SET_PRIVACY_MODE is supported. */ - if (!test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, params->flags)) + if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)) return 0; irk = hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type); @@ -1649,6 +2453,8 @@ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, bacpy(&cp.bdaddr, &irk->bdaddr); cp.mode = HCI_DEVICE_PRIVACY; + /* Note: params->privacy_mode is not updated since it is a copy */ + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } @@ -1658,26 +2464,23 @@ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, * properly set the privacy mode. 
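 * (In condensed sketch form the ordering used below is:
 *
 *	hci_le_add_resolve_list_sync(hdev, params);
 *	hci_le_set_privacy_mode_sync(hdev, params);
 *	... then program the accept list entry itself ...)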
*/ static int hci_le_add_accept_list_sync(struct hci_dev *hdev, - struct hci_conn_params *params, + struct conn_params *params, u8 *num_entries) { struct hci_cp_le_add_to_accept_list cp; int err; - /* Select filter policy to accept all advertising */ - if (*num_entries >= hdev->le_accept_list_size) - return -ENOSPC; - - /* Accept list can not be used with RPAs */ - if (!use_ll_privacy(hdev) && - hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) { - return -EINVAL; - } - /* During suspend, only wakeable devices can be in acceptlist */ if (hdev->suspended && - !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags)) + !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) { + hci_le_del_accept_list_sync(hdev, ¶ms->addr, + params->addr_type); return 0; + } + + /* Select filter policy to accept all advertising */ + if (*num_entries >= hdev->le_accept_list_size) + return -ENOSPC; /* Attempt to program the device in the resolving list first to avoid * having to rollback in case it fails since the resolving list is @@ -1726,6 +2529,10 @@ static int hci_pause_advertising_sync(struct hci_dev *hdev) int err; int old_state; + /* If controller is not advertising we are done. */ + if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) + return 0; + /* If already been paused there is nothing to do. */ if (hdev->advertising_paused) return 0; @@ -1795,6 +2602,12 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev) hci_remove_ext_adv_instance_sync(hdev, adv->instance, NULL); } + + /* If current advertising instance is set to instance 0x00 + * then we need to re-enable it. + */ + if (hci_dev_test_and_clear_flag(hdev, HCI_LE_ADV_0)) + err = hci_enable_ext_advertising_sync(hdev, 0x00); } else { /* Schedule for most recent instance to be restarted and begin * the software rotation loop @@ -1809,6 +2622,45 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev) return err; } +static int hci_pause_addr_resolution(struct hci_dev *hdev) +{ + int err; + + if (!ll_privacy_capable(hdev)) + return 0; + + if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) + return 0; + + /* Cannot disable addr resolution if scanning is enabled or + * when initiating an LE connection. + */ + if (hci_dev_test_flag(hdev, HCI_LE_SCAN) || + hci_lookup_le_connect(hdev)) { + bt_dev_err(hdev, "Command not allowed when scan/LE connect"); + return -EPERM; + } + + /* Cannot disable addr resolution if advertising is enabled. */ + err = hci_pause_advertising_sync(hdev); + if (err) { + bt_dev_err(hdev, "Pause advertising failed: %d", err); + return err; + } + + err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); + if (err) + bt_dev_err(hdev, "Unable to disable Address Resolution: %d", + err); + + /* Return if address resolution is disabled and RPA is not used. 
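+	 * (When the disable succeeded and scanning still relies on an RPA,
+	 * the function returns here with advertising left paused; otherwise
+	 * advertising is resumed below and the error, if any, is returned.)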
 */
+	if (!err && scan_use_rpa(hdev))
+		return 0;
+
+	hci_resume_advertising_sync(hdev);
+	return err;
+}
+
 struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
 					     bool extended, struct sock *sk)
 {
@@ -1818,16 +2670,72 @@ struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
 	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
 }
 
+static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
+{
+	struct hci_conn_params *params;
+	struct conn_params *p;
+	size_t i;
+
+	rcu_read_lock();
+
+	i = 0;
+	list_for_each_entry_rcu(params, list, action)
+		++i;
+	*n = i;
+
+	rcu_read_unlock();
+
+	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
+	if (!p)
+		return NULL;
+
+	rcu_read_lock();
+
+	i = 0;
+	list_for_each_entry_rcu(params, list, action) {
+		/* Racing adds are handled in next scan update */
+		if (i >= *n)
+			break;
+
+		/* No hdev->lock, but: addr, addr_type are immutable.
+		 * privacy_mode is only written by us or in
+		 * hci_cc_le_set_privacy_mode that we wait for.
+		 * We should be idempotent so MGMT updating flags
+		 * while we are processing is OK.
+		 */
+		bacpy(&p[i].addr, &params->addr);
+		p[i].addr_type = params->addr_type;
+		p[i].flags = READ_ONCE(params->flags);
+		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
+		++i;
+	}
+
+	rcu_read_unlock();
+
+	*n = i;
+	return p;
+}
+
+/* Clear LE Accept List */
+static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
+{
+	if (!(hdev->commands[26] & 0x80))
+		return 0;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
+				     HCI_CMD_TIMEOUT);
+}
+
 /* Device must not be scanning when updating the accept list.
  *
  * Update is done using the following sequence:
  *
- * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
+ * ll_privacy_capable((Disable Advertising) -> Disable Resolving List) ->
  * Remove Devices From Accept List ->
- * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
+ * (has IRK && ll_privacy_capable(Remove Devices From Resolving List))->
  * Add Devices to Accept List ->
- * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
- * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
+ * (has IRK && ll_privacy_capable(Add Devices to Resolving List)) ->
+ * ll_privacy_capable(Enable Resolving List -> (Enable Advertising)) ->
 * Enable Scanning
 *
 * In case of failure advertising shall be restored to its original state and
@@ -1837,16 +2745,18 @@ struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
 */
static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
{
-	struct hci_conn_params *params;
+	struct conn_params *params;
 	struct bdaddr_list *b, *t;
 	u8 num_entries = 0;
 	bool pend_conn, pend_report;
+	u8 filter_policy;
+	size_t i, n;
 	int err;
 
-	/* Pause advertising if resolving list can be used as controllers are
+	/* Pause advertising if resolving list can be used as controllers
 	 * cannot accept resolving list modifications while advertising. 
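 	 * In condensed sketch form the pairing used by this function is:
 	 *
 	 *	hci_pause_advertising_sync(hdev);
 	 *	... rewrite the accept and resolving lists ...
 	 *	hci_resume_advertising_sync(hdev);  (in the done: path)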
 */
-	if (use_ll_privacy(hdev)) {
+	if (ll_privacy_capable(hdev)) {
 		err = hci_pause_advertising_sync(hdev);
 		if (err) {
 			bt_dev_err(hdev, "pause advertising failed: %d", err);
@@ -1864,13 +2774,42 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
 		goto done;
 	}
 
+	/* Force address filtering if PA Sync is in progress */
+	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
+		struct hci_conn *conn;
+
+		conn = hci_conn_hash_lookup_create_pa_sync(hdev);
+		if (conn) {
+			struct conn_params pa;
+
+			memset(&pa, 0, sizeof(pa));
+
+			bacpy(&pa.addr, &conn->dst);
+			pa.addr_type = conn->dst_type;
+
+			/* Clear first since there could be addresses left
+			 * behind.
+			 */
+			hci_le_clear_accept_list_sync(hdev);
+
+			num_entries = 1;
+			err = hci_le_add_accept_list_sync(hdev, &pa,
+							  &num_entries);
+			goto done;
+		}
+	}
+
 	/* Go through the current accept list programmed into the
-	 * controller one by one and check if that address is still
-	 * in the list of pending connections or list of devices to
+	 * controller one by one and check if that address is connected or is
+	 * still in the list of pending connections or list of devices to
 	 * report. If not present in either list, then remove it from
 	 * the controller.
 	 */
 	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
+		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
+			continue;
+
+		/* Pointers not dereferenced, no locks needed */
 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
 						      &b->bdaddr,
 						      b->bdaddr_type);
@@ -1899,23 +2838,50 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
 	 * available accept list entries in the controller, then
 	 * just abort and return the filter policy value to not use the
 	 * accept list.
+	 *
+	 * The list and params may be mutated while we wait for events,
+	 * so make a copy and iterate it.
 	 */
-	list_for_each_entry(params, &hdev->pend_le_conns, action) {
-		err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
-		if (err)
+
+	params = conn_params_copy(&hdev->pend_le_conns, &n);
+	if (!params) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	for (i = 0; i < n; ++i) {
+		err = hci_le_add_accept_list_sync(hdev, &params[i],
+						  &num_entries);
+		if (err) {
+			kvfree(params);
 			goto done;
+		}
 	}
 
+	kvfree(params);
+
 	/* After adding all new pending connections, walk through
 	 * the list of pending reports and also add these to the
 	 * accept list if there is still space. Abort if space runs out.
 	 */
-	list_for_each_entry(params, &hdev->pend_le_reports, action) {
-		err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
-		if (err)
+
+	params = conn_params_copy(&hdev->pend_le_reports, &n);
+	if (!params) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	for (i = 0; i < n; ++i) {
+		err = hci_le_add_accept_list_sync(hdev, &params[i],
+						  &num_entries);
+		if (err) {
+			kvfree(params);
 			goto done;
+		}
 	}
 
+	kvfree(params);
+
 	/* Use the allowlist unless the following conditions are all true:
 	 * - We are not currently suspending
 	 * - There are 1 or more ADV monitors registered and it's not offloaded
@@ -1927,38 +2893,27 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
 		err = -EINVAL;
 
 done:
+	filter_policy = err ? 0x00 : 0x01;
+
 	/* Enable address resolution when LL Privacy is enabled. */
 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
 	if (err)
 		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
 
 	/* Resume advertising if it was paused */
-	if (use_ll_privacy(hdev))
+	if (ll_privacy_capable(hdev))
 		hci_resume_advertising_sync(hdev);
 
 	/* Select filter policy to use accept list */
-	return err ? 
0x00 : 0x01; + return filter_policy; } -/* Returns true if an le connection is in the scanning state */ -static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) +static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp, + u8 type, u16 interval, u16 window) { - struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *c; - - rcu_read_lock(); - - list_for_each_entry_rcu(c, &h->list, list) { - if (c->type == LE_LINK && c->state == BT_CONNECT && - test_bit(HCI_CONN_SCANNING, &c->flags)) { - rcu_read_unlock(); - return true; - } - } - - rcu_read_unlock(); - - return false; + cp->type = type; + cp->interval = cpu_to_le16(interval); + cp->window = cpu_to_le16(window); } static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, @@ -1968,7 +2923,7 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, struct hci_cp_le_set_ext_scan_params *cp; struct hci_cp_le_scan_phy_params *phy; u8 data[sizeof(*cp) + sizeof(*phy) * 2]; - u8 num_phy = 0; + u8 num_phy = 0x00; cp = (void *)data; phy = (void *)cp->data; @@ -1978,28 +2933,64 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, cp->own_addr_type = own_addr_type; cp->filter_policy = filter_policy; + /* Check if PA Sync is in progress then select the PHY based on the + * hci_conn.iso_qos. + */ + if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { + struct hci_cp_le_add_to_accept_list *sent; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); + if (sent) { + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_ba(hdev, PA_LINK, + &sent->bdaddr); + if (conn) { + struct bt_iso_qos *qos = &conn->iso_qos; + + if (qos->bcast.in.phy & BT_ISO_PHY_1M || + qos->bcast.in.phy & BT_ISO_PHY_2M) { + cp->scanning_phys |= LE_SCAN_PHY_1M; + hci_le_scan_phy_params(phy, type, + interval, + window); + num_phy++; + phy++; + } + + if (qos->bcast.in.phy & BT_ISO_PHY_CODED) { + cp->scanning_phys |= LE_SCAN_PHY_CODED; + hci_le_scan_phy_params(phy, type, + interval * 3, + window * 3); + num_phy++; + phy++; + } + + if (num_phy) + goto done; + } + } + } + if (scan_1m(hdev) || scan_2m(hdev)) { cp->scanning_phys |= LE_SCAN_PHY_1M; - - phy->type = type; - phy->interval = cpu_to_le16(interval); - phy->window = cpu_to_le16(window); - + hci_le_scan_phy_params(phy, type, interval, window); num_phy++; phy++; } if (scan_coded(hdev)) { cp->scanning_phys |= LE_SCAN_PHY_CODED; - - phy->type = type; - phy->interval = cpu_to_le16(interval); - phy->window = cpu_to_le16(window); - + hci_le_scan_phy_params(phy, type, interval * 3, window * 3); num_phy++; phy++; } +done: + if (!num_phy) + return -EINVAL; + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS, sizeof(*cp) + sizeof(*phy) * num_phy, data, HCI_CMD_TIMEOUT); @@ -2051,6 +3042,7 @@ static int hci_passive_scan_sync(struct hci_dev *hdev) u8 own_addr_type; u8 filter_policy; u16 window, interval; + u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE; int err; if (hdev->scanning_paused) { @@ -2086,6 +3078,27 @@ static int hci_passive_scan_sync(struct hci_dev *hdev) */ filter_policy = hci_update_accept_list_sync(hdev); + /* If suspended and filter_policy set to 0x00 (no acceptlist) then + * passive scanning cannot be started since that would require the host + * to be woken up to process the reports. + */ + if (hdev->suspended && !filter_policy) { + /* Check if accept list is empty then there is no need to scan + * while suspended. 
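+		 * (An empty accept list means nothing was programmed for
+		 * wakeup, so scanning while suspended would only wake the
+		 * host for reports it cannot act on.)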
+		 */
+		if (list_empty(&hdev->le_accept_list))
+			return 0;
+
+		/* If there are devices in the accept_list, it means some
+		 * devices could not be programmed, which in the non-suspended
+		 * case would require filter_policy to stay 0x00 so the host
+		 * does the filtering. Since this is the suspended case, the
+		 * devices needing host filtering can be ignored so that the
+		 * devices in the acceptlist are able to wake up the system.
+		 */
+		filter_policy = 0x01;
+	}
+
 	/* When the controller is using random resolvable addresses and
 	 * with that having LE privacy enabled, then controllers with
 	 * Extended Scanner Filter Policies support can now enable support
@@ -2108,16 +3121,35 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
 	} else if (hci_is_adv_monitoring(hdev)) {
 		window = hdev->le_scan_window_adv_monitor;
 		interval = hdev->le_scan_int_adv_monitor;
+
+		/* Disable the duplicates filter when scanning for an
+		 * advertisement monitor, for the following reasons.
+		 *
+		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
+		 * controllers ignore RSSI_Sampling_Period when the duplicates
+		 * filter is enabled.
+		 *
+		 * For SW pattern filtering, when we're not doing interleaved
+		 * scanning, it is necessary to disable the duplicates filter,
+		 * otherwise hosts can only receive one advertisement and it's
+		 * impossible to know if a peer is still in range.
+		 */
+		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
 	} else {
 		window = hdev->le_scan_window;
 		interval = hdev->le_scan_interval;
 	}
 
+	/* Disable all filtering for Mesh */
+	if (hci_dev_test_flag(hdev, HCI_MESH)) {
+		filter_policy = 0;
+		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
+	}
+
 	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
 
 	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
-				   own_addr_type, filter_policy,
-				   LE_SCAN_FILTER_DUP_ENABLE);
+				   own_addr_type, filter_policy, filter_dups);
 }
 
 /* This function controls the passive scanning based on hdev->pend_le_conns
@@ -2127,7 +3159,7 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
 * If there are devices to scan:
 *
 * Disable Scanning -> Update Accept List ->
- * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
+ * ll_privacy_capable((Disable Advertising) -> Disable Resolving List ->
 * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
 * Enable Scanning
 *
@@ -2167,9 +3199,11 @@ int hci_update_passive_scan_sync(struct hci_dev *hdev)
 	bt_dev_dbg(hdev, "ADV monitoring is %s",
 		   hci_is_adv_monitoring(hdev) ? "on" : "off");
 
-	if (list_empty(&hdev->pend_le_conns) &&
+	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
+	    list_empty(&hdev->pend_le_conns) &&
 	    list_empty(&hdev->pend_le_reports) &&
-	    !hci_is_adv_monitoring(hdev)) {
+	    !hci_is_adv_monitoring(hdev) &&
+	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
 		/* If there are no pending LE connections or devices
 		 * to be scanned for or no ADV monitors, we should stop the
 		 * background scanning. 
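 		 * (With HCI_MESH or a pending HCI_PA_SYNC the scan is kept
 		 * running even when there are no pending connections or
 		 * reports.)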
@@ -2204,6 +3238,16 @@ int hci_update_passive_scan_sync(struct hci_dev *hdev)
 	return err;
 }
 
+static int update_scan_sync(struct hci_dev *hdev, void *data)
+{
+	return hci_update_scan_sync(hdev);
+}
+
+int hci_update_scan(struct hci_dev *hdev)
+{
+	return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
+}
+
 static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
 {
 	return hci_update_passive_scan_sync(hdev);
@@ -2220,7 +3264,8 @@ int hci_update_passive_scan(struct hci_dev *hdev)
 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
 		return 0;
 
-	return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
+	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
+				       NULL);
 }
 
 int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
@@ -2313,7 +3358,7 @@ static int hci_powered_update_adv_sync(struct hci_dev *hdev)
 	 * advertising data. This also applies to the case
 	 * where BR/EDR was toggled during the AUTO_OFF phase.
 	 */
-	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
 	    list_empty(&hdev->adv_instances)) {
 		if (ext_adv_capable(hdev)) {
 			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
@@ -2450,13 +3495,13 @@ int hci_update_scan_sync(struct hci_dev *hdev)
 	return hci_write_scan_enable_sync(hdev, scan);
 }
 
-int hci_update_name_sync(struct hci_dev *hdev)
+int hci_update_name_sync(struct hci_dev *hdev, const u8 *name)
 {
 	struct hci_cp_write_local_name cp;
 
 	memset(&cp, 0, sizeof(cp));
 
-	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
+	memcpy(cp.name, name, sizeof(cp.name));
 
 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
 				     sizeof(cp), &cp,
@@ -2468,11 +3513,12 @@ int hci_update_name_sync(struct hci_dev *hdev)
 *
 * HCI_SSP_ENABLED(Enable SSP)
 * HCI_LE_ENABLED(Enable LE)
- * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
+ * HCI_LE_ENABLED(ll_privacy_capable(Add local IRK to Resolving List) ->
 *		      Update adv data)
 * Enable Authentication
* lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
* Set Name -> Set EIR)
+ * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
 */
int hci_powered_update_sync(struct hci_dev *hdev)
{
@@ -2508,10 +3554,27 @@ int hci_powered_update_sync(struct hci_dev *hdev)
 		hci_write_fast_connectable_sync(hdev, false);
 		hci_update_scan_sync(hdev);
 		hci_update_class_sync(hdev);
-		hci_update_name_sync(hdev);
+		hci_update_name_sync(hdev, hdev->dev_name);
 		hci_update_eir_sync(hdev);
 	}
 
+	/* If forcing static address is in use or there is no public
+	 * address use the static address as random address (but skip
+	 * the HCI command if the current random address is already the
+	 * static one).
+	 *
+	 * In case BR/EDR has been disabled on a dual-mode controller
+	 * and a static address has been configured, then use that
+	 * address instead of the public BR/EDR address. 
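+	 *
+	 * In condensed sketch form the check below reduces to:
+	 *
+	 *	if ((forced static || (no public addr && BR/EDR disabled)) &&
+	 *	    static addr configured)
+	 *		use hdev->static_addr as the random address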
+ */ + if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || + (!bacmp(&hdev->bdaddr, BDADDR_ANY) && + !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) { + if (bacmp(&hdev->static_addr, BDADDR_ANY)) + return hci_set_random_addr_sync(hdev, + &hdev->static_addr); + } + return 0; } @@ -2538,7 +3601,10 @@ static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) return; - bacpy(&hdev->public_addr, &ba); + if (hci_test_quirk(hdev, HCI_QUIRK_BDADDR_PROPERTY_BROKEN)) + baswap(&hdev->public_addr, &ba); + else + bacpy(&hdev->public_addr, &ba); } struct hci_init_stage { @@ -2610,7 +3676,7 @@ static int hci_init0_sync(struct hci_dev *hdev) bt_dev_dbg(hdev, ""); /* Reset */ - if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + if (!hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE)) { err = hci_reset_sync(hdev); if (err) return err; @@ -2623,7 +3689,7 @@ static int hci_unconf_init_sync(struct hci_dev *hdev) { int err; - if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE)) return 0; err = hci_init0_sync(hdev); @@ -2639,10 +3705,6 @@ static int hci_unconf_init_sync(struct hci_dev *hdev) /* Read Local Supported Features. */ static int hci_read_local_features_sync(struct hci_dev *hdev) { - /* Not all AMP controllers support this command */ - if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20)) - return 0; - return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL, HCI_CMD_TIMEOUT); } @@ -2670,57 +3732,13 @@ static int hci_read_local_cmds_sync(struct hci_dev *hdev) * supported commands. */ if (hdev->hci_ver > BLUETOOTH_VER_1_1 && - !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) + !hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS)) return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL, HCI_CMD_TIMEOUT); return 0; } -/* Read Local AMP Info */ -static int hci_read_local_amp_info_sync(struct hci_dev *hdev) -{ - return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO, - 0, NULL, HCI_CMD_TIMEOUT); -} - -/* Read Data Blk size */ -static int hci_read_data_block_size_sync(struct hci_dev *hdev) -{ - return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, - 0, NULL, HCI_CMD_TIMEOUT); -} - -/* Read Flow Control Mode */ -static int hci_read_flow_control_mode_sync(struct hci_dev *hdev) -{ - return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, - 0, NULL, HCI_CMD_TIMEOUT); -} - -/* Read Location Data */ -static int hci_read_location_data_sync(struct hci_dev *hdev) -{ - return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA, - 0, NULL, HCI_CMD_TIMEOUT); -} - -/* AMP Controller init stage 1 command sequence */ -static const struct hci_init_stage amp_init1[] = { - /* HCI_OP_READ_LOCAL_VERSION */ - HCI_INIT(hci_read_local_version_sync), - /* HCI_OP_READ_LOCAL_COMMANDS */ - HCI_INIT(hci_read_local_cmds_sync), - /* HCI_OP_READ_LOCAL_AMP_INFO */ - HCI_INIT(hci_read_local_amp_info_sync), - /* HCI_OP_READ_DATA_BLOCK_SIZE */ - HCI_INIT(hci_read_data_block_size_sync), - /* HCI_OP_READ_FLOW_CONTROL_MODE */ - HCI_INIT(hci_read_flow_control_mode_sync), - /* HCI_OP_READ_LOCATION_DATA */ - HCI_INIT(hci_read_location_data_sync), -}; - static int hci_init1_sync(struct hci_dev *hdev) { int err; @@ -2728,33 +3746,15 @@ static int hci_init1_sync(struct hci_dev *hdev) bt_dev_dbg(hdev, ""); /* Reset */ - if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + if (!hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE)) { err = hci_reset_sync(hdev); if (err) 
return err; } - switch (hdev->dev_type) { - case HCI_PRIMARY: - hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; - return hci_init_stage_sync(hdev, br_init1); - case HCI_AMP: - hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; - return hci_init_stage_sync(hdev, amp_init1); - default: - bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type); - break; - } - - return 0; + return hci_init_stage_sync(hdev, br_init1); } -/* AMP Controller init stage 2 command sequence */ -static const struct hci_init_stage amp_init2[] = { - /* HCI_OP_READ_LOCAL_FEATURES */ - HCI_INIT(hci_read_local_features_sync), -}; - /* Read Buffer Size (ACL mtu, max pkt, etc.) */ static int hci_read_buffer_size_sync(struct hci_dev *hdev) { @@ -2779,6 +3779,9 @@ static int hci_read_local_name_sync(struct hci_dev *hdev) /* Read Voice Setting */ static int hci_read_voice_setting_sync(struct hci_dev *hdev) { + if (!read_voice_setting_capable(hdev)) + return 0; + return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL, HCI_CMD_TIMEOUT); } @@ -2806,6 +3809,9 @@ static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type, if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; + if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL)) + return 0; + memset(&cp, 0, sizeof(cp)); cp.flt_type = flt_type; @@ -2826,6 +3832,13 @@ static int hci_clear_event_filter_sync(struct hci_dev *hdev) if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED)) return 0; + /* In theory the state machine should not reach here unless + * a hci_set_event_filter_sync() call succeeds, but we do + * the check both for parity and as a future reminder. + */ + if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL)) + return 0; + return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00, BDADDR_ANY, 0x00); } @@ -2839,6 +3852,28 @@ static int hci_write_ca_timeout_sync(struct hci_dev *hdev) sizeof(param), ¶m, HCI_CMD_TIMEOUT); } +/* Enable SCO flow control if supported */ +static int hci_write_sync_flowctl_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_sync_flowctl cp; + int err; + + /* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */ + if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)) || + !hci_test_quirk(hdev, HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.enable = 0x01; + + err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SYNC_FLOWCTL, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (!err) + hci_dev_set_flag(hdev, HCI_SCO_FLOWCTL); + + return err; +} + /* BR Controller init stage 2 command sequence */ static const struct hci_init_stage br_init2[] = { /* HCI_OP_READ_BUFFER_SIZE */ @@ -2857,6 +3892,8 @@ static const struct hci_init_stage br_init2[] = { HCI_INIT(hci_clear_event_filter_sync), /* HCI_OP_WRITE_CA_TIMEOUT */ HCI_INIT(hci_write_ca_timeout_sync), + /* HCI_OP_WRITE_SYNC_FLOWCTL */ + HCI_INIT(hci_write_sync_flowctl_sync), {} }; @@ -2898,7 +3935,7 @@ static int hci_write_inquiry_mode_sync(struct hci_dev *hdev) u8 mode; if (!lmp_inq_rssi_capable(hdev) && - !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) + !hci_test_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE)) return 0; /* If Extended Inquiry Result events are supported, then @@ -2961,6 +3998,12 @@ static const struct hci_init_stage hci_init2[] = { /* Read LE Buffer Size */ static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) { + /* Use Read LE Buffer Size V2 if supported */ + if (iso_capable(hdev) && hdev->commands[41] & 0x20) + return __hci_cmd_sync_status(hdev, + 
HCI_OP_LE_READ_BUFFER_SIZE_V2, + 0, NULL, HCI_CMD_TIMEOUT); + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } @@ -2968,8 +4011,19 @@ static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) /* Read LE Local Supported Features */ static int hci_le_read_local_features_sync(struct hci_dev *hdev) { - return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, - 0, NULL, HCI_CMD_TIMEOUT); + int err; + + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, + 0, NULL, HCI_CMD_TIMEOUT); + if (err) + return err; + + if (ll_ext_feature_capable(hdev) && hdev->commands[47] & BIT(2)) + return __hci_cmd_sync_status(hdev, + HCI_OP_LE_READ_ALL_LOCAL_FEATURES, + 0, NULL, HCI_CMD_TIMEOUT); + + return err; } /* Read LE Supported States */ @@ -2981,10 +4035,10 @@ static int hci_le_read_supported_states_sync(struct hci_dev *hdev) /* LE Controller init stage 2 command sequence */ static const struct hci_init_stage le_init2[] = { - /* HCI_OP_LE_READ_BUFFER_SIZE */ - HCI_INIT(hci_le_read_buffer_size_sync), /* HCI_OP_LE_READ_LOCAL_FEATURES */ HCI_INIT(hci_le_read_local_features_sync), + /* HCI_OP_LE_READ_BUFFER_SIZE */ + HCI_INIT(hci_le_read_buffer_size_sync), /* HCI_OP_LE_READ_SUPPORTED_STATES */ HCI_INIT(hci_le_read_supported_states_sync), {} @@ -2996,8 +4050,9 @@ static int hci_init2_sync(struct hci_dev *hdev) bt_dev_dbg(hdev, ""); - if (hdev->dev_type == HCI_AMP) - return hci_init_stage_sync(hdev, amp_init2); + err = hci_init_stage_sync(hdev, hci_init2); + if (err) + return err; if (lmp_bredr_capable(hdev)) { err = hci_init_stage_sync(hdev, br_init2); @@ -3016,7 +4071,7 @@ static int hci_init2_sync(struct hci_dev *hdev) hci_dev_set_flag(hdev, HCI_LE_ENABLED); } - return hci_init_stage_sync(hdev, hci_init2); + return 0; } static int hci_set_event_mask_sync(struct hci_dev *hdev) @@ -3036,12 +4091,14 @@ static int hci_set_event_mask_sync(struct hci_dev *hdev) if (lmp_bredr_capable(hdev)) { events[4] |= 0x01; /* Flow Specification Complete */ - /* Don't set Disconnect Complete when suspended as that - * would wakeup the host when disconnecting due to - * suspend. + /* Don't set Disconnect Complete and mode change when + * suspended as that would wakeup the host when disconnecting + * due to suspend. 
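+		 * (Bit 4 of events[0] is Disconnect Complete and bit 3 of
+		 * events[2] is Mode Change, hence the 0xef and 0xf7 masks
+		 * below.)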
*/ - if (hdev->suspended) + if (hdev->suspended) { events[0] &= 0xef; + events[2] &= 0xf7; + } } else { /* Use a different default for LE-only devices */ memset(events, 0, sizeof(events)); @@ -3079,7 +4136,7 @@ static int hci_set_event_mask_sync(struct hci_dev *hdev) } if (lmp_inq_rssi_capable(hdev) || - test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) + hci_test_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE)) events[4] |= 0x02; /* Inquiry Result with RSSI */ if (lmp_ext_feat_capable(hdev)) @@ -3131,7 +4188,7 @@ static int hci_read_stored_link_key_sync(struct hci_dev *hdev) struct hci_cp_read_stored_link_key cp; if (!(hdev->commands[6] & 0x20) || - test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) + hci_test_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY)) return 0; memset(&cp, 0, sizeof(cp)); @@ -3179,7 +4236,8 @@ static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) { if (!(hdev->commands[18] & 0x04) || - test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) + !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || + hci_test_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING, @@ -3192,7 +4250,8 @@ static int hci_read_page_scan_type_sync(struct hci_dev *hdev) * support the Read Page Scan Type command. Check support for * this command in the bit mask of supported commands. */ - if (!(hdev->commands[13] & 0x01)) + if (!(hdev->commands[13] & 0x01) || + hci_test_quirk(hdev, HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, @@ -3262,12 +4321,24 @@ static int hci_le_set_event_mask_sync(struct hci_dev *hdev) if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) events[0] |= 0x40; /* LE Data Length Change */ - /* If the controller supports LL Privacy feature, enable - * the corresponding event. + /* If the controller supports LL Privacy feature or LE Extended Adv, + * enable the corresponding event. */ - if (hdev->le_features[0] & HCI_LE_LL_PRIVACY) + if (use_enhanced_conn_complete(hdev)) events[1] |= 0x02; /* LE Enhanced Connection Complete */ + /* Mark Device Privacy if Privacy Mode is supported */ + if (privacy_mode_capable(hdev)) + hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY; + + /* Mark Address Resolution if LL Privacy is supported */ + if (ll_privacy_capable(hdev)) + hdev->conn_flags |= HCI_CONN_FLAG_ADDRESS_RESOLUTION; + + /* Mark PAST if supported */ + if (past_capable(hdev)) + hdev->conn_flags |= HCI_CONN_FLAG_PAST; + /* If the controller supports Extended Scanner Filter * Policies, enable the corresponding event. 
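 	 * (That event is LE Direct Advertising Report, bit 10 of the LE
 	 * event mask.)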
*/ @@ -3337,6 +4408,25 @@ static int hci_le_set_event_mask_sync(struct hci_dev *hdev) if (ext_adv_capable(hdev)) events[2] |= 0x02; /* LE Advertising Set Terminated */ + if (past_receiver_capable(hdev)) + events[2] |= 0x80; /* LE PAST Received */ + + if (cis_capable(hdev)) { + events[3] |= 0x01; /* LE CIS Established */ + if (cis_peripheral_capable(hdev)) + events[3] |= 0x02; /* LE CIS Request */ + } + + if (bis_capable(hdev)) { + events[1] |= 0x20; /* LE PA Report */ + events[1] |= 0x40; /* LE PA Sync Established */ + events[3] |= 0x04; /* LE Create BIG Complete */ + events[3] |= 0x08; /* LE Terminate BIG Complete */ + events[3] |= 0x10; /* LE BIG Sync Established */ + events[3] |= 0x20; /* LE BIG Sync Loss */ + events[4] |= 0x02; /* LE BIG Info Advertising Report */ + } + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), events, HCI_CMD_TIMEOUT); } @@ -3363,7 +4453,7 @@ static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) static int hci_le_read_tx_power_sync(struct hci_dev *hdev) { if (!(hdev->commands[38] & 0x80) || - test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) + hci_test_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, @@ -3380,16 +4470,6 @@ static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev) 0, NULL, HCI_CMD_TIMEOUT); } -/* Clear LE Accept List */ -static int hci_le_clear_accept_list_sync(struct hci_dev *hdev) -{ - if (!(hdev->commands[26] & 0x80)) - return 0; - - return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL, - HCI_CMD_TIMEOUT); -} - /* Read LE Resolving List Size */ static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev) { @@ -3415,7 +4495,8 @@ static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev) { __le16 timeout = cpu_to_le16(hdev->rpa_timeout); - if (!(hdev->commands[35] & 0x04)) + if (!(hdev->commands[35] & 0x04) || + hci_test_quirk(hdev, HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT, @@ -3477,6 +4558,24 @@ static int hci_set_le_support_sync(struct hci_dev *hdev) sizeof(cp), &cp, HCI_CMD_TIMEOUT); } +/* LE Set Host Feature */ +static int hci_le_set_host_feature_sync(struct hci_dev *hdev) +{ + struct hci_cp_le_set_host_feature cp; + + if (!iso_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + /* Connected Isochronous Channels (Host Support) */ + cp.bit_number = 32; + cp.bit_value = iso_enabled(hdev) ? 0x01 : 0x00; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + /* LE Controller init stage 3 command sequence */ static const struct hci_init_stage le_init3[] = { /* HCI_OP_LE_SET_EVENT_MASK */ @@ -3503,6 +4602,8 @@ static const struct hci_init_stage le_init3[] = { HCI_INIT(hci_le_read_num_support_adv_sets_sync), /* HCI_OP_WRITE_LE_HOST_SUPPORTED */ HCI_INIT(hci_set_le_support_sync), + /* HCI_OP_LE_SET_HOST_FEATURE */ + HCI_INIT(hci_le_set_host_feature_sync), {} }; @@ -3540,7 +4641,7 @@ static int hci_delete_stored_link_key_sync(struct hci_dev *hdev) * just disable this command. 
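 	 * (In the supported-commands mask, octet 6 bit 7 is Delete Stored
 	 * Link Key, which is what the check below tests.)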
 */
 	if (!(hdev->commands[6] & 0x80) ||
-	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
+	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY))
 		return 0;
 
 	memset(&cp, 0, sizeof(cp));
@@ -3566,7 +4667,7 @@ static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
 	if (lmp_cpb_central_capable(hdev)) {
 		events[1] |= 0x40;	/* Triggered Clock Capture */
 		events[1] |= 0x80;	/* Synchronization Train Complete */
-		events[2] |= 0x10;	/* Peripheral Page Response Timeout */
+		events[2] |= 0x08;	/* Truncated Page Complete */
 		events[2] |= 0x20;	/* CPB Channel Map Change */
 		changed = true;
 	}
@@ -3578,7 +4679,7 @@ static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
 		events[2] |= 0x01;	/* Synchronization Train Received */
 		events[2] |= 0x02;	/* CPB Receive */
 		events[2] |= 0x04;	/* CPB Timeout */
-		events[2] |= 0x08;	/* Truncated Page Complete */
+		events[2] |= 0x10;	/* Peripheral Page Response Timeout */
 		changed = true;
 	}
 
@@ -3604,11 +4705,12 @@ static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
 /* Read local codec list if the HCI command is supported */
 static int hci_read_local_codecs_sync(struct hci_dev *hdev)
 {
-	if (!(hdev->commands[29] & 0x20))
-		return 0;
+	if (hdev->commands[45] & 0x04)
+		hci_read_supported_codecs_v2(hdev);
+	else if (hdev->commands[29] & 0x20)
+		hci_read_supported_codecs(hdev);
 
-	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL,
-				     HCI_CMD_TIMEOUT);
+	return 0;
 }
 
 /* Read local pairing options if the HCI command is supported */
@@ -3624,7 +4726,7 @@ static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
 /* Get MWS transport configuration if the HCI command is supported */
 static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
 {
-	if (!(hdev->commands[30] & 0x08))
+	if (!mws_transport_config_capable(hdev))
 		return 0;
 
 	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
@@ -3664,7 +4766,8 @@ static int hci_set_err_data_report_sync(struct hci_dev *hdev)
 	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
 
 	if (!(hdev->commands[18] & 0x08) ||
-	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
+	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
+	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING))
 		return 0;
 
 	if (enabled == hdev->err_data_reporting)
@@ -3714,18 +4817,38 @@ static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
 }
 
-/* Set Default PHY parameters if command is supported */
+/* Set Default PHY parameters if the command is supported, enabling all
+ * supported PHYs according to the LE Features bits.
+ */
 static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
 {
 	struct hci_cp_le_set_default_phy cp;
 
-	if (!(hdev->commands[35] & 0x20))
+	if (!(hdev->commands[35] & 0x20)) {
+		/* If the command is not supported it means only 1M PHY is
+		 * supported. 
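+		 * (le_tx_def_phys and le_rx_def_phys are still recorded below
+		 * so the rest of the stack sees a consistent 1M default.)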
+ */ + hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; + hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; return 0; + } memset(&cp, 0, sizeof(cp)); cp.all_phys = 0x00; - cp.tx_phys = hdev->le_tx_def_phys; - cp.rx_phys = hdev->le_rx_def_phys; + cp.tx_phys = HCI_LE_SET_PHY_1M; + cp.rx_phys = HCI_LE_SET_PHY_1M; + + /* Enables 2M PHY if supported */ + if (le_2m_capable(hdev)) { + cp.tx_phys |= HCI_LE_SET_PHY_2M; + cp.rx_phys |= HCI_LE_SET_PHY_2M; + } + + /* Enables Coded PHY if supported */ + if (le_coded_capable(hdev)) { + cp.tx_phys |= HCI_LE_SET_PHY_CODED; + cp.rx_phys |= HCI_LE_SET_PHY_CODED; + } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); @@ -3770,13 +4893,6 @@ static int hci_init_sync(struct hci_dev *hdev) if (err < 0) return err; - /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode - * BR/EDR/LE type controllers. AMP controllers only need the - * first two stages of init. - */ - if (hdev->dev_type != HCI_PRIMARY) - return 0; - err = hci_init3_sync(hdev); if (err < 0) return err; @@ -3801,6 +4917,9 @@ static int hci_init_sync(struct hci_dev *hdev) !hci_dev_test_flag(hdev, HCI_CONFIG)) return 0; + if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED)) + return 0; + hci_debugfs_create_common(hdev); if (lmp_bredr_capable(hdev)) @@ -3812,128 +4931,132 @@ static int hci_init_sync(struct hci_dev *hdev) return 0; } -int hci_dev_open_sync(struct hci_dev *hdev) +#define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc } + +static const struct { + unsigned long quirk; + const char *desc; +} hci_broken_table[] = { + HCI_QUIRK_BROKEN(LOCAL_COMMANDS, + "HCI Read Local Supported Commands not supported"), + HCI_QUIRK_BROKEN(STORED_LINK_KEY, + "HCI Delete Stored Link Key command is advertised, " + "but not supported."), + HCI_QUIRK_BROKEN(ERR_DATA_REPORTING, + "HCI Read Default Erroneous Data Reporting command is " + "advertised, but not supported."), + HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER, + "HCI Read Transmit Power Level command is advertised, " + "but not supported."), + HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL, + "HCI Set Event Filter command not supported."), + HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN, + "HCI Enhanced Setup Synchronous Connection command is " + "advertised, but not supported."), + HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT, + "HCI LE Set Random Private Address Timeout command is " + "advertised, but not supported."), + HCI_QUIRK_BROKEN(EXT_CREATE_CONN, + "HCI LE Extended Create Connection command is " + "advertised, but not supported."), + HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT, + "HCI WRITE AUTH PAYLOAD TIMEOUT command leads " + "to unexpected SMP errors when pairing " + "and will not be used."), + HCI_QUIRK_BROKEN(LE_CODED, + "HCI LE Coded PHY feature bit is set, " + "but its usage is not supported.") +}; + +/* This function handles hdev setup stage: + * + * Calls hdev->setup + * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set. + */ +static int hci_dev_setup_sync(struct hci_dev *hdev) { int ret = 0; + bool invalid_bdaddr; + size_t i; + + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP)) + return 0; bt_dev_dbg(hdev, ""); - if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { - ret = -ENODEV; - goto done; - } + hci_sock_dev_event(hdev, HCI_DEV_SETUP); - if (!hci_dev_test_flag(hdev, HCI_SETUP) && - !hci_dev_test_flag(hdev, HCI_CONFIG)) { - /* Check for rfkill but allow the HCI setup stage to - * proceed (which in itself doesn't cause any RF activity). 
- */ - if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { - ret = -ERFKILL; - goto done; - } + if (hdev->setup) + ret = hdev->setup(hdev); - /* Check for valid public address or a configured static - * random address, but let the HCI setup proceed to - * be able to determine if there is a public address - * or not. - * - * In case of user channel usage, it is not important - * if a public address or static random address is - * available. - * - * This check is only valid for BR/EDR controllers - * since AMP controllers do not have an address. - */ - if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hdev->dev_type == HCI_PRIMARY && - !bacmp(&hdev->bdaddr, BDADDR_ANY) && - !bacmp(&hdev->static_addr, BDADDR_ANY)) { - ret = -EADDRNOTAVAIL; - goto done; - } + for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) { + if (hci_test_quirk(hdev, hci_broken_table[i].quirk)) + bt_dev_warn(hdev, "%s", hci_broken_table[i].desc); } - if (test_bit(HCI_UP, &hdev->flags)) { - ret = -EALREADY; - goto done; - } + /* The transport driver can set the quirk to mark the + * BD_ADDR invalid before creating the HCI device or in + * its setup callback. + */ + invalid_bdaddr = hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) || + hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY); + if (!ret) { + if (hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY) && + !bacmp(&hdev->public_addr, BDADDR_ANY)) + hci_dev_get_bd_addr_from_property(hdev); - if (hdev->open(hdev)) { - ret = -EIO; - goto done; + if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) && + hdev->set_bdaddr) { + ret = hdev->set_bdaddr(hdev, &hdev->public_addr); + if (!ret) + invalid_bdaddr = false; + } } - set_bit(HCI_RUNNING, &hdev->flags); - hci_sock_dev_event(hdev, HCI_DEV_OPEN); - - atomic_set(&hdev->cmd_cnt, 1); - set_bit(HCI_INIT, &hdev->flags); - - if (hci_dev_test_flag(hdev, HCI_SETUP) || - test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { - bool invalid_bdaddr; - - hci_sock_dev_event(hdev, HCI_DEV_SETUP); - - if (hdev->setup) - ret = hdev->setup(hdev); - - /* The transport driver can set the quirk to mark the - * BD_ADDR invalid before creating the HCI device or in - * its setup callback. - */ - invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, - &hdev->quirks); + /* The transport driver can set these quirks before + * creating the HCI device or in its setup callback. + * + * For the invalid BD_ADDR quirk it is possible that + * it becomes a valid address if the bootloader does + * provide it (see above). + * + * In case any of them is set, the controller has to + * start up as unconfigured. + */ + if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || + invalid_bdaddr) + hci_dev_set_flag(hdev, HCI_UNCONFIGURED); - if (ret) - goto setup_failed; + /* For an unconfigured controller it is required to + * read at least the version information provided by + * the Read Local Version Information command. + * + * If the set_bdaddr driver callback is provided, then + * also the original Bluetooth public device address + * will be read using the Read BD Address command. 
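+	 * (hci_unconf_init_sync() below performs exactly that minimal init
+	 * sequence.)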
+ */ + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + return hci_unconf_init_sync(hdev); - if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) { - if (!bacmp(&hdev->public_addr, BDADDR_ANY)) - hci_dev_get_bd_addr_from_property(hdev); + return ret; +} - if (bacmp(&hdev->public_addr, BDADDR_ANY) && - hdev->set_bdaddr) { - ret = hdev->set_bdaddr(hdev, - &hdev->public_addr); +/* This function handles hdev init stage: + * + * Calls hci_dev_setup_sync to perform setup stage + * Calls hci_init_sync to perform HCI command init sequence + */ +static int hci_dev_init_sync(struct hci_dev *hdev) +{ + int ret; - /* If setting of the BD_ADDR from the device - * property succeeds, then treat the address - * as valid even if the invalid BD_ADDR - * quirk indicates otherwise. - */ - if (!ret) - invalid_bdaddr = false; - } - } + bt_dev_dbg(hdev, ""); -setup_failed: - /* The transport driver can set these quirks before - * creating the HCI device or in its setup callback. - * - * For the invalid BD_ADDR quirk it is possible that - * it becomes a valid address if the bootloader does - * provide it (see above). - * - * In case any of them is set, the controller has to - * start up as unconfigured. - */ - if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || - invalid_bdaddr) - hci_dev_set_flag(hdev, HCI_UNCONFIGURED); + atomic_set(&hdev->cmd_cnt, 1); + set_bit(HCI_INIT, &hdev->flags); - /* For an unconfigured controller it is required to - * read at least the version information provided by - * the Read Local Version Information command. - * - * If the set_bdaddr driver callback is provided, then - * also the original Bluetooth public device address - * will be read using the Read BD Address command. - */ - if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) - ret = hci_unconf_init_sync(hdev); - } + ret = hci_dev_setup_sync(hdev); if (hci_dev_test_flag(hdev, HCI_CONFIG)) { /* If public address change is configured, ensure that @@ -3961,7 +5084,7 @@ setup_failed: * then they need to be reprogrammed after the init procedure * completed. */ - if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && + if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) ret = hdev->set_diag(hdev, true); @@ -3973,6 +5096,63 @@ setup_failed: clear_bit(HCI_INIT, &hdev->flags); + return ret; +} + +int hci_dev_open_sync(struct hci_dev *hdev) +{ + int ret; + + bt_dev_dbg(hdev, ""); + + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + ret = -ENODEV; + goto done; + } + + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) { + /* Check for rfkill but allow the HCI setup stage to + * proceed (which in itself doesn't cause any RF activity). + */ + if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { + ret = -ERFKILL; + goto done; + } + + /* Check for valid public address or a configured static + * random address, but let the HCI setup proceed to + * be able to determine if there is a public address + * or not. + * + * In case of user channel usage, it is not important + * if a public address or static random address is + * available. 
+ */ + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + !bacmp(&hdev->bdaddr, BDADDR_ANY) && + !bacmp(&hdev->static_addr, BDADDR_ANY)) { + ret = -EADDRNOTAVAIL; + goto done; + } + } + + if (test_bit(HCI_UP, &hdev->flags)) { + ret = -EALREADY; + goto done; + } + + if (hdev->open(hdev)) { + ret = -EIO; + goto done; + } + + hci_devcd_reset(hdev); + + set_bit(HCI_RUNNING, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_OPEN); + + ret = hci_dev_init_sync(hdev); if (!ret) { hci_dev_hold(hdev); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); @@ -3984,9 +5164,9 @@ setup_failed: !hci_dev_test_flag(hdev, HCI_CONFIG) && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - hci_dev_test_flag(hdev, HCI_MGMT) && - hdev->dev_type == HCI_PRIMARY) { + hci_dev_test_flag(hdev, HCI_MGMT)) { ret = hci_powered_update_sync(hdev); + mgmt_power_on(hdev, ret); } } else { /* Init failed, cleanup */ @@ -4006,10 +5186,16 @@ setup_failed: hdev->flush(hdev); if (hdev->sent_cmd) { + cancel_delayed_work_sync(&hdev->cmd_timer); kfree_skb(hdev->sent_cmd); hdev->sent_cmd = NULL; } + if (hdev->req_skb) { + kfree_skb(hdev->req_skb); + hdev->req_skb = NULL; + } + clear_bit(HCI_RUNNING, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_CLOSE); @@ -4027,17 +5213,42 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev) struct hci_conn_params *p; list_for_each_entry(p, &hdev->le_conn_params, list) { + hci_pend_le_list_del_init(p); if (p->conn) { hci_conn_drop(p->conn); hci_conn_put(p->conn); p->conn = NULL; } - list_del_init(&p->action); } BT_DBG("All LE pending actions cleared"); } +static int hci_dev_shutdown(struct hci_dev *hdev) +{ + int err = 0; + /* Similar to how we first do setup and then set the exclusive access + * bit for userspace, we must first unset userchannel and then clean up. + * Otherwise, the kernel can't properly use the hci channel to clean up + * the controller (some shutdown routines require sending additional + * commands to the controller for example). 
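+	 * (Hence HCI_USER_CHANNEL is cleared below and, when it was set,
+	 * restored once the vendor shutdown hook has run.)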
+ */ + bool was_userchannel = + hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL); + + if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && + test_bit(HCI_UP, &hdev->flags)) { + /* Execute vendor specific shutdown routine */ + if (hdev->shutdown) + err = hdev->shutdown(hdev); + } + + if (was_userchannel) + hci_dev_set_flag(hdev, HCI_USER_CHANNEL); + + return err; +} + int hci_dev_close_sync(struct hci_dev *hdev) { bool auto_off; @@ -4045,19 +5256,27 @@ int hci_dev_close_sync(struct hci_dev *hdev) bt_dev_dbg(hdev, ""); - cancel_delayed_work(&hdev->power_off); - cancel_delayed_work(&hdev->ncmd_timer); + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + disable_delayed_work(&hdev->power_off); + disable_delayed_work(&hdev->ncmd_timer); + disable_delayed_work(&hdev->le_scan_disable); + } else { + cancel_delayed_work(&hdev->power_off); + cancel_delayed_work(&hdev->ncmd_timer); + cancel_delayed_work(&hdev->le_scan_disable); + } - hci_request_cancel_all(hdev); + hci_cmd_sync_cancel_sync(hdev, ENODEV); - if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - test_bit(HCI_UP, &hdev->flags)) { - /* Execute vendor specific shutdown routine */ - if (hdev->shutdown) - err = hdev->shutdown(hdev); + cancel_interleave_scan(hdev); + + if (hdev->adv_instance_timeout) { + cancel_delayed_work_sync(&hdev->adv_instance_expire); + hdev->adv_instance_timeout = 0; } + err = hci_dev_shutdown(hdev); + if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { cancel_delayed_work_sync(&hdev->cmd_timer); return err; @@ -4098,17 +5317,16 @@ int hci_dev_close_sync(struct hci_dev *hdev) auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); - if (!auto_off && hdev->dev_type == HCI_PRIMARY && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hci_dev_test_flag(hdev, HCI_MGMT)) __mgmt_power_off(hdev); hci_inquiry_cache_flush(hdev); hci_pend_le_actions_clear(hdev); hci_conn_hash_flush(hdev); - hci_dev_unlock(hdev); - + /* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */ smp_unregister(hdev); + hci_dev_unlock(hdev); hci_sock_dev_event(hdev, HCI_DEV_DOWN); @@ -4123,7 +5341,7 @@ int hci_dev_close_sync(struct hci_dev *hdev) /* Reset device */ skb_queue_purge(&hdev->cmd_q); atomic_set(&hdev->cmd_cnt, 1); - if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && + if (hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE) && !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { set_bit(HCI_INIT, &hdev->flags); hci_reset_sync(hdev); @@ -4145,6 +5363,12 @@ int hci_dev_close_sync(struct hci_dev *hdev) hdev->sent_cmd = NULL; } + /* Drop last request */ + if (hdev->req_skb) { + kfree_skb(hdev->req_skb); + hdev->req_skb = NULL; + } + clear_bit(HCI_RUNNING, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_CLOSE); @@ -4155,12 +5379,10 @@ int hci_dev_close_sync(struct hci_dev *hdev) hdev->flags &= BIT(HCI_RAW); hci_dev_clear_volatile_flags(hdev); - /* Controller radio is available but is currently powered down */ - hdev->amp_status = AMP_STATUS_POWERED_DOWN; - memset(hdev->eir, 0, sizeof(hdev->eir)); memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); bacpy(&hdev->random_addr, BDADDR_ANY); + hci_codec_list_clear(&hdev->local_codecs); hci_dev_put(hdev); return err; @@ -4193,8 +5415,7 @@ static int hci_power_on_sync(struct hci_dev *hdev) */ if (hci_dev_test_flag(hdev, HCI_RFKILLED) || hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || - (hdev->dev_type == HCI_PRIMARY && - !bacmp(&hdev->bdaddr, BDADDR_ANY) && + (!bacmp(&hdev->bdaddr, 
BDADDR_ANY) &&
 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
 		hci_dev_close_sync(hdev);
@@ -4264,7 +5485,6 @@ int hci_stop_discovery_sync(struct hci_dev *hdev)
 
 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
 		cancel_delayed_work(&hdev->le_scan_disable);
-		cancel_delayed_work(&hdev->le_scan_restart);
 
 		err = hci_scan_disable_sync(hdev);
 		if (err)
@@ -4278,7 +5498,7 @@ int hci_stop_discovery_sync(struct hci_dev *hdev)
 	}
 
 	/* Resume advertising if it was paused */
-	if (use_ll_privacy(hdev))
+	if (ll_privacy_capable(hdev))
 		hci_resume_advertising_sync(hdev);
 
 	/* No further actions needed for LE-only discovery */
@@ -4291,41 +5511,41 @@ int hci_stop_discovery_sync(struct hci_dev *hdev)
 		if (!e)
 			return 0;
 
-		return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
+		/* Ignore cancel errors since they shouldn't interfere with
+		 * stopping of the discovery.
+		 */
+		hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
 	}
 
 	return 0;
 }
 
-static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle,
-					u8 reason)
-{
-	struct hci_cp_disconn_phy_link cp;
-
-	memset(&cp, 0, sizeof(cp));
-	cp.phy_handle = HCI_PHY_HANDLE(handle);
-	cp.reason = reason;
-
-	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
-				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
-}
-
 static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
 			       u8 reason)
 {
 	struct hci_cp_disconnect cp;
 
-	if (conn->type == AMP_LINK)
-		return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);
+	if (conn->type == BIS_LINK || conn->type == PA_LINK) {
+		/* This is a BIS connection, hci_conn_del will
+		 * do the necessary cleanup.
+		 */
+		hci_dev_lock(hdev);
+		hci_conn_failed(conn, reason);
+		hci_dev_unlock(hdev);
+
+		return 0;
+	}
 
 	memset(&cp, 0, sizeof(cp));
 	cp.handle = cpu_to_le16(conn->handle);
 	cp.reason = reason;
 
-	/* Wait for HCI_EV_DISCONN_COMPLETE not HCI_EV_CMD_STATUS when not
-	 * suspending.
+	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
+	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
+	 * used when suspending or powering off, where we don't want to wait
+	 * for the peer's response.
 	 */
-	if (!hdev->suspended)
+	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
 		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
 						sizeof(cp), &cp,
 						HCI_EV_DISCONN_COMPLETE,
@@ -4336,23 +5556,65 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
 }
 
 static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
-				      struct hci_conn *conn)
+				      struct hci_conn *conn, u8 reason)
 {
+	/* Return reason if scanning since the connection will probably be
+	 * cleaned up directly. 
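+	 * (While HCI_CONN_SCANNING is set no LE Create Connection command
+	 * has been issued yet, so there is nothing for the controller to
+	 * cancel.)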
 
 static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
-				      struct hci_conn *conn)
+				      struct hci_conn *conn, u8 reason)
 {
+	/* Return reason if scanning since the connection shall probably be
+	 * cleaned up directly.
+	 */
 	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+		return reason;
+
+	if (conn->role == HCI_ROLE_SLAVE ||
+	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
 		return 0;
 
 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
-				     6, &conn->dst, HCI_CMD_TIMEOUT);
+				     0, NULL, HCI_CMD_TIMEOUT);
 }
 
-static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
+static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
+				   u8 reason)
 {
 	if (conn->type == LE_LINK)
-		return hci_le_connect_cancel_sync(hdev, conn);
+		return hci_le_connect_cancel_sync(hdev, conn, reason);
+
+	if (conn->type == CIS_LINK) {
+		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+		 * page 1857:
+		 *
+		 * If this command is issued for a CIS on the Central and the
+		 * CIS is successfully terminated before being established,
+		 * then an HCI_LE_CIS_Established event shall also be sent for
+		 * this CIS with the Status Operation Cancelled by Host (0x44).
+		 */
+		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
+			return hci_disconnect_sync(hdev, conn, reason);
+
+		/* A CIS with no Create CIS sent has nothing to cancel */
+		return HCI_ERROR_LOCAL_HOST_TERM;
+	}
+
+	if (conn->type == BIS_LINK || conn->type == PA_LINK) {
+		/* There is no way to cancel a BIS without terminating the BIG
+		 * which is done later on connection cleanup.
+		 */
+		return 0;
+	}
 
 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
 		return 0;
 
+	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
+	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
+	 * used when suspending or powering off, where we don't want to wait
+	 * for the peer's response.
+	 */
+	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
+		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
+						6, &conn->dst,
+						HCI_EV_CONN_COMPLETE,
+						HCI_CMD_TIMEOUT, NULL);
+
 	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
 				     6, &conn->dst, HCI_CMD_TIMEOUT);
 }
@@ -4376,11 +5638,30 @@ static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
 			     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
 }
 
+static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
+				  u8 reason)
+{
+	struct hci_cp_le_reject_cis cp;
+
+	memset(&cp, 0, sizeof(cp));
+	cp.handle = cpu_to_le16(conn->handle);
+	cp.reason = reason;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
 static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
 				u8 reason)
 {
 	struct hci_cp_reject_conn_req cp;
 
+	if (conn->type == CIS_LINK)
+		return hci_le_reject_cis_sync(hdev, conn, reason);
+
+	if (conn->type == BIS_LINK || conn->type == PA_LINK)
+		return -EINVAL;
+
 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
 		return hci_reject_sco_sync(hdev, conn, reason);
 
@@ -4392,37 +5673,81 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
 			     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
 }
 
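hci_abort_conn_sync() below replaces the old per-state early returns with a single dispatch followed by a locked post-check. A hypothetical caller (editorial illustration, not from the patch; HCI_ERROR_REMOTE_USER_TERM is the standard user-terminated reason code):

	/* Tear down a connection regardless of its current state; the helper
	 * picks the right HCI command (Disconnect, Create Connection Cancel,
	 * Reject Connection Request) or falls back to local cleanup.
	 */
	err = hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err)
		bt_dev_err(hdev, "failed to abort connection: %d", err);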
-static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
-			       u8 reason)
+int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
 {
+	int err = 0;
+	u16 handle = conn->handle;
+	bool disconnect = false;
+	struct hci_conn *c;
+
 	switch (conn->state) {
 	case BT_CONNECTED:
 	case BT_CONFIG:
-		return hci_disconnect_sync(hdev, conn, reason);
+		err = hci_disconnect_sync(hdev, conn, reason);
+		break;
 	case BT_CONNECT:
-		return hci_connect_cancel_sync(hdev, conn);
+		err = hci_connect_cancel_sync(hdev, conn, reason);
+		break;
 	case BT_CONNECT2:
-		return hci_reject_conn_sync(hdev, conn, reason);
+		err = hci_reject_conn_sync(hdev, conn, reason);
+		break;
+	case BT_OPEN:
+	case BT_BOUND:
+		break;
 	default:
-		conn->state = BT_CLOSED;
+		disconnect = true;
 		break;
 	}
 
-	return 0;
+	hci_dev_lock(hdev);
+
+	/* Check if the connection has been cleaned up concurrently */
+	c = hci_conn_hash_lookup_handle(hdev, handle);
+	if (!c || c != conn) {
+		err = 0;
+		goto unlock;
+	}
+
+	/* Clean up the hci_conn object if it cannot be cancelled, as that
+	 * likely means the controller and host stack are out of sync, or in
+	 * case of LE it was still scanning so it can be cleaned up safely.
+	 */
+	if (disconnect) {
+		conn->state = BT_CLOSED;
+		hci_disconn_cfm(conn, reason);
+		hci_conn_del(conn);
+	} else {
+		hci_conn_failed(conn, reason);
+	}
+
+unlock:
+	hci_dev_unlock(hdev);
+	return err;
 }
 
 static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
 {
-	struct hci_conn *conn, *tmp;
-	int err;
+	struct list_head *head = &hdev->conn_hash.list;
+	struct hci_conn *conn;
 
-	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
-		err = hci_abort_conn_sync(hdev, conn, reason);
-		if (err)
-			return err;
+	rcu_read_lock();
+	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
+		/* Make sure the connection is not freed while unlocking */
+		conn = hci_conn_get(conn);
+		rcu_read_unlock();
+		/* Disregard possible errors since hci_conn_del shall have
+		 * been called even if errors occurred, as an error causes
+		 * hci_conn_failed to be called, which calls hci_conn_del
+		 * internally.
+		 */
+		hci_abort_conn_sync(hdev, conn, reason);
+		hci_conn_put(conn);
+		rcu_read_lock();
 	}
+	rcu_read_unlock();
 
-	return err;
+	return 0;
 }
 
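The loop above is the classic reference-then-unlock RCU walk: hci_abort_conn_sync() may sleep, which is forbidden inside an RCU read-side section, so each element is pinned with a reference before the lock is dropped. In generic form (editorial sketch; do_sleeping_work() is a hypothetical placeholder, and the loop only terminates because the work removes the entry from the list):

	rcu_read_lock();
	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
		conn = hci_conn_get(conn);	/* keep alive outside RCU */
		rcu_read_unlock();

		do_sleeping_work(conn);		/* may block; must unlink conn */

		hci_conn_put(conn);
		rcu_read_lock();
	}
	rcu_read_unlock();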
 /* This function performs the power off HCI command sequence as follows:
@@ -4440,27 +5765,33 @@ static int hci_power_off_sync(struct hci_dev *hdev)
 	if (!test_bit(HCI_UP, &hdev->flags))
 		return 0;
 
+	hci_dev_set_flag(hdev, HCI_POWERING_DOWN);
+
 	if (test_bit(HCI_ISCAN, &hdev->flags) ||
 	    test_bit(HCI_PSCAN, &hdev->flags)) {
 		err = hci_write_scan_enable_sync(hdev, 0x00);
 		if (err)
-			return err;
+			goto out;
 	}
 
 	err = hci_clear_adv_sync(hdev, NULL, false);
 	if (err)
-		return err;
+		goto out;
 
 	err = hci_stop_discovery_sync(hdev);
 	if (err)
-		return err;
+		goto out;
 
 	/* Terminated due to Power Off */
 	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
 	if (err)
-		return err;
+		goto out;
 
-	return hci_dev_close_sync(hdev);
+	err = hci_dev_close_sync(hdev);
+
+out:
+	hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
+	return err;
 }
 
 int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
@@ -4591,7 +5922,7 @@ int hci_update_connectable_sync(struct hci_dev *hdev)
 	return hci_update_passive_scan_sync(hdev);
 }
 
-static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
+int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp)
 {
 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
@@ -4599,7 +5930,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
 
 	bt_dev_dbg(hdev, "");
 
-	if (hci_dev_test_flag(hdev, HCI_INQUIRY))
+	if (test_bit(HCI_INQUIRY, &hdev->flags))
 		return 0;
 
 	hci_dev_lock(hdev);
@@ -4614,6 +5945,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
 
 	memcpy(&cp.lap, giac, sizeof(cp.lap));
 	cp.length = length;
+	cp.num_rsp = num_rsp;
 
 	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
@@ -4642,27 +5974,12 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
 
 	cancel_interleave_scan(hdev);
 
-	/* Pause advertising since active scanning disables address resolution
-	 * which advertising depend on in order to generate its RPAs.
-	 */
-	if (use_ll_privacy(hdev)) {
-		err = hci_pause_advertising_sync(hdev);
-		if (err) {
-			bt_dev_err(hdev, "pause advertising failed: %d", err);
-			goto failed;
-		}
-	}
-
-	/* Disable address resolution while doing active scanning since the
-	 * accept list shall not be used and all reports shall reach the host
-	 * anyway.
+	/* Pause address resolution for active scan and stop advertising if
+	 * privacy is enabled.
 	 */
-	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
-	if (err) {
-		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
-			   err);
+	err = hci_pause_addr_resolution(hdev);
+	if (err)
 		goto failed;
-	}
 
 	/* All active scans will be done with either a resolvable private
 	 * address (when privacy feature has been enabled) or non-resolvable
 	 * private address.
@@ -4673,19 +5990,18 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
 	if (err < 0)
 		own_addr_type = ADDR_LE_DEV_PUBLIC;
 
-	if (hci_is_adv_monitoring(hdev)) {
+	if (hci_is_adv_monitoring(hdev) ||
+	    (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER) &&
+	     hdev->discovery.result_filtering)) {
 		/* Duplicate filter should be disabled when some advertisement
 		 * monitor is activated, otherwise AdvMon can only receive one
 		 * advertisement for one peer(*) during active scanning, and
 		 * might report loss to these peers.
 		 *
-		 * Note that different controllers have different meanings of
-		 * |duplicate|. Some of them consider packets with the same
-		 * address as duplicate, and others consider packets with the
-		 * same address and the same RSSI as duplicate. Although in the
-		 * latter case we don't need to disable duplicate filter, but
-		 * it is common to have active scanning for a short period of
-		 * time, the power impact should be neglectable.
+		 * If the controller does strict duplicate filtering and the
+		 * discovery requires result filtering, disable controller
+		 * based filtering, since it can cause reports that would
+		 * match the host filter to not be reported.
 		 */
 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
 	}
@@ -4698,7 +6014,7 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
 
 failed:
 	/* Resume advertising if it was paused */
-	if (use_ll_privacy(hdev))
+	if (ll_privacy_capable(hdev))
 		hci_resume_advertising_sync(hdev);
 
 	/* Resume passive scanning */
@@ -4716,7 +6032,7 @@ static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
 	if (err)
 		return err;
 
-	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
+	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
 }
 
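hci_inquiry_sync() now takes the Num_Responses field of the HCI Inquiry command as an explicit parameter. A hypothetical call (editorial illustration; per the command definition a value of 0 means no limit on the number of responses):

	/* General inquiry with the GIAC, standard duration, unlimited
	 * responses.
	 */
	err = hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);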
 int hci_start_discovery_sync(struct hci_dev *hdev)
@@ -4728,7 +6044,7 @@ int hci_start_discovery_sync(struct hci_dev *hdev)
 
 	switch (hdev->discovery.type) {
 	case DISCOV_TYPE_BREDR:
-		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
+		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
 	case DISCOV_TYPE_INTERLEAVED:
 		/* When running simultaneous discovery, the LE scanning time
 		 * should occupy the whole discovery time since BR/EDR inquiry
 		 * and LE scanning are done sequentially with separate
 		 * timeouts.
 		 */
-		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
-			     &hdev->quirks)) {
+		if (hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) {
 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
 			/* During simultaneous discovery, we double LE scan
 			 * interval. We must leave some time for the controller
@@ -4765,17 +6080,6 @@ int hci_start_discovery_sync(struct hci_dev *hdev)
 
 	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
 
-	/* When service discovery is used and the controller has a
-	 * strict duplicate filter, it is important to remember the
-	 * start and duration of the scan. This is required for
-	 * restarting scanning during the discovery phase.
-	 */
-	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
-	    hdev->discovery.result_filtering) {
-		hdev->discovery.scan_start = jiffies;
-		hdev->discovery.scan_duration = timeout;
-	}
-
 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
 			   timeout);
 	return 0;
@@ -4809,7 +6113,6 @@ static int hci_pause_discovery_sync(struct hci_dev *hdev)
 		return err;
 
 	hdev->discovery_paused = true;
-	hdev->discovery_old_state = old_state;
 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 
 	return 0;
@@ -4825,11 +6128,17 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev)
 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
 		return 0;
 
+	/* Some fake CSR controllers lock up after setting this type of
+	 * filter, so avoid sending the request altogether.
+	 */
+	if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL))
+		return 0;
+
 	/* Always clear event filter when starting */
 	hci_clear_event_filter_sync(hdev);
 
 	list_for_each_entry(b, &hdev->accept_list, list) {
-		if (!test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, b->flags))
+		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
 			continue;
 
 		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
@@ -4839,7 +6148,7 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev)
 						 &b->bdaddr,
 						 HCI_CONN_SETUP_AUTO_ON);
 		if (err)
-			bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
+			bt_dev_err(hdev, "Failed to set event filter for %pMR",
 				   &b->bdaddr);
 		else
 			scan = SCAN_PAGE;
@@ -4853,10 +6162,28 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev)
 	return 0;
 }
 
+/* This function disables scan (BR and LE) and marks it as paused */
+static int hci_pause_scan_sync(struct hci_dev *hdev)
+{
+	if (hdev->scanning_paused)
+		return 0;
+
+	/* Disable page scan if enabled */
+	if (test_bit(HCI_PSCAN, &hdev->flags))
+		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
+
+	hci_scan_disable_sync(hdev);
+
+	hdev->scanning_paused = true;
+
+	return 0;
+}
+
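hci_pause_scan_sync() and the matching hci_resume_scan_sync() added further below both key off hdev->scanning_paused, so the suspend and resume paths can invoke them unconditionally. A minimal pairing sketch (editorial illustration, not code from the patch):

	static void example_pm_cycle(struct hci_dev *hdev)
	{
		hci_pause_scan_sync(hdev);	/* no-op if already paused */

		/* ... device suspended ... */

		hci_resume_scan_sync(hdev);	/* no-op if not paused */
	}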
 /* This function performs the HCI suspend procedures in the following order:
  *
  * Pause discovery (active scanning/inquiry)
  * Pause Directed Advertising/Advertising
+ * Pause Scanning (passive scanning in case discovery was not active)
  * Disconnect all connections
  * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
  * otherwise:
@@ -4882,23 +6209,26 @@ int hci_suspend_sync(struct hci_dev *hdev)
 	/* Pause other advertisements */
 	hci_pause_advertising_sync(hdev);
 
-	/* Disable page scan if enabled */
-	if (test_bit(HCI_PSCAN, &hdev->flags))
-		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
-
 	/* Suspend monitor filters */
 	hci_suspend_monitor_sync(hdev);
 
 	/* Prevent disconnects from causing scanning to be re-enabled */
-	hdev->scanning_paused = true;
+	hci_pause_scan_sync(hdev);
 
-	/* Soft disconnect everything (power off) */
-	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
-	if (err) {
-		/* Set state to BT_RUNNING so resume doesn't notify */
-		hdev->suspend_state = BT_RUNNING;
-		hci_resume_sync(hdev);
-		return err;
+	if (hci_conn_count(hdev)) {
+		/* Soft disconnect everything (power off) */
+		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
+		if (err) {
+			/* Set state to BT_RUNNING so resume doesn't notify */
+			hdev->suspend_state = BT_RUNNING;
+			hci_resume_sync(hdev);
+			return err;
+		}
+
+		/* Update event mask so only the allowed event can wake up the
+		 * host.
+		 */
+		hci_set_event_mask_sync(hdev);
 	}
 
 	/* Only configure accept list if disconnect succeeded and wake
@@ -4912,9 +6242,6 @@ int hci_suspend_sync(struct hci_dev *hdev)
 	/* Unpause to take care of updating scanning params */
 	hdev->scanning_paused = false;
 
-	/* Update event mask so only the allowed event can wakeup the host */
-	hci_set_event_mask_sync(hdev);
-
 	/* Enable event filter for paired devices */
 	hci_update_event_filter_sync(hdev);
 
@@ -4961,6 +6288,22 @@ static void hci_resume_monitor_sync(struct hci_dev *hdev)
 	}
 }
 
+/* This function resumes scanning and resets the paused flag */
+static int hci_resume_scan_sync(struct hci_dev *hdev)
+{
+	if (!hdev->scanning_paused)
+		return 0;
+
+	hdev->scanning_paused = false;
+
+	hci_update_scan_sync(hdev);
+
+	/* Reset passive scanning to normal */
+	hci_update_passive_scan_sync(hdev);
+
+	return 0;
+}
+
 /* This function performs the HCI resume procedures in the following order:
  *
  * Restore event mask
@@ -4976,17 +6319,15 @@ int hci_resume_sync(struct hci_dev *hdev)
 		return 0;
 
 	hdev->suspended = false;
-	hdev->scanning_paused = false;
 
 	/* Restore event mask */
 	hci_set_event_mask_sync(hdev);
 
 	/* Clear any event filters and restore scan state */
 	hci_clear_event_filter_sync(hdev);
-	hci_update_scan_sync(hdev);
 
-	/* Reset passive scanning to normal */
-	hci_update_passive_scan_sync(hdev);
+	/* Resume scanning */
+	hci_resume_scan_sync(hdev);
 
 	/* Resume monitor filters */
 	hci_resume_monitor_sync(hdev);
@@ -5011,6 +6352,7 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
 						struct hci_conn *conn)
 {
 	struct hci_cp_le_set_ext_adv_params cp;
+	struct hci_rp_le_set_ext_adv_params rp;
 	int err;
 	bdaddr_t random_addr;
 	u8 own_addr_type;
@@ -5031,7 +6373,6 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
 	memset(&cp, 0, sizeof(cp));
 	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
-	cp.own_addr_type = own_addr_type;
 	cp.channel_map = hdev->le_adv_channel_map;
 	cp.tx_power = HCI_TX_POWER_INVALID;
 	cp.primary_phy = HCI_ADV_PHY_1M;
@@ -5053,8 +6394,12 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
 	if (err)
 		return err;
 
-	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
-				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+	err = hci_set_ext_adv_params_sync(hdev, NULL, &cp, &rp);
+	if (err)
+		return err;
+
+	/* Update adv data as tx power is known now */
+	err = hci_set_ext_adv_data_sync(hdev, cp.handle);
 	if (err)
 		return err;
 
@@ -5140,8 +6485,8 @@ static void set_ext_conn_params(struct hci_conn *conn,
 	p->max_ce_len = cpu_to_le16(0x0000);
 }
 
-int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
-				u8 own_addr_type)
+static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
+				       struct hci_conn *conn, u8 own_addr_type)
 {
 	struct hci_cp_le_ext_create_conn *cp;
 	struct hci_cp_le_ext_conn_param *p;
@@ -5159,7 +6504,8 @@ int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
 
 	plen = sizeof(*cp);
 
-	if (scan_1m(hdev)) {
+	if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
+			      conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
 		cp->phys |= LE_SCAN_PHY_1M;
 		set_ext_conn_params(conn, p);
 
@@ -5167,7 +6513,8 @@ int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
 		plen += sizeof(*p);
 	}
 
-	if (scan_2m(hdev)) {
+	if (scan_2m(hdev) && (conn->le_adv_phy ==
HCI_ADV_PHY_2M || + conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) { cp->phys |= LE_SCAN_PHY_2M; set_ext_conn_params(conn, p); @@ -5175,7 +6522,8 @@ int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, plen += sizeof(*p); } - if (scan_coded(hdev)) { + if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED || + conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) { cp->phys |= LE_SCAN_PHY_CODED; set_ext_conn_params(conn, p); @@ -5185,15 +6533,24 @@ int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN, plen, data, HCI_EV_LE_ENHANCED_CONN_COMPLETE, - HCI_CMD_TIMEOUT, NULL); + conn->conn_timeout, NULL); } -int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn) +static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data) { struct hci_cp_le_create_conn cp; struct hci_conn_params *params; u8 own_addr_type; int err; + struct hci_conn *conn = data; + + if (!hci_conn_valid(hdev, conn)) + return -ECANCELED; + + bt_dev_dbg(hdev, "conn %p", conn); + + clear_bit(HCI_CONN_SCANNING, &conn->flags); + conn->state = BT_CONNECT; /* If requested to connect as peripheral use directed advertising */ if (conn->role == HCI_ROLE_SLAVE) { @@ -5249,7 +6606,7 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn) &own_addr_type); if (err) goto done; - + /* Send command LE Extended Create Connection if supported */ if (use_ext_conn(hdev)) { err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type); goto done; @@ -5270,12 +6627,794 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn) cp.min_ce_len = cpu_to_le16(0x0000); cp.max_ce_len = cpu_to_le16(0x0000); + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261: + * + * If this event is unmasked and the HCI_LE_Connection_Complete event + * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is + * sent when a new connection has been created. + */ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN, - sizeof(cp), &cp, HCI_EV_LE_CONN_COMPLETE, - HCI_CMD_TIMEOUT, NULL); + sizeof(cp), &cp, + use_enhanced_conn_complete(hdev) ? + HCI_EV_LE_ENHANCED_CONN_COMPLETE : + HCI_EV_LE_CONN_COMPLETE, + conn->conn_timeout, NULL); done: + if (err == -ETIMEDOUT) + hci_le_connect_cancel_sync(hdev, conn, 0x00); + /* Re-enable advertising after the connection attempt is finished. */ hci_resume_advertising_sync(hdev); return err; } + +int hci_le_create_cis_sync(struct hci_dev *hdev) +{ + DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f); + size_t aux_num_cis = 0; + struct hci_conn *conn; + u8 cig = BT_ISO_QOS_CIG_UNSET; + + /* The spec allows only one pending LE Create CIS command at a time. If + * the command is pending now, don't do anything. We check for pending + * connections after each CIS Established event. + * + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 2566: + * + * If the Host issues this command before all the + * HCI_LE_CIS_Established events from the previous use of the + * command have been generated, the Controller shall return the + * error code Command Disallowed (0x0C). + * + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 2567: + * + * When the Controller receives the HCI_LE_Create_CIS command, the + * Controller sends the HCI_Command_Status event to the Host. 
An + * HCI_LE_CIS_Established event will be generated for each CIS when it + * is established or if it is disconnected or considered lost before + * being established; until all the events are generated, the command + * remains pending. + */ + + hci_dev_lock(hdev); + + rcu_read_lock(); + + /* Wait until previous Create CIS has completed */ + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { + if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) + goto done; + } + + /* Find CIG with all CIS ready */ + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { + struct hci_conn *link; + + if (hci_conn_check_create_cis(conn)) + continue; + + cig = conn->iso_qos.ucast.cig; + + list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) { + if (hci_conn_check_create_cis(link) > 0 && + link->iso_qos.ucast.cig == cig && + link->state != BT_CONNECTED) { + cig = BT_ISO_QOS_CIG_UNSET; + break; + } + } + + if (cig != BT_ISO_QOS_CIG_UNSET) + break; + } + + if (cig == BT_ISO_QOS_CIG_UNSET) + goto done; + + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { + struct hci_cis *cis = &cmd->cis[aux_num_cis]; + + if (hci_conn_check_create_cis(conn) || + conn->iso_qos.ucast.cig != cig) + continue; + + set_bit(HCI_CONN_CREATE_CIS, &conn->flags); + cis->acl_handle = cpu_to_le16(conn->parent->handle); + cis->cis_handle = cpu_to_le16(conn->handle); + aux_num_cis++; + + if (aux_num_cis >= cmd->num_cis) + break; + } + cmd->num_cis = aux_num_cis; + +done: + rcu_read_unlock(); + + hci_dev_unlock(hdev); + + if (!aux_num_cis) + return 0; + + /* Wait for HCI_LE_CIS_Established */ + return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS, + struct_size(cmd, cis, cmd->num_cis), + cmd, HCI_EVT_LE_CIS_ESTABLISHED, + conn->conn_timeout, NULL); +} + +int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle) +{ + struct hci_cp_le_remove_cig cp; + + memset(&cp, 0, sizeof(cp)); + cp.cig_id = handle; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp), + &cp, HCI_CMD_TIMEOUT); +} + +int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle) +{ + struct hci_cp_le_big_term_sync cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle) +{ + struct hci_cp_le_pa_term_sync cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(handle); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, + bool use_rpa, struct adv_info *adv_instance, + u8 *own_addr_type, bdaddr_t *rand_addr) +{ + int err; + + bacpy(rand_addr, BDADDR_ANY); + + /* If privacy is enabled use a resolvable private address. If + * current RPA has expired then generate a new one. + */ + if (use_rpa) { + /* If Controller supports LL Privacy use own address type is + * 0x03 + */ + if (ll_privacy_capable(hdev)) + *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; + else + *own_addr_type = ADDR_LE_DEV_RANDOM; + + if (adv_instance) { + if (adv_rpa_valid(adv_instance)) + return 0; + } else { + if (rpa_valid(hdev)) + return 0; + } + + err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); + if (err < 0) { + bt_dev_err(hdev, "failed to generate new RPA"); + return err; + } + + bacpy(rand_addr, &hdev->rpa); + + return 0; + } + + /* In case of required privacy without resolvable private address, + * use an non-resolvable private address. 
This is useful for + * non-connectable advertising. + */ + if (require_privacy) { + bdaddr_t nrpa; + + while (true) { + /* The non-resolvable private address is generated + * from random six bytes with the two most significant + * bits cleared. + */ + get_random_bytes(&nrpa, 6); + nrpa.b[5] &= 0x3f; + + /* The non-resolvable private address shall not be + * equal to the public address. + */ + if (bacmp(&hdev->bdaddr, &nrpa)) + break; + } + + *own_addr_type = ADDR_LE_DEV_RANDOM; + bacpy(rand_addr, &nrpa); + + return 0; + } + + /* No privacy, use the current address */ + hci_copy_identity_address(hdev, rand_addr, own_addr_type); + + return 0; +} + +static int _update_adv_data_sync(struct hci_dev *hdev, void *data) +{ + u8 instance = PTR_UINT(data); + + return hci_update_adv_data_sync(hdev, instance); +} + +int hci_update_adv_data(struct hci_dev *hdev, u8 instance) +{ + return hci_cmd_sync_queue(hdev, _update_adv_data_sync, + UINT_PTR(instance), NULL); +} + +static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data) +{ + struct hci_conn *conn = data; + struct inquiry_entry *ie; + struct hci_cp_create_conn cp; + int err; + + if (!hci_conn_valid(hdev, conn)) + return -ECANCELED; + + /* Many controllers disallow HCI Create Connection while it is doing + * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create + * Connection. This may cause the MGMT discovering state to become false + * without user space's request but it is okay since the MGMT Discovery + * APIs do not promise that discovery should be done forever. Instead, + * the user space monitors the status of MGMT discovering and it may + * request for discovery again when this flag becomes false. + */ + if (test_bit(HCI_INQUIRY, &hdev->flags)) { + err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0, + NULL, HCI_CMD_TIMEOUT); + if (err) + bt_dev_warn(hdev, "Failed to cancel inquiry %d", err); + } + + conn->state = BT_CONNECT; + conn->out = true; + conn->role = HCI_ROLE_MASTER; + + conn->attempt++; + + conn->link_policy = hdev->link_policy; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, &conn->dst); + cp.pscan_rep_mode = 0x02; + + ie = hci_inquiry_cache_lookup(hdev, &conn->dst); + if (ie) { + if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { + cp.pscan_rep_mode = ie->data.pscan_rep_mode; + cp.pscan_mode = ie->data.pscan_mode; + cp.clock_offset = ie->data.clock_offset | + cpu_to_le16(0x8000); + } + + memcpy(conn->dev_class, ie->data.dev_class, 3); + } + + cp.pkt_type = cpu_to_le16(conn->pkt_type); + if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) + cp.role_switch = 0x01; + else + cp.role_switch = 0x00; + + return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN, + sizeof(cp), &cp, + HCI_EV_CONN_COMPLETE, + conn->conn_timeout, NULL); +} + +int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn, + NULL); +} + +static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_conn *conn = data; + + bt_dev_dbg(hdev, "err %d", err); + + if (err == -ECANCELED) + return; + + hci_dev_lock(hdev); + + if (!hci_conn_valid(hdev, conn)) + goto done; + + if (!err) { + hci_connect_le_scan_cleanup(conn, 0x00); + goto done; + } + + /* Check if connection is still pending */ + if (conn != hci_lookup_le_connect(hdev)) + goto done; + + /* Flush to make sure we send create conn cancel command if needed */ + flush_delayed_work(&conn->le_conn_timeout); + hci_conn_failed(conn, 
bt_status(err)); + +done: + hci_dev_unlock(hdev); +} + +int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn, + create_le_conn_complete); +} + +int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + if (conn->state != BT_OPEN) + return -EINVAL; + + switch (conn->type) { + case ACL_LINK: + return !hci_cmd_sync_dequeue_once(hdev, + hci_acl_create_conn_sync, + conn, NULL); + case LE_LINK: + return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync, + conn, create_le_conn_complete); + } + + return -ENOENT; +} + +int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, + struct hci_conn_params *params) +{ + struct hci_cp_le_conn_update cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + cp.conn_interval_min = cpu_to_le16(params->conn_min_interval); + cp.conn_interval_max = cpu_to_le16(params->conn_max_interval); + cp.conn_latency = cpu_to_le16(params->conn_latency); + cp.supervision_timeout = cpu_to_le16(params->supervision_timeout); + cp.min_ce_len = cpu_to_le16(0x0000); + cp.max_ce_len = cpu_to_le16(0x0000); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static void create_pa_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_conn *conn = data; + struct hci_conn *pa_sync; + + bt_dev_dbg(hdev, "err %d", err); + + if (err == -ECANCELED) + return; + + hci_dev_lock(hdev); + + if (hci_conn_valid(hdev, conn)) + clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); + + if (!err) + goto unlock; + + /* Add connection to indicate PA sync error */ + pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY, 0, + HCI_ROLE_SLAVE); + + if (IS_ERR(pa_sync)) + goto unlock; + + set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags); + + /* Notify iso layer */ + hci_connect_cfm(pa_sync, bt_status(err)); + +unlock: + hci_dev_unlock(hdev); +} + +static int hci_le_past_params_sync(struct hci_dev *hdev, struct hci_conn *conn, + struct hci_conn *acl, struct bt_iso_qos *qos) +{ + struct hci_cp_le_past_params cp; + int err; + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(acl->handle); + /* An HCI_LE_Periodic_Advertising_Sync_Transfer_Received event is sent + * to the Host. HCI_LE_Periodic_Advertising_Report events will be + * enabled with duplicate filtering enabled. + */ + cp.mode = 0x03; + cp.skip = cpu_to_le16(qos->bcast.skip); + cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout); + cp.cte_type = qos->bcast.sync_cte_type; + + /* HCI_LE_PAST_PARAMS command returns a command complete event so it + * cannot wait for HCI_EV_LE_PAST_RECEIVED. 
+	 */
+	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PAST_PARAMS,
+				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+	if (err)
+		return err;
+
+	/* Wait for HCI_EV_LE_PAST_RECEIVED event */
+	return __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
+					HCI_EV_LE_PAST_RECEIVED,
+					conn->conn_timeout, NULL);
+}
+
+static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+{
+	struct hci_cp_le_pa_create_sync cp;
+	struct hci_conn *conn = data, *le;
+	struct bt_iso_qos *qos = &conn->iso_qos;
+	int err;
+
+	if (!hci_conn_valid(hdev, conn))
+		return -ECANCELED;
+
+	if (conn->sync_handle != HCI_SYNC_HANDLE_INVALID)
+		return -EINVAL;
+
+	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
+		return -EBUSY;
+
+	/* Stop scanning if SID has not been set and active scanning is enabled
+	 * so we use passive scanning which will be scanning using the allow
+	 * list programmed to contain only the connection address.
+	 */
+	if (conn->sid == HCI_SID_INVALID &&
+	    hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
+		hci_scan_disable_sync(hdev);
+		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
+		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+	}
+
+	/* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can
+	 * program the address in the allow list so PA advertisements can be
+	 * received.
+	 */
+	set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
+
+	hci_update_passive_scan_sync(hdev);
+
+	/* Check if PAST is possible:
+	 *
+	 * 1. Check if an ACL connection with the destination address exists
+	 * 2. Check that HCI_CONN_FLAG_PAST has been set, which indicates that
+	 *    the user really intended to use PAST.
+	 */
+	le = hci_conn_hash_lookup_le(hdev, &conn->dst, conn->dst_type);
+	if (le) {
+		struct hci_conn_params *params;
+
+		params = hci_conn_params_lookup(hdev, &le->dst, le->dst_type);
+		if (params && params->flags & HCI_CONN_FLAG_PAST) {
+			err = hci_le_past_params_sync(hdev, conn, le, qos);
+			if (!err)
+				goto done;
+		}
+	}
+
+	/* If the SID has not been set, listen for HCI_EV_LE_EXT_ADV_REPORT to
+	 * update it.
+	 */
+	if (conn->sid == HCI_SID_INVALID) {
+		err = __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
+					       HCI_EV_LE_EXT_ADV_REPORT,
+					       conn->conn_timeout, NULL);
+		if (err == -ETIMEDOUT)
+			goto done;
+	}
+
+	memset(&cp, 0, sizeof(cp));
+	cp.options = qos->bcast.options;
+	cp.sid = conn->sid;
+	cp.addr_type = conn->dst_type;
+	bacpy(&cp.addr, &conn->dst);
+	cp.skip = cpu_to_le16(qos->bcast.skip);
+	cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
+	cp.sync_cte_type = qos->bcast.sync_cte_type;
+
+	/* The spec allows only one pending LE Periodic Advertising Create
+	 * Sync command at a time so we forcefully wait for PA Sync Established
+	 * event since cmd_work can only schedule one command at a time.
+	 *
+	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+	 * page 2493:
+	 *
+	 * If the Host issues this command when another HCI_LE_Periodic_
+	 * Advertising_Create_Sync command is pending, the Controller shall
+	 * return the error code Command Disallowed (0x0C).
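+	 *
+	 * Editorial aside (not part of the patch): the same timeout fallback
+	 * recurs in this file. When waiting for the *_Established event
+	 * times out, the pending operation is explicitly cancelled, in the
+	 * hypothetical shape:
+	 *
+	 *	err = __hci_cmd_sync_status_sk(hdev, opcode, plen, param,
+	 *				       event, conn->conn_timeout, NULL);
+	 *	if (err == -ETIMEDOUT)
+	 *		cancel_pending_op(hdev);
+	 *
+	 * where cancel_pending_op() stands for the matching cancel command,
+	 * here HCI_OP_LE_PA_CREATE_SYNC_CANCEL as used just below.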
+ */ + err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_PA_CREATE_SYNC, + sizeof(cp), &cp, + HCI_EV_LE_PA_SYNC_ESTABLISHED, + conn->conn_timeout, NULL); + if (err == -ETIMEDOUT) + __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL, + 0, NULL, HCI_CMD_TIMEOUT); + +done: + hci_dev_clear_flag(hdev, HCI_PA_SYNC); + + /* Update passive scan since HCI_PA_SYNC flag has been cleared */ + hci_update_passive_scan_sync(hdev); + + return err; +} + +int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + return hci_cmd_sync_queue_once(hdev, hci_le_pa_create_sync, conn, + create_pa_complete); +} + +static void create_big_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_conn *conn = data; + + bt_dev_dbg(hdev, "err %d", err); + + if (err == -ECANCELED) + return; + + if (hci_conn_valid(hdev, conn)) + clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); +} + +static int hci_le_big_create_sync(struct hci_dev *hdev, void *data) +{ + DEFINE_FLEX(struct hci_cp_le_big_create_sync, cp, bis, num_bis, 0x11); + struct hci_conn *conn = data; + struct bt_iso_qos *qos = &conn->iso_qos; + int err; + + if (!hci_conn_valid(hdev, conn)) + return -ECANCELED; + + set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); + + memset(cp, 0, sizeof(*cp)); + cp->handle = qos->bcast.big; + cp->sync_handle = cpu_to_le16(conn->sync_handle); + cp->encryption = qos->bcast.encryption; + memcpy(cp->bcode, qos->bcast.bcode, sizeof(cp->bcode)); + cp->mse = qos->bcast.mse; + cp->timeout = cpu_to_le16(qos->bcast.timeout); + cp->num_bis = conn->num_bis; + memcpy(cp->bis, conn->bis, conn->num_bis); + + /* The spec allows only one pending LE BIG Create Sync command at + * a time, so we forcefully wait for BIG Sync Established event since + * cmd_work can only schedule one command at a time. + * + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 2586: + * + * If the Host sends this command when the Controller is in the + * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_ + * Established event has not been generated, the Controller shall + * return the error code Command Disallowed (0x0C). 
+ */ + err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_BIG_CREATE_SYNC, + struct_size(cp, bis, cp->num_bis), cp, + HCI_EVT_LE_BIG_SYNC_ESTABLISHED, + conn->conn_timeout, NULL); + if (err == -ETIMEDOUT) + hci_le_big_terminate_sync(hdev, cp->handle); + + return err; +} + +int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + return hci_cmd_sync_queue_once(hdev, hci_le_big_create_sync, conn, + create_big_complete); +} + +struct past_data { + struct hci_conn *conn; + struct hci_conn *le; +}; + +static void past_complete(struct hci_dev *hdev, void *data, int err) +{ + struct past_data *past = data; + + bt_dev_dbg(hdev, "err %d", err); + + kfree(past); +} + +static int hci_le_past_set_info_sync(struct hci_dev *hdev, void *data) +{ + struct past_data *past = data; + struct hci_cp_le_past_set_info cp; + + hci_dev_lock(hdev); + + if (!hci_conn_valid(hdev, past->conn) || + !hci_conn_valid(hdev, past->le)) { + hci_dev_unlock(hdev); + return -ECANCELED; + } + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(past->le->handle); + cp.adv_handle = past->conn->iso_qos.bcast.bis; + + hci_dev_unlock(hdev); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_PAST_SET_INFO, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_le_past_sync(struct hci_dev *hdev, void *data) +{ + struct past_data *past = data; + struct hci_cp_le_past cp; + + hci_dev_lock(hdev); + + if (!hci_conn_valid(hdev, past->conn) || + !hci_conn_valid(hdev, past->le)) { + hci_dev_unlock(hdev); + return -ECANCELED; + } + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(past->le->handle); + cp.sync_handle = cpu_to_le16(past->conn->sync_handle); + + hci_dev_unlock(hdev); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_PAST, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_past_sync(struct hci_conn *conn, struct hci_conn *le) +{ + struct past_data *data; + int err; + + if (conn->type != BIS_LINK && conn->type != PA_LINK) + return -EINVAL; + + if (!past_sender_capable(conn->hdev)) + return -EOPNOTSUPP; + + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->conn = conn; + data->le = le; + + if (conn->role == HCI_ROLE_MASTER) + err = hci_cmd_sync_queue_once(conn->hdev, + hci_le_past_set_info_sync, data, + past_complete); + else + err = hci_cmd_sync_queue_once(conn->hdev, hci_le_past_sync, + data, past_complete); + + if (err) + kfree(data); + + return err; +} + +static void le_read_features_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_conn *conn = data; + + bt_dev_dbg(hdev, "err %d", err); + + if (err == -ECANCELED) + return; + + hci_conn_drop(conn); +} + +static int hci_le_read_all_remote_features_sync(struct hci_dev *hdev, + void *data) +{ + struct hci_conn *conn = data; + struct hci_cp_le_read_all_remote_features cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + cp.pages = 10; /* Attempt to read all pages */ + + /* Wait for HCI_EVT_LE_ALL_REMOTE_FEATURES_COMPLETE event otherwise + * hci_conn_drop may run prematurely causing a disconnection. 
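+	 *
+	 * Editorial aside (not part of the patch): this pairs with the
+	 * hci_conn_hold()/le_read_features_complete() usage in
+	 * hci_le_read_remote_features() below, where work_fn stands for the
+	 * queued function:
+	 *
+	 *	err = hci_cmd_sync_queue_once(hdev, work_fn,
+	 *				      hci_conn_hold(conn),
+	 *				      le_read_features_complete);
+	 *
+	 * The completion callback drops the held reference once the awaited
+	 * event (or an error) arrives, so the connection cannot be freed
+	 * while the procedure is in flight.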
+	 */
+	return __hci_cmd_sync_status_sk(hdev,
+					HCI_OP_LE_READ_ALL_REMOTE_FEATURES,
+					sizeof(cp), &cp,
+					HCI_EVT_LE_ALL_REMOTE_FEATURES_COMPLETE,
+					HCI_CMD_TIMEOUT, NULL);
+}
+
+static int hci_le_read_remote_features_sync(struct hci_dev *hdev, void *data)
+{
+	struct hci_conn *conn = data;
+	struct hci_cp_le_read_remote_features cp;
+
+	if (!hci_conn_valid(hdev, conn))
+		return -ECANCELED;
+
+	/* If the LL Extended Feature Set and the
+	 * HCI_OP_LE_READ_ALL_REMOTE_FEATURES command are supported, then use
+	 * that to read all features.
+	 */
+	if (ll_ext_feature_capable(hdev) && hdev->commands[47] & BIT(3))
+		return hci_le_read_all_remote_features_sync(hdev, data);
+
+	memset(&cp, 0, sizeof(cp));
+	cp.handle = cpu_to_le16(conn->handle);
+
+	/* Wait for HCI_EV_LE_REMOTE_FEAT_COMPLETE event otherwise
+	 * hci_conn_drop may run prematurely causing a disconnection.
+	 */
+	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
+					sizeof(cp), &cp,
+					HCI_EV_LE_REMOTE_FEAT_COMPLETE,
+					HCI_CMD_TIMEOUT, NULL);
+}
+
+int hci_le_read_remote_features(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+	int err;
+
+	/* The remote features procedure is defined for central
+	 * role only. So only in case of an initiated connection
+	 * request the remote features.
+	 *
+	 * If the local controller supports peripheral-initiated features
+	 * exchange, then requesting the remote features in peripheral
+	 * role is possible. Otherwise just transition into the
+	 * connected state without requesting the remote features.
+	 */
+	if (conn->out || (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
+		err = hci_cmd_sync_queue_once(hdev,
+					      hci_le_read_remote_features_sync,
+					      hci_conn_hold(conn),
+					      le_read_features_complete);
+	else
+		err = -EOPNOTSUPP;
+
+	return err;
+}
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 4e3e0451b08c..041ce9adc378 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -6,7 +6,9 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-static struct class *bt_class;
+static const struct class bt_class = {
+	.name = "bluetooth",
+};
 
 static void bt_link_release(struct device *dev)
 {
@@ -19,24 +21,14 @@ static const struct device_type bt_link = {
 	.release = bt_link_release,
 };
 
-/*
- * The rfcomm tty device will possibly retain even when conn
- * is down, and sysfs doesn't support move zombie device,
- * so we should move the device before conn device is destroyed.
- */ -static int __match_tty(struct device *dev, void *data) -{ - return !strncmp(dev_name(dev), "rfcomm", 6); -} - void hci_conn_init_sysfs(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; - BT_DBG("conn %p", conn); + bt_dev_dbg(hdev, "conn %p", conn); conn->dev.type = &bt_link; - conn->dev.class = bt_class; + conn->dev.class = &bt_class; conn->dev.parent = &hdev->dev; device_initialize(&conn->dev); @@ -46,38 +38,45 @@ void hci_conn_add_sysfs(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; - BT_DBG("conn %p", conn); + bt_dev_dbg(hdev, "conn %p", conn); + + if (device_is_registered(&conn->dev)) + return; dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); - if (device_add(&conn->dev) < 0) { + if (device_add(&conn->dev) < 0) bt_dev_err(hdev, "failed to register connection device"); - return; - } - - hci_dev_hold(hdev); } void hci_conn_del_sysfs(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; - if (!device_is_registered(&conn->dev)) + bt_dev_dbg(hdev, "conn %p", conn); + + if (!device_is_registered(&conn->dev)) { + /* If device_add() has *not* succeeded, use *only* put_device() + * to drop the reference count. + */ + put_device(&conn->dev); return; + } + /* If there are devices using the connection as parent reset it to NULL + * before unregistering the device. + */ while (1) { struct device *dev; - dev = device_find_child(&conn->dev, NULL, __match_tty); + dev = device_find_any_child(&conn->dev); if (!dev) break; device_move(dev, NULL, DPM_ORDER_DEV_LAST); put_device(dev); } - device_del(&conn->dev); - - hci_dev_put(hdev); + device_unregister(&conn->dev); } static void bt_host_release(struct device *dev) @@ -91,9 +90,28 @@ static void bt_host_release(struct device *dev) module_put(THIS_MODULE); } +static ssize_t reset_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hci_dev *hdev = to_hci_dev(dev); + + if (hdev->reset) + hdev->reset(hdev); + + return count; +} +static DEVICE_ATTR_WO(reset); + +static struct attribute *bt_host_attrs[] = { + &dev_attr_reset.attr, + NULL, +}; +ATTRIBUTE_GROUPS(bt_host); + static const struct device_type bt_host = { .name = "host", .release = bt_host_release, + .groups = bt_host_groups, }; void hci_init_sysfs(struct hci_dev *hdev) @@ -101,7 +119,7 @@ void hci_init_sysfs(struct hci_dev *hdev) struct device *dev = &hdev->dev; dev->type = &bt_host; - dev->class = bt_class; + dev->class = &bt_class; __module_get(THIS_MODULE); device_initialize(dev); @@ -109,12 +127,10 @@ void hci_init_sysfs(struct hci_dev *hdev) int __init bt_sysfs_init(void) { - bt_class = class_create(THIS_MODULE, "bluetooth"); - - return PTR_ERR_OR_ZERO(bt_class); + return class_register(&bt_class); } void bt_sysfs_cleanup(void) { - class_destroy(bt_class); + class_unregister(&bt_class); } diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig index 14100f341f33..e08aae35351a 100644 --- a/net/bluetooth/hidp/Kconfig +++ b/net/bluetooth/hidp/Kconfig @@ -1,8 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config BT_HIDP tristate "HIDP protocol support" - depends on BT_BREDR && INPUT - select HID + depends on BT_BREDR && HID help HIDP (Human Interface Device Protocol) is a transport layer for HID reports. 
HIDP is required for the Bluetooth Human diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 5940744a8cd8..6724adce615b 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -83,14 +83,14 @@ static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo ci->product = session->input->id.product; ci->version = session->input->id.version; if (session->input->name) - strlcpy(ci->name, session->input->name, 128); + strscpy(ci->name, session->input->name, 128); else - strlcpy(ci->name, "HID Boot Device", 128); + strscpy(ci->name, "HID Boot Device", 128); } else if (session->hid) { ci->vendor = session->hid->vendor; ci->product = session->hid->product; ci->version = session->hid->version; - strlcpy(ci->name, session->hid->name, 128); + strscpy(ci->name, session->hid->name, 128); } } @@ -405,7 +405,7 @@ static int hidp_raw_request(struct hid_device *hid, unsigned char reportnum, static void hidp_idle_timeout(struct timer_list *t) { - struct hidp_session *session = from_timer(session, t, timer); + struct hidp_session *session = timer_container_of(session, t, timer); /* The HIDP user-space API only contains calls to add and remove * devices. There is no way to forward events of any kind. Therefore, @@ -433,7 +433,7 @@ static void hidp_set_timer(struct hidp_session *session) static void hidp_del_timer(struct hidp_session *session) { if (session->idle_to > 0) - del_timer(&session->timer); + timer_delete_sync(&session->timer); } static void hidp_process_report(struct hidp_session *session, int type, @@ -739,7 +739,7 @@ static void hidp_stop(struct hid_device *hid) hid->claimed = 0; } -struct hid_ll_driver hidp_hid_driver = { +static const struct hid_ll_driver hidp_hid_driver = { .parse = hidp_parse, .start = hidp_start, .stop = hidp_stop, @@ -748,7 +748,6 @@ struct hid_ll_driver hidp_hid_driver = { .raw_request = hidp_raw_request, .output_report = hidp_output_report, }; -EXPORT_SYMBOL_GPL(hidp_hid_driver); /* This function sets up the hid device. It does not add it to the HID system. That is done in hidp_add_connection(). 
*/ diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c index 369ed92dac99..c93aaeb3a3fa 100644 --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c @@ -256,21 +256,13 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol, if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; - sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, kern); + sk = bt_sock_alloc(net, sock, &hidp_proto, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; - sock_init_data(sock, sk); - sock->ops = &hidp_sock_ops; - sock->state = SS_UNCONNECTED; - sock_reset_flag(sk, SOCK_ZAPPED); - - sk->sk_protocol = protocol; - sk->sk_state = BT_OPEN; - bt_sock_link(&hidp_sk_list, sk); return 0; diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c new file mode 100644 index 000000000000..e36d24a9098b --- /dev/null +++ b/net/bluetooth/iso.c @@ -0,0 +1,2734 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BlueZ - Bluetooth protocol stack for Linux + * + * Copyright (C) 2022 Intel Corporation + * Copyright 2023-2024 NXP + */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/sched/signal.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/iso.h> +#include "eir.h" + +static const struct proto_ops iso_sock_ops; + +static struct bt_sock_list iso_sk_list = { + .lock = __RW_LOCK_UNLOCKED(iso_sk_list.lock) +}; + +/* ---- ISO connections ---- */ +struct iso_conn { + struct hci_conn *hcon; + + /* @lock: spinlock protecting changes to iso_conn fields */ + spinlock_t lock; + struct sock *sk; + + struct delayed_work timeout_work; + + struct sk_buff *rx_skb; + __u32 rx_len; + __u16 tx_sn; + struct kref ref; +}; + +#define iso_conn_lock(c) spin_lock(&(c)->lock) +#define iso_conn_unlock(c) spin_unlock(&(c)->lock) + +static void iso_sock_close(struct sock *sk); +static void iso_sock_kill(struct sock *sk); + +/* ----- ISO socket info ----- */ +#define iso_pi(sk) ((struct iso_pinfo *)sk) + +#define EIR_SERVICE_DATA_LENGTH 4 +#define BASE_MAX_LENGTH (HCI_MAX_PER_AD_LENGTH - EIR_SERVICE_DATA_LENGTH) +#define EIR_BAA_SERVICE_UUID 0x1851 + +/* iso_pinfo flags values */ +enum { + BT_SK_BIG_SYNC, + BT_SK_PA_SYNC, +}; + +struct iso_pinfo { + struct bt_sock bt; + bdaddr_t src; + __u8 src_type; + bdaddr_t dst; + __u8 dst_type; + __u8 bc_sid; + __u8 bc_num_bis; + __u8 bc_bis[ISO_MAX_NUM_BIS]; + __u16 sync_handle; + unsigned long flags; + struct bt_iso_qos qos; + bool qos_user_set; + __u8 base_len; + __u8 base[BASE_MAX_LENGTH]; + struct iso_conn *conn; +}; + +static struct bt_iso_qos default_qos; + +static bool check_ucast_qos(struct bt_iso_qos *qos); +static bool check_bcast_qos(struct bt_iso_qos *qos); +static bool iso_match_sid(struct sock *sk, void *data); +static bool iso_match_sid_past(struct sock *sk, void *data); +static bool iso_match_sync_handle(struct sock *sk, void *data); +static bool iso_match_sync_handle_pa_report(struct sock *sk, void *data); +static void iso_sock_disconn(struct sock *sk); + +typedef bool (*iso_sock_match_t)(struct sock *sk, void *data); + +static struct sock *iso_get_sock(struct hci_dev *hdev, bdaddr_t *src, + bdaddr_t *dst, enum bt_sock_state state, + iso_sock_match_t match, void *data); + +/* ---- ISO timers ---- */ +#define ISO_CONN_TIMEOUT secs_to_jiffies(20) +#define ISO_DISCONN_TIMEOUT secs_to_jiffies(2) + +static void iso_conn_free(struct kref *ref) +{ + struct iso_conn *conn = container_of(ref, struct iso_conn, ref); + + BT_DBG("conn %p", conn); + + if 
(conn->sk) + iso_pi(conn->sk)->conn = NULL; + + if (conn->hcon) { + conn->hcon->iso_data = NULL; + hci_conn_drop(conn->hcon); + } + + /* Ensure no more work items will run since hci_conn has been dropped */ + disable_delayed_work_sync(&conn->timeout_work); + + kfree_skb(conn->rx_skb); + + kfree(conn); +} + +static void iso_conn_put(struct iso_conn *conn) +{ + if (!conn) + return; + + BT_DBG("conn %p refcnt %d", conn, kref_read(&conn->ref)); + + kref_put(&conn->ref, iso_conn_free); +} + +static struct iso_conn *iso_conn_hold_unless_zero(struct iso_conn *conn) +{ + if (!conn) + return NULL; + + BT_DBG("conn %p refcnt %u", conn, kref_read(&conn->ref)); + + if (!kref_get_unless_zero(&conn->ref)) + return NULL; + + return conn; +} + +static struct sock *iso_sock_hold(struct iso_conn *conn) +{ + if (!conn || !bt_sock_linked(&iso_sk_list, conn->sk)) + return NULL; + + sock_hold(conn->sk); + + return conn->sk; +} + +static void iso_sock_timeout(struct work_struct *work) +{ + struct iso_conn *conn = container_of(work, struct iso_conn, + timeout_work.work); + struct sock *sk; + + conn = iso_conn_hold_unless_zero(conn); + if (!conn) + return; + + iso_conn_lock(conn); + sk = iso_sock_hold(conn); + iso_conn_unlock(conn); + iso_conn_put(conn); + + if (!sk) + return; + + BT_DBG("sock %p state %d", sk, sk->sk_state); + + lock_sock(sk); + sk->sk_err = ETIMEDOUT; + sk->sk_state_change(sk); + release_sock(sk); + sock_put(sk); +} + +static void iso_sock_set_timer(struct sock *sk, long timeout) +{ + if (!iso_pi(sk)->conn) + return; + + BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); + cancel_delayed_work(&iso_pi(sk)->conn->timeout_work); + schedule_delayed_work(&iso_pi(sk)->conn->timeout_work, timeout); +} + +static void iso_sock_clear_timer(struct sock *sk) +{ + if (!iso_pi(sk)->conn) + return; + + BT_DBG("sock %p state %d", sk, sk->sk_state); + cancel_delayed_work(&iso_pi(sk)->conn->timeout_work); +} + +/* ---- ISO connections ---- */ +static struct iso_conn *iso_conn_add(struct hci_conn *hcon) +{ + struct iso_conn *conn = hcon->iso_data; + + conn = iso_conn_hold_unless_zero(conn); + if (conn) { + if (!conn->hcon) { + iso_conn_lock(conn); + conn->hcon = hcon; + iso_conn_unlock(conn); + } + iso_conn_put(conn); + return conn; + } + + conn = kzalloc(sizeof(*conn), GFP_KERNEL); + if (!conn) + return NULL; + + kref_init(&conn->ref); + spin_lock_init(&conn->lock); + INIT_DELAYED_WORK(&conn->timeout_work, iso_sock_timeout); + + hcon->iso_data = conn; + conn->hcon = hcon; + conn->tx_sn = 0; + + BT_DBG("hcon %p conn %p", hcon, conn); + + return conn; +} + +/* Delete channel. Must be called on the locked socket. 
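+ *
+ * Editorial aside (not part of the patch): iso_conn lifetime in this file
+ * follows the kref get-unless-zero idiom shown above, e.g.:
+ *
+ *	conn = iso_conn_hold_unless_zero(conn);
+ *	if (!conn)
+ *		return;		(already on its way to being freed)
+ *	...
+ *	iso_conn_put(conn);	(iso_conn_free() runs on the last put)
+ *
+ * kref_get_unless_zero() refuses to revive an object whose refcount has
+ * already dropped to zero, which is what makes lookup-then-hold safe here.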
*/ +static void iso_chan_del(struct sock *sk, int err) +{ + struct iso_conn *conn; + struct sock *parent; + + conn = iso_pi(sk)->conn; + iso_pi(sk)->conn = NULL; + + BT_DBG("sk %p, conn %p, err %d", sk, conn, err); + + if (conn) { + iso_conn_lock(conn); + conn->sk = NULL; + iso_conn_unlock(conn); + iso_conn_put(conn); + } + + sk->sk_state = BT_CLOSED; + sk->sk_err = err; + + parent = bt_sk(sk)->parent; + if (parent) { + bt_accept_unlink(sk); + parent->sk_data_ready(parent); + } else { + sk->sk_state_change(sk); + } + + sock_set_flag(sk, SOCK_ZAPPED); +} + +static void iso_conn_del(struct hci_conn *hcon, int err) +{ + struct iso_conn *conn = hcon->iso_data; + struct sock *sk; + + conn = iso_conn_hold_unless_zero(conn); + if (!conn) + return; + + BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); + + /* Kill socket */ + iso_conn_lock(conn); + sk = iso_sock_hold(conn); + iso_conn_unlock(conn); + iso_conn_put(conn); + + if (!sk) { + iso_conn_put(conn); + return; + } + + lock_sock(sk); + iso_sock_clear_timer(sk); + iso_chan_del(sk, err); + release_sock(sk); + sock_put(sk); +} + +static int __iso_chan_add(struct iso_conn *conn, struct sock *sk, + struct sock *parent) +{ + BT_DBG("conn %p", conn); + + if (iso_pi(sk)->conn == conn && conn->sk == sk) + return 0; + + if (conn->sk) { + BT_ERR("conn->sk already set"); + return -EBUSY; + } + + iso_pi(sk)->conn = conn; + conn->sk = sk; + + if (parent) + bt_accept_enqueue(parent, sk, true); + + return 0; +} + +static int iso_chan_add(struct iso_conn *conn, struct sock *sk, + struct sock *parent) +{ + int err; + + iso_conn_lock(conn); + err = __iso_chan_add(conn, sk, parent); + iso_conn_unlock(conn); + + return err; +} + +static inline u8 le_addr_type(u8 bdaddr_type) +{ + if (bdaddr_type == BDADDR_LE_PUBLIC) + return ADDR_LE_DEV_PUBLIC; + else + return ADDR_LE_DEV_RANDOM; +} + +static int iso_connect_bis(struct sock *sk) +{ + struct iso_conn *conn; + struct hci_conn *hcon; + struct hci_dev *hdev; + int err; + + BT_DBG("%pMR (SID 0x%2.2x)", &iso_pi(sk)->src, iso_pi(sk)->bc_sid); + + hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, + iso_pi(sk)->src_type); + if (!hdev) + return -EHOSTUNREACH; + + hci_dev_lock(hdev); + + if (!bis_capable(hdev)) { + err = -EOPNOTSUPP; + goto unlock; + } + + /* Fail if user set invalid QoS */ + if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) { + iso_pi(sk)->qos = default_qos; + err = -EINVAL; + goto unlock; + } + + /* Fail if out PHYs are marked as disabled */ + if (!iso_pi(sk)->qos.bcast.out.phy) { + err = -EINVAL; + goto unlock; + } + + /* Just bind if DEFER_SETUP has been set */ + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + hcon = hci_bind_bis(hdev, &iso_pi(sk)->dst, iso_pi(sk)->bc_sid, + &iso_pi(sk)->qos, iso_pi(sk)->base_len, + iso_pi(sk)->base, + READ_ONCE(sk->sk_sndtimeo)); + if (IS_ERR(hcon)) { + err = PTR_ERR(hcon); + goto unlock; + } + } else { + hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst, + le_addr_type(iso_pi(sk)->dst_type), + iso_pi(sk)->bc_sid, &iso_pi(sk)->qos, + iso_pi(sk)->base_len, iso_pi(sk)->base, + READ_ONCE(sk->sk_sndtimeo)); + if (IS_ERR(hcon)) { + err = PTR_ERR(hcon); + goto unlock; + } + + /* Update SID if it was not set */ + if (iso_pi(sk)->bc_sid == HCI_SID_INVALID) + iso_pi(sk)->bc_sid = hcon->sid; + } + + conn = iso_conn_add(hcon); + if (!conn) { + hci_conn_drop(hcon); + err = -ENOMEM; + goto unlock; + } + + lock_sock(sk); + + err = iso_chan_add(conn, sk, NULL); + if (err) { + release_sock(sk); + goto unlock; + } + + /* Update source addr of the socket */ 
+ bacpy(&iso_pi(sk)->src, &hcon->src); + + if (hcon->state == BT_CONNECTED) { + iso_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + } else if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + iso_sock_clear_timer(sk); + sk->sk_state = BT_CONNECT; + } else { + sk->sk_state = BT_CONNECT; + iso_sock_set_timer(sk, READ_ONCE(sk->sk_sndtimeo)); + } + + release_sock(sk); + +unlock: + hci_dev_unlock(hdev); + hci_dev_put(hdev); + return err; +} + +static int iso_connect_cis(struct sock *sk) +{ + struct iso_conn *conn; + struct hci_conn *hcon; + struct hci_dev *hdev; + int err; + + BT_DBG("%pMR -> %pMR", &iso_pi(sk)->src, &iso_pi(sk)->dst); + + hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, + iso_pi(sk)->src_type); + if (!hdev) + return -EHOSTUNREACH; + + hci_dev_lock(hdev); + + if (!cis_central_capable(hdev)) { + err = -EOPNOTSUPP; + goto unlock; + } + + /* Fail if user set invalid QoS */ + if (iso_pi(sk)->qos_user_set && !check_ucast_qos(&iso_pi(sk)->qos)) { + iso_pi(sk)->qos = default_qos; + err = -EINVAL; + goto unlock; + } + + /* Fail if either PHYs are marked as disabled */ + if (!iso_pi(sk)->qos.ucast.in.phy && !iso_pi(sk)->qos.ucast.out.phy) { + err = -EINVAL; + goto unlock; + } + + /* Check if there are available buffers for output/TX. */ + if (iso_pi(sk)->qos.ucast.out.sdu && !hci_iso_count(hdev) && + (hdev->iso_pkts && !hdev->iso_cnt)) { + err = -ENOBUFS; + goto unlock; + } + + /* Just bind if DEFER_SETUP has been set */ + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + hcon = hci_bind_cis(hdev, &iso_pi(sk)->dst, + le_addr_type(iso_pi(sk)->dst_type), + &iso_pi(sk)->qos, + READ_ONCE(sk->sk_sndtimeo)); + if (IS_ERR(hcon)) { + err = PTR_ERR(hcon); + goto unlock; + } + } else { + hcon = hci_connect_cis(hdev, &iso_pi(sk)->dst, + le_addr_type(iso_pi(sk)->dst_type), + &iso_pi(sk)->qos, + READ_ONCE(sk->sk_sndtimeo)); + if (IS_ERR(hcon)) { + err = PTR_ERR(hcon); + goto unlock; + } + } + + conn = iso_conn_add(hcon); + if (!conn) { + hci_conn_drop(hcon); + err = -ENOMEM; + goto unlock; + } + + lock_sock(sk); + + err = iso_chan_add(conn, sk, NULL); + if (err) { + release_sock(sk); + goto unlock; + } + + /* Update source addr of the socket */ + bacpy(&iso_pi(sk)->src, &hcon->src); + + if (hcon->state == BT_CONNECTED) { + iso_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + } else if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + iso_sock_clear_timer(sk); + sk->sk_state = BT_CONNECT; + } else { + sk->sk_state = BT_CONNECT; + iso_sock_set_timer(sk, READ_ONCE(sk->sk_sndtimeo)); + } + + release_sock(sk); + +unlock: + hci_dev_unlock(hdev); + hci_dev_put(hdev); + return err; +} + +static struct bt_iso_qos *iso_sock_get_qos(struct sock *sk) +{ + if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONNECT2) + return &iso_pi(sk)->conn->hcon->iso_qos; + + return &iso_pi(sk)->qos; +} + +static int iso_send_frame(struct sock *sk, struct sk_buff *skb, + const struct sockcm_cookie *sockc) +{ + struct iso_conn *conn = iso_pi(sk)->conn; + struct bt_iso_qos *qos = iso_sock_get_qos(sk); + struct hci_iso_data_hdr *hdr; + int len = 0; + + BT_DBG("sk %p len %d", sk, skb->len); + + if (skb->len > qos->ucast.out.sdu) + return -EMSGSIZE; + + len = skb->len; + + /* Push ISO data header */ + hdr = skb_push(skb, HCI_ISO_DATA_HDR_SIZE); + hdr->sn = cpu_to_le16(conn->tx_sn++); + hdr->slen = cpu_to_le16(hci_iso_data_len_pack(len, + HCI_ISO_STATUS_VALID)); + + if (sk->sk_state == BT_CONNECTED) { + hci_setup_tx_timestamp(skb, 1, sockc); + hci_send_iso(conn->hcon, skb); + } else { + len = 
-ENOTCONN; + } + + return len; +} + +static void iso_recv_frame(struct iso_conn *conn, struct sk_buff *skb) +{ + struct sock *sk; + + iso_conn_lock(conn); + sk = conn->sk; + iso_conn_unlock(conn); + + if (!sk) + goto drop; + + BT_DBG("sk %p len %d", sk, skb->len); + + if (sk->sk_state != BT_CONNECTED) + goto drop; + + if (!sock_queue_rcv_skb(sk, skb)) + return; + +drop: + kfree_skb(skb); +} + +/* -------- Socket interface ---------- */ +static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *src, bdaddr_t *dst) +{ + struct sock *sk; + + sk_for_each(sk, &iso_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + if (bacmp(&iso_pi(sk)->dst, dst)) + continue; + + if (!bacmp(&iso_pi(sk)->src, src)) + return sk; + } + + return NULL; +} + +static struct sock *__iso_get_sock_listen_by_sid(bdaddr_t *ba, bdaddr_t *bc, + __u8 sid) +{ + struct sock *sk; + + sk_for_each(sk, &iso_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + if (bacmp(&iso_pi(sk)->src, ba)) + continue; + + if (bacmp(&iso_pi(sk)->dst, bc)) + continue; + + if (iso_pi(sk)->bc_sid == sid) + return sk; + } + + return NULL; +} + +/* Find socket in given state: + * source bdaddr (Unicast) + * destination bdaddr (Broadcast only) + * match func - pass NULL to ignore + * match func data - pass -1 to ignore + * Returns closest match. + */ +static struct sock *iso_get_sock(struct hci_dev *hdev, bdaddr_t *src, + bdaddr_t *dst, enum bt_sock_state state, + iso_sock_match_t match, void *data) +{ + struct sock *sk = NULL, *sk1 = NULL; + + read_lock(&iso_sk_list.lock); + + sk_for_each(sk, &iso_sk_list.head) { + if (sk->sk_state != state) + continue; + + /* Match Broadcast destination */ + if (bacmp(dst, BDADDR_ANY) && bacmp(&iso_pi(sk)->dst, dst)) { + struct smp_irk *irk1, *irk2; + + /* Check if destination is an RPA that we can resolve */ + irk1 = hci_find_irk_by_rpa(hdev, dst); + if (!irk1) + continue; + + /* Match with identity address */ + if (bacmp(&iso_pi(sk)->dst, &irk1->bdaddr)) { + /* Check if socket destination address is also + * an RPA and if the IRK matches. + */ + irk2 = hci_find_irk_by_rpa(hdev, + &iso_pi(sk)->dst); + if (!irk2 || irk1 != irk2) + continue; + } + } + + /* Use Match function if provided */ + if (match && !match(sk, data)) + continue; + + /* Exact match. */ + if (!bacmp(&iso_pi(sk)->src, src)) { + sock_hold(sk); + break; + } + + /* Closest match */ + if (!bacmp(&iso_pi(sk)->src, BDADDR_ANY)) { + if (sk1) + sock_put(sk1); + + sk1 = sk; + sock_hold(sk1); + } + } + + if (sk && sk1) + sock_put(sk1); + + read_unlock(&iso_sk_list.lock); + + return sk ? 
sk : sk1; +} + +static struct sock *iso_get_sock_big(struct sock *match_sk, bdaddr_t *src, + bdaddr_t *dst, uint8_t big) +{ + struct sock *sk = NULL; + + read_lock(&iso_sk_list.lock); + + sk_for_each(sk, &iso_sk_list.head) { + if (match_sk == sk) + continue; + + /* Look for sockets that have already been + * connected to the BIG + */ + if (sk->sk_state != BT_CONNECTED && + sk->sk_state != BT_CONNECT) + continue; + + /* Match Broadcast destination */ + if (bacmp(&iso_pi(sk)->dst, dst)) + continue; + + /* Match BIG handle */ + if (iso_pi(sk)->qos.bcast.big != big) + continue; + + /* Match source address */ + if (bacmp(&iso_pi(sk)->src, src)) + continue; + + sock_hold(sk); + break; + } + + read_unlock(&iso_sk_list.lock); + + return sk; +} + +static void iso_sock_destruct(struct sock *sk) +{ + BT_DBG("sk %p", sk); + + iso_conn_put(iso_pi(sk)->conn); + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); +} + +static void iso_sock_cleanup_listen(struct sock *parent) +{ + struct sock *sk; + + BT_DBG("parent %p", parent); + + /* Close not yet accepted channels */ + while ((sk = bt_accept_dequeue(parent, NULL))) { + iso_sock_close(sk); + iso_sock_kill(sk); + } + + /* If listening socket has a hcon, properly disconnect it */ + if (iso_pi(parent)->conn && iso_pi(parent)->conn->hcon) { + iso_sock_disconn(parent); + return; + } + + parent->sk_state = BT_CLOSED; + sock_set_flag(parent, SOCK_ZAPPED); +} + +/* Kill socket (only if zapped and orphan) + * Must be called on unlocked socket. + */ +static void iso_sock_kill(struct sock *sk) +{ + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket || + sock_flag(sk, SOCK_DEAD)) + return; + + BT_DBG("sk %p state %d", sk, sk->sk_state); + + /* Sock is dead, so set conn->sk to NULL to avoid possible UAF */ + if (iso_pi(sk)->conn) { + iso_conn_lock(iso_pi(sk)->conn); + iso_pi(sk)->conn->sk = NULL; + iso_conn_unlock(iso_pi(sk)->conn); + } + + /* Kill poor orphan */ + bt_sock_unlink(&iso_sk_list, sk); + sock_set_flag(sk, SOCK_DEAD); + sock_put(sk); +} + +static void iso_sock_disconn(struct sock *sk) +{ + struct sock *bis_sk; + struct hci_conn *hcon = iso_pi(sk)->conn->hcon; + + if (test_bit(HCI_CONN_BIG_CREATED, &hcon->flags)) { + bis_sk = iso_get_sock_big(sk, &iso_pi(sk)->src, + &iso_pi(sk)->dst, + iso_pi(sk)->qos.bcast.big); + + /* If there are any other connected sockets for the + * same BIG, just delete the sk and leave the bis + * hcon active, in case later rebinding is needed. 
+ */ + if (bis_sk) { + hcon->state = BT_OPEN; + hcon->iso_data = NULL; + iso_pi(sk)->conn->hcon = NULL; + iso_sock_clear_timer(sk); + iso_chan_del(sk, bt_to_errno(hcon->abort_reason)); + sock_put(bis_sk); + return; + } + } + + sk->sk_state = BT_DISCONN; + iso_conn_lock(iso_pi(sk)->conn); + hci_conn_drop(iso_pi(sk)->conn->hcon); + iso_pi(sk)->conn->hcon = NULL; + iso_conn_unlock(iso_pi(sk)->conn); +} + +static void __iso_sock_close(struct sock *sk) +{ + BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); + + switch (sk->sk_state) { + case BT_LISTEN: + iso_sock_cleanup_listen(sk); + break; + + case BT_CONNECT: + case BT_CONNECTED: + case BT_CONFIG: + if (iso_pi(sk)->conn->hcon) + iso_sock_disconn(sk); + else + iso_chan_del(sk, ECONNRESET); + break; + + case BT_CONNECT2: + if (iso_pi(sk)->conn->hcon && + (test_bit(HCI_CONN_PA_SYNC, &iso_pi(sk)->conn->hcon->flags) || + test_bit(HCI_CONN_PA_SYNC_FAILED, &iso_pi(sk)->conn->hcon->flags))) + iso_sock_disconn(sk); + else + iso_chan_del(sk, ECONNRESET); + break; + case BT_DISCONN: + iso_chan_del(sk, ECONNRESET); + break; + + default: + sock_set_flag(sk, SOCK_ZAPPED); + break; + } +} + +/* Must be called on unlocked socket. */ +static void iso_sock_close(struct sock *sk) +{ + iso_sock_clear_timer(sk); + lock_sock(sk); + __iso_sock_close(sk); + release_sock(sk); + iso_sock_kill(sk); +} + +static void iso_sock_init(struct sock *sk, struct sock *parent) +{ + BT_DBG("sk %p", sk); + + if (parent) { + sk->sk_type = parent->sk_type; + bt_sk(sk)->flags = bt_sk(parent)->flags; + security_sk_clone(parent, sk); + } +} + +static struct proto iso_proto = { + .name = "ISO", + .owner = THIS_MODULE, + .obj_size = sizeof(struct iso_pinfo) +}; + +#define DEFAULT_IO_QOS \ +{ \ + .interval = 10000u, \ + .latency = 10u, \ + .sdu = 40u, \ + .phy = BT_ISO_PHY_2M, \ + .rtn = 2u, \ +} + +static struct bt_iso_qos default_qos = { + .bcast = { + .big = BT_ISO_QOS_BIG_UNSET, + .bis = BT_ISO_QOS_BIS_UNSET, + .sync_factor = 0x01, + .packing = 0x00, + .framing = 0x00, + .in = DEFAULT_IO_QOS, + .out = DEFAULT_IO_QOS, + .encryption = 0x00, + .bcode = {0x00}, + .options = 0x00, + .skip = 0x0000, + .sync_timeout = BT_ISO_SYNC_TIMEOUT, + .sync_cte_type = 0x00, + .mse = 0x00, + .timeout = BT_ISO_SYNC_TIMEOUT, + }, +}; + +static struct sock *iso_sock_alloc(struct net *net, struct socket *sock, + int proto, gfp_t prio, int kern) +{ + struct sock *sk; + + sk = bt_sock_alloc(net, sock, &iso_proto, proto, prio, kern); + if (!sk) + return NULL; + + sk->sk_destruct = iso_sock_destruct; + sk->sk_sndtimeo = ISO_CONN_TIMEOUT; + + /* Set address type as public as default src address is BDADDR_ANY */ + iso_pi(sk)->src_type = BDADDR_LE_PUBLIC; + + iso_pi(sk)->qos = default_qos; + iso_pi(sk)->sync_handle = -1; + + bt_sock_link(&iso_sk_list, sk); + return sk; +} + +static int iso_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + sock->state = SS_UNCONNECTED; + + if (sock->type != SOCK_SEQPACKET) + return -ESOCKTNOSUPPORT; + + sock->ops = &iso_sock_ops; + + sk = iso_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); + if (!sk) + return -ENOMEM; + + iso_sock_init(sk, NULL); + return 0; +} + +static int iso_sock_bind_bc(struct socket *sock, struct sockaddr_unsized *addr, + int addr_len) +{ + struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; + struct sock *sk = sock->sk; + int i; + + BT_DBG("sk %p bc_sid %u bc_num_bis %u", sk, sa->iso_bc->bc_sid, + sa->iso_bc->bc_num_bis); + + if (addr_len != sizeof(*sa) 
+ sizeof(*sa->iso_bc)) + return -EINVAL; + + bacpy(&iso_pi(sk)->dst, &sa->iso_bc->bc_bdaddr); + + /* Check if the address type is of LE type */ + if (!bdaddr_type_is_le(sa->iso_bc->bc_bdaddr_type)) + return -EINVAL; + + iso_pi(sk)->dst_type = sa->iso_bc->bc_bdaddr_type; + + if (sa->iso_bc->bc_sid > 0x0f && sa->iso_bc->bc_sid != HCI_SID_INVALID) + return -EINVAL; + + iso_pi(sk)->bc_sid = sa->iso_bc->bc_sid; + + if (sa->iso_bc->bc_num_bis > ISO_MAX_NUM_BIS) + return -EINVAL; + + iso_pi(sk)->bc_num_bis = sa->iso_bc->bc_num_bis; + + for (i = 0; i < iso_pi(sk)->bc_num_bis; i++) + if (sa->iso_bc->bc_bis[i] < 0x01 || + sa->iso_bc->bc_bis[i] > 0x1f) + return -EINVAL; + + memcpy(iso_pi(sk)->bc_bis, sa->iso_bc->bc_bis, + iso_pi(sk)->bc_num_bis); + + return 0; +} + +/* Must be called on the locked socket. */ +static int iso_sock_rebind_bis(struct sock *sk, struct sockaddr_iso *sa, + int addr_len) +{ + int err = 0; + + if (!test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) + return -EBADFD; + + if (sa->iso_bc->bc_num_bis > ISO_MAX_NUM_BIS) { + err = -EINVAL; + goto done; + } + + iso_pi(sk)->bc_num_bis = sa->iso_bc->bc_num_bis; + + for (int i = 0; i < iso_pi(sk)->bc_num_bis; i++) + if (sa->iso_bc->bc_bis[i] < 0x01 || + sa->iso_bc->bc_bis[i] > 0x1f) { + err = -EINVAL; + goto done; + } + + memcpy(iso_pi(sk)->bc_bis, sa->iso_bc->bc_bis, + iso_pi(sk)->bc_num_bis); + +done: + return err; +} + +static struct hci_dev *iso_conn_get_hdev(struct iso_conn *conn) +{ + struct hci_dev *hdev = NULL; + + iso_conn_lock(conn); + if (conn->hcon) + hdev = hci_dev_hold(conn->hcon->hdev); + iso_conn_unlock(conn); + + return hdev; +} + +/* Must be called on the locked socket. */ +static int iso_sock_rebind_bc(struct sock *sk, struct sockaddr_iso *sa, + int addr_len) +{ + struct hci_dev *hdev; + struct hci_conn *bis; + int err; + + if (sk->sk_type != SOCK_SEQPACKET || !iso_pi(sk)->conn) + return -EINVAL; + + /* Check if it is really a Broadcast address being requested */ + if (addr_len != sizeof(*sa) + sizeof(*sa->iso_bc)) + return -EINVAL; + + /* Check if the address hasn't changed then perhaps only the number of + * bis has changed. + */ + if (!bacmp(&iso_pi(sk)->dst, &sa->iso_bc->bc_bdaddr) || + !bacmp(&sa->iso_bc->bc_bdaddr, BDADDR_ANY)) + return iso_sock_rebind_bis(sk, sa, addr_len); + + /* Check if the address type is of LE type */ + if (!bdaddr_type_is_le(sa->iso_bc->bc_bdaddr_type)) + return -EINVAL; + + hdev = iso_conn_get_hdev(iso_pi(sk)->conn); + if (!hdev) + return -EINVAL; + + bis = iso_pi(sk)->conn->hcon; + + /* Release the socket before lookups since that requires hci_dev_lock + * which shall not be acquired while holding sock_lock for proper + * ordering. 
+ */ + release_sock(sk); + hci_dev_lock(bis->hdev); + lock_sock(sk); + + if (!iso_pi(sk)->conn || iso_pi(sk)->conn->hcon != bis) { + /* raced with iso_conn_del() or iso_disconn_sock() */ + err = -ENOTCONN; + goto unlock; + } + + BT_DBG("sk %p %pMR type %u", sk, &sa->iso_bc->bc_bdaddr, + sa->iso_bc->bc_bdaddr_type); + + err = hci_past_bis(bis, &sa->iso_bc->bc_bdaddr, + le_addr_type(sa->iso_bc->bc_bdaddr_type)); + +unlock: + hci_dev_unlock(hdev); + hci_dev_put(hdev); + + return err; +} + +static int iso_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, + int addr_len) +{ + struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p %pMR type %u", sk, &sa->iso_bdaddr, sa->iso_bdaddr_type); + + if (!addr || addr_len < sizeof(struct sockaddr_iso) || + addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + lock_sock(sk); + + if ((sk->sk_state == BT_CONNECT2 || sk->sk_state == BT_CONNECTED) && + addr_len > sizeof(*sa)) { + /* Allow the user to rebind to a different address using + * PAST procedures. + */ + err = iso_sock_rebind_bc(sk, sa, addr_len); + goto done; + } + + if (sk->sk_state != BT_OPEN) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_SEQPACKET) { + err = -EINVAL; + goto done; + } + + /* Check if the address type is of LE type */ + if (!bdaddr_type_is_le(sa->iso_bdaddr_type)) { + err = -EINVAL; + goto done; + } + + bacpy(&iso_pi(sk)->src, &sa->iso_bdaddr); + iso_pi(sk)->src_type = sa->iso_bdaddr_type; + + /* Check for Broadcast address */ + if (addr_len > sizeof(*sa)) { + err = iso_sock_bind_bc(sock, addr, addr_len); + if (err) + goto done; + } + + sk->sk_state = BT_BOUND; + +done: + release_sock(sk); + return err; +} + +static int iso_sock_connect(struct socket *sock, struct sockaddr_unsized *addr, + int alen, int flags) +{ + struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; + struct sock *sk = sock->sk; + int err; + + BT_DBG("sk %p", sk); + + if (alen < sizeof(struct sockaddr_iso) || + addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) + return -EBADFD; + + if (sk->sk_type != SOCK_SEQPACKET) + return -EINVAL; + + /* Check if the address type is of LE type */ + if (!bdaddr_type_is_le(sa->iso_bdaddr_type)) + return -EINVAL; + + lock_sock(sk); + + bacpy(&iso_pi(sk)->dst, &sa->iso_bdaddr); + iso_pi(sk)->dst_type = sa->iso_bdaddr_type; + + release_sock(sk); + + if (bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) + err = iso_connect_cis(sk); + else + err = iso_connect_bis(sk); + + if (err) + return err; + + lock_sock(sk); + + if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + err = bt_sock_wait_state(sk, BT_CONNECTED, + sock_sndtimeo(sk, flags & O_NONBLOCK)); + } + + release_sock(sk); + return err; +} + +static int iso_listen_bis(struct sock *sk) +{ + struct hci_dev *hdev; + int err = 0; + struct iso_conn *conn; + struct hci_conn *hcon; + + BT_DBG("%pMR -> %pMR (SID 0x%2.2x)", &iso_pi(sk)->src, + &iso_pi(sk)->dst, iso_pi(sk)->bc_sid); + + write_lock(&iso_sk_list.lock); + + if (__iso_get_sock_listen_by_sid(&iso_pi(sk)->src, &iso_pi(sk)->dst, + iso_pi(sk)->bc_sid)) + err = -EADDRINUSE; + + write_unlock(&iso_sk_list.lock); + + if (err) + return err; + + hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, + iso_pi(sk)->src_type); + if (!hdev) + return -EHOSTUNREACH; + + hci_dev_lock(hdev); + lock_sock(sk); + + /* Fail if user set invalid QoS */ + if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) { + iso_pi(sk)->qos = 
default_qos; + err = -EINVAL; + goto unlock; + } + + hcon = hci_pa_create_sync(hdev, &iso_pi(sk)->dst, + le_addr_type(iso_pi(sk)->dst_type), + iso_pi(sk)->bc_sid, &iso_pi(sk)->qos); + if (IS_ERR(hcon)) { + err = PTR_ERR(hcon); + goto unlock; + } + + conn = iso_conn_add(hcon); + if (!conn) { + hci_conn_drop(hcon); + err = -ENOMEM; + goto unlock; + } + + err = iso_chan_add(conn, sk, NULL); + if (err) { + hci_conn_drop(hcon); + goto unlock; + } + +unlock: + release_sock(sk); + hci_dev_unlock(hdev); + hci_dev_put(hdev); + return err; +} + +static int iso_listen_cis(struct sock *sk) +{ + int err = 0; + + BT_DBG("%pMR", &iso_pi(sk)->src); + + write_lock(&iso_sk_list.lock); + + if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src, &iso_pi(sk)->dst)) + err = -EADDRINUSE; + + write_unlock(&iso_sk_list.lock); + + return err; +} + +static int iso_sock_listen(struct socket *sock, int backlog) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p backlog %d", sk, backlog); + + sock_hold(sk); + lock_sock(sk); + + if (sk->sk_state != BT_BOUND) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_SEQPACKET) { + err = -EINVAL; + goto done; + } + + if (!bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) { + err = iso_listen_cis(sk); + } else { + /* Drop sock lock to avoid potential + * deadlock with the hdev lock. + */ + release_sock(sk); + err = iso_listen_bis(sk); + lock_sock(sk); + } + + if (err) + goto done; + + sk->sk_max_ack_backlog = backlog; + sk->sk_ack_backlog = 0; + + sk->sk_state = BT_LISTEN; + +done: + release_sock(sk); + sock_put(sk); + return err; +} + +static int iso_sock_accept(struct socket *sock, struct socket *newsock, + struct proto_accept_arg *arg) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct sock *sk = sock->sk, *ch; + long timeo; + int err = 0; + + /* Use explicit nested locking to avoid lockdep warnings generated + * because the parent socket and the child socket are locked on the + * same thread. + */ + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + + timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK); + + BT_DBG("sk %p timeo %ld", sk, timeo); + + /* Wait for an incoming connection. (wake-one). */ + add_wait_queue_exclusive(sk_sleep(sk), &wait); + while (1) { + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; + break; + } + + ch = bt_accept_dequeue(sk, newsock); + if (ch) + break; + + if (!timeo) { + err = -EAGAIN; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + + release_sock(sk); + + timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + } + remove_wait_queue(sk_sleep(sk), &wait); + + if (err) + goto done; + + newsock->state = SS_CONNECTED; + + BT_DBG("new socket %p", ch); + + /* A Broadcast Sink might require BIG sync to be terminated + * and re-established multiple times, while keeping the same + * PA sync handle active. To allow this, once all BIS + * connections have been accepted on a PA sync parent socket, + * "reset" socket state, to allow future BIG re-sync procedures. + */ + if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) { + /* Iterate through the list of bound BIS indices + * and clear each BIS as they are accepted by the + * user space, one by one. 
+ */ + for (int i = 0; i < iso_pi(sk)->bc_num_bis; i++) { + if (iso_pi(sk)->bc_bis[i] > 0) { + iso_pi(sk)->bc_bis[i] = 0; + iso_pi(sk)->bc_num_bis--; + break; + } + } + + if (iso_pi(sk)->bc_num_bis == 0) { + /* Once the last BIS was accepted, reset parent + * socket parameters to mark that the listening + * process for BIS connections has been completed: + * + * 1. Reset the DEFER setup flag on the parent sk. + * 2. Clear the flag marking that the BIG create + * sync command is pending. + * 3. Transition socket state from BT_LISTEN to + * BT_CONNECTED. + */ + set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + clear_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags); + sk->sk_state = BT_CONNECTED; + } + } + +done: + release_sock(sk); + return err; +} + +static int iso_sock_getname(struct socket *sock, struct sockaddr *addr, + int peer) +{ + struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; + struct sock *sk = sock->sk; + int len = sizeof(struct sockaddr_iso); + + BT_DBG("sock %p, sk %p", sock, sk); + + addr->sa_family = AF_BLUETOOTH; + + if (peer) { + struct hci_conn *hcon = iso_pi(sk)->conn ? + iso_pi(sk)->conn->hcon : NULL; + + bacpy(&sa->iso_bdaddr, &iso_pi(sk)->dst); + sa->iso_bdaddr_type = iso_pi(sk)->dst_type; + + if (hcon && (hcon->type == BIS_LINK || hcon->type == PA_LINK)) { + sa->iso_bc->bc_sid = iso_pi(sk)->bc_sid; + sa->iso_bc->bc_num_bis = iso_pi(sk)->bc_num_bis; + memcpy(sa->iso_bc->bc_bis, iso_pi(sk)->bc_bis, + ISO_MAX_NUM_BIS); + len += sizeof(struct sockaddr_iso_bc); + } + } else { + bacpy(&sa->iso_bdaddr, &iso_pi(sk)->src); + sa->iso_bdaddr_type = iso_pi(sk)->src_type; + } + + return len; +} + +static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) +{ + struct sock *sk = sock->sk; + struct sk_buff *skb, **frag; + struct sockcm_cookie sockc; + size_t mtu; + int err; + + BT_DBG("sock %p, sk %p", sock, sk); + + err = sock_error(sk); + if (err) + return err; + + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + hci_sockcm_init(&sockc, sk); + + if (msg->msg_controllen) { + err = sock_cmsg_send(sk, msg, &sockc); + if (err) + return err; + } + + lock_sock(sk); + + if (sk->sk_state != BT_CONNECTED) { + release_sock(sk); + return -ENOTCONN; + } + + mtu = iso_pi(sk)->conn->hcon->mtu; + + release_sock(sk); + + skb = bt_skb_sendmsg(sk, msg, len, mtu, HCI_ISO_DATA_HDR_SIZE, 0); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + len -= skb->len; + + BT_DBG("skb %p len %d", sk, skb->len); + + /* Continuation fragments */ + frag = &skb_shinfo(skb)->frag_list; + while (len) { + struct sk_buff *tmp; + + tmp = bt_skb_sendmsg(sk, msg, len, mtu, 0, 0); + if (IS_ERR(tmp)) { + kfree_skb(skb); + return PTR_ERR(tmp); + } + + *frag = tmp; + + len -= tmp->len; + + skb->len += tmp->len; + skb->data_len += tmp->len; + + BT_DBG("frag %p len %d", *frag, tmp->len); + + frag = &(*frag)->next; + } + + lock_sock(sk); + + if (sk->sk_state == BT_CONNECTED) + err = iso_send_frame(sk, skb, &sockc); + else + err = -ENOTCONN; + + release_sock(sk); + + if (err < 0) + kfree_skb(skb); + return err; +} + +static void iso_conn_defer_accept(struct hci_conn *conn) +{ + struct hci_cp_le_accept_cis cp; + struct hci_dev *hdev = conn->hdev; + + BT_DBG("conn %p", conn); + + conn->state = BT_CONFIG; + + cp.handle = cpu_to_le16(conn->handle); + + hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); +} + +static void iso_conn_big_sync(struct sock *sk) +{ + int err; + struct hci_dev *hdev; + + hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, + iso_pi(sk)->src_type); + + if (!hdev) + return; + + 
/* hci_le_big_create_sync requires hdev lock to be held, since
+	 * it enqueues the HCI LE BIG Create Sync command via
+	 * hci_cmd_sync_queue_once, which checks hdev flags that might
+	 * change.
+	 */
+	hci_dev_lock(hdev);
+	lock_sock(sk);
+
+	if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
+		err = hci_conn_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
+					       &iso_pi(sk)->qos,
+					       iso_pi(sk)->sync_handle,
+					       iso_pi(sk)->bc_num_bis,
+					       iso_pi(sk)->bc_bis);
+		if (err)
+			bt_dev_err(hdev, "hci_conn_big_create_sync: %d", err);
+	}
+
+	release_sock(sk);
+	hci_dev_unlock(hdev);
+}
+
+static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+			    size_t len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct iso_pinfo *pi = iso_pi(sk);
+	bool early_ret = false;
+	int err = 0;
+
+	BT_DBG("sk %p", sk);
+
+	if (unlikely(flags & MSG_ERRQUEUE))
+		return sock_recv_errqueue(sk, msg, len, SOL_BLUETOOTH,
+					  BT_SCM_ERROR);
+
+	if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+		sock_hold(sk);
+		lock_sock(sk);
+
+		switch (sk->sk_state) {
+		case BT_CONNECT2:
+			if (test_bit(BT_SK_PA_SYNC, &pi->flags)) {
+				release_sock(sk);
+				iso_conn_big_sync(sk);
+				lock_sock(sk);
+
+				sk->sk_state = BT_LISTEN;
+			} else {
+				iso_conn_defer_accept(pi->conn->hcon);
+				sk->sk_state = BT_CONFIG;
+			}
+
+			early_ret = true;
+			break;
+		case BT_CONNECTED:
+			if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
+				release_sock(sk);
+				iso_conn_big_sync(sk);
+				lock_sock(sk);
+
+				sk->sk_state = BT_LISTEN;
+				early_ret = true;
+			}
+
+			break;
+		case BT_CONNECT:
+			release_sock(sk);
+			err = iso_connect_cis(sk);
+			lock_sock(sk);
+
+			early_ret = true;
+			break;
+		default:
+			break;
+		}
+
+		release_sock(sk);
+		sock_put(sk);
+
+		if (early_ret)
+			return err;
+	}
+
+	return bt_sock_recvmsg(sock, msg, len, flags);
+}
+
+static bool check_io_qos(struct bt_iso_io_qos *qos)
+{
+	/* If no PHY is enabled, SDU must be 0 */
+	if (!qos->phy && qos->sdu)
+		return false;
+
+	if (qos->interval && (qos->interval < 0xff || qos->interval > 0xfffff))
+		return false;
+
+	if (qos->latency && (qos->latency < 0x05 || qos->latency > 0xfa0))
+		return false;
+
+	if (qos->phy > BT_ISO_PHY_ANY)
+		return false;
+
+	return true;
+}
+
+static bool check_ucast_qos(struct bt_iso_qos *qos)
+{
+	if (qos->ucast.cig > 0xef && qos->ucast.cig != BT_ISO_QOS_CIG_UNSET)
+		return false;
+
+	if (qos->ucast.cis > 0xef && qos->ucast.cis != BT_ISO_QOS_CIS_UNSET)
+		return false;
+
+	if (qos->ucast.sca > 0x07)
+		return false;
+
+	if (qos->ucast.packing > 0x01)
+		return false;
+
+	if (qos->ucast.framing > 0x01)
+		return false;
+
+	if (!check_io_qos(&qos->ucast.in))
+		return false;
+
+	if (!check_io_qos(&qos->ucast.out))
+		return false;
+
+	return true;
+}
+
+static bool check_bcast_qos(struct bt_iso_qos *qos)
+{
+	if (!qos->bcast.sync_factor)
+		qos->bcast.sync_factor = 0x01;
+
+	if (qos->bcast.packing > 0x01)
+		return false;
+
+	if (qos->bcast.framing > 0x01)
+		return false;
+
+	if (!check_io_qos(&qos->bcast.in))
+		return false;
+
+	if (!check_io_qos(&qos->bcast.out))
+		return false;
+
+	if (qos->bcast.encryption > 0x01)
+		return false;
+
+	if (qos->bcast.options > 0x07)
+		return false;
+
+	if (qos->bcast.skip > 0x01f3)
+		return false;
+
+	if (!qos->bcast.sync_timeout)
+		qos->bcast.sync_timeout = BT_ISO_SYNC_TIMEOUT;
+
+	if (qos->bcast.sync_timeout < 0x000a || qos->bcast.sync_timeout > 0x4000)
+		return false;
+
+	if (qos->bcast.sync_cte_type > 0x1f)
+		return false;
+
+	if (qos->bcast.mse > 0x1f)
+		return false;
+
+	if (!qos->bcast.timeout)
+		qos->bcast.timeout = BT_ISO_SYNC_TIMEOUT;
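+
+	/* Note: sync_timeout above and timeout below are both expressed in
+	 * units of 10 ms, so the valid range 0x000a-0x4000 corresponds to
+	 * 100 ms - 163.84 s.
+	 */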
+ + if (qos->bcast.timeout < 0x000a || qos->bcast.timeout > 0x4000) + return false; + + return true; +} + +static int iso_sock_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + int err = 0; + struct bt_iso_qos qos = default_qos; + u32 opt; + + BT_DBG("sk %p", sk); + + lock_sock(sk); + + switch (optname) { + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) + break; + + if (opt) + set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + else + clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + break; + + case BT_PKT_STATUS: + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) + break; + + if (opt) + set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags); + else + clear_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags); + break; + + case BT_PKT_SEQNUM: + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) + break; + + if (opt) + set_bit(BT_SK_PKT_SEQNUM, &bt_sk(sk)->flags); + else + clear_bit(BT_SK_PKT_SEQNUM, &bt_sk(sk)->flags); + break; + + case BT_ISO_QOS: + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && + sk->sk_state != BT_CONNECT2 && + (!test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags) || + sk->sk_state != BT_CONNECTED)) { + err = -EINVAL; + break; + } + + err = copy_safe_from_sockptr(&qos, sizeof(qos), optval, optlen); + if (err) + break; + + iso_pi(sk)->qos = qos; + iso_pi(sk)->qos_user_set = true; + + break; + + case BT_ISO_BASE: + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && + sk->sk_state != BT_CONNECT2) { + err = -EINVAL; + break; + } + + if (optlen > sizeof(iso_pi(sk)->base)) { + err = -EINVAL; + break; + } + + err = copy_safe_from_sockptr(iso_pi(sk)->base, optlen, optval, + optlen); + if (err) + break; + + iso_pi(sk)->base_len = optlen; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int iso_sock_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + int len, err = 0; + struct bt_iso_qos *qos; + u8 base_len; + u8 *base; + + BT_DBG("sk %p", sk); + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case BT_DEFER_SETUP: + if (sk->sk_state == BT_CONNECTED) { + err = -EINVAL; + break; + } + + if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), + (u32 __user *)optval)) + err = -EFAULT; + + break; + + case BT_PKT_STATUS: + if (put_user(test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags), + (int __user *)optval)) + err = -EFAULT; + break; + + case BT_ISO_QOS: + qos = iso_sock_get_qos(sk); + + len = min_t(unsigned int, len, sizeof(*qos)); + if (copy_to_user(optval, qos, len)) + err = -EFAULT; + + break; + + case BT_ISO_BASE: + if (sk->sk_state == BT_CONNECTED && + !bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) { + base_len = iso_pi(sk)->conn->hcon->le_per_adv_data_len; + base = iso_pi(sk)->conn->hcon->le_per_adv_data; + } else { + base_len = iso_pi(sk)->base_len; + base = iso_pi(sk)->base; + } + + len = min_t(unsigned int, len, base_len); + if (copy_to_user(optval, base, len)) + err = -EFAULT; + if (put_user(len, optlen)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int iso_sock_shutdown(struct socket *sock, int how) +{ + struct sock *sk = sock->sk; + int err = 0; + + 
BT_DBG("sock %p, sk %p, how %d", sock, sk, how); + + if (!sk) + return 0; + + sock_hold(sk); + lock_sock(sk); + + switch (how) { + case SHUT_RD: + if (sk->sk_shutdown & RCV_SHUTDOWN) + goto unlock; + sk->sk_shutdown |= RCV_SHUTDOWN; + break; + case SHUT_WR: + if (sk->sk_shutdown & SEND_SHUTDOWN) + goto unlock; + sk->sk_shutdown |= SEND_SHUTDOWN; + break; + case SHUT_RDWR: + if (sk->sk_shutdown & SHUTDOWN_MASK) + goto unlock; + sk->sk_shutdown |= SHUTDOWN_MASK; + break; + } + + iso_sock_clear_timer(sk); + __iso_sock_close(sk); + + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + !(current->flags & PF_EXITING)) + err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); + +unlock: + release_sock(sk); + sock_put(sk); + + return err; +} + +static int iso_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + iso_sock_close(sk); + + if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) && + !(current->flags & PF_EXITING)) { + lock_sock(sk); + err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); + release_sock(sk); + } + + sock_orphan(sk); + iso_sock_kill(sk); + return err; +} + +static void iso_sock_ready(struct sock *sk) +{ + BT_DBG("sk %p", sk); + + if (!sk) + return; + + lock_sock(sk); + iso_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + sk->sk_state_change(sk); + release_sock(sk); +} + +static bool iso_match_big(struct sock *sk, void *data) +{ + struct hci_evt_le_big_sync_established *ev = data; + + return ev->handle == iso_pi(sk)->qos.bcast.big; +} + +static bool iso_match_big_hcon(struct sock *sk, void *data) +{ + struct hci_conn *hcon = data; + + return hcon->iso_qos.bcast.big == iso_pi(sk)->qos.bcast.big; +} + +static bool iso_match_pa_sync_flag(struct sock *sk, void *data) +{ + return test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags); +} + +static bool iso_match_dst(struct sock *sk, void *data) +{ + return !bacmp(&iso_pi(sk)->dst, (bdaddr_t *)data); +} + +static void iso_conn_ready(struct iso_conn *conn) +{ + struct sock *parent = NULL; + struct sock *sk = conn->sk; + struct hci_ev_le_big_sync_established *ev = NULL; + struct hci_ev_le_pa_sync_established *ev2 = NULL; + struct hci_ev_le_per_adv_report *ev3 = NULL; + struct hci_conn *hcon; + struct hci_dev *hdev; + + BT_DBG("conn %p", conn); + + if (sk) { + /* Attempt to update source address in case of BIS Sender if + * the advertisement is using a random address. + */ + if (conn->hcon->type == BIS_LINK && + conn->hcon->role == HCI_ROLE_MASTER && + !bacmp(&conn->hcon->dst, BDADDR_ANY)) { + struct hci_conn *bis = conn->hcon; + struct adv_info *adv; + + adv = hci_find_adv_instance(bis->hdev, + bis->iso_qos.bcast.bis); + if (adv && bacmp(&adv->random_addr, BDADDR_ANY)) { + lock_sock(sk); + iso_pi(sk)->src_type = BDADDR_LE_RANDOM; + bacpy(&iso_pi(sk)->src, &adv->random_addr); + release_sock(sk); + } + } + + iso_sock_ready(conn->sk); + } else { + hcon = conn->hcon; + if (!hcon) + return; + + hdev = hcon->hdev; + + if (test_bit(HCI_CONN_BIG_SYNC, &hcon->flags)) { + /* A BIS slave hcon is notified to the ISO layer + * after the Command Complete for the LE Setup + * ISO Data Path command is received. Get the + * parent socket that matches the hcon BIG handle. 
+			 */
+			parent = iso_get_sock(hdev, &hcon->src, &hcon->dst,
+					      BT_LISTEN, iso_match_big_hcon,
+					      hcon);
+		} else if (test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
+			ev = hci_recv_event_data(hcon->hdev,
+						 HCI_EVT_LE_BIG_SYNC_ESTABLISHED);
+
+			/* Get reference to PA sync parent socket, if it exists */
+			parent = iso_get_sock(hdev, &hcon->src, &hcon->dst,
+					      BT_LISTEN,
+					      iso_match_pa_sync_flag,
+					      NULL);
+			if (!parent && ev)
+				parent = iso_get_sock(hdev, &hcon->src,
+						      &hcon->dst,
+						      BT_LISTEN,
+						      iso_match_big, ev);
+		} else if (test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
+			ev2 = hci_recv_event_data(hcon->hdev,
+						  HCI_EV_LE_PA_SYNC_ESTABLISHED);
+			if (ev2)
+				parent = iso_get_sock(hdev, &hcon->src,
+						      &hcon->dst,
+						      BT_LISTEN,
+						      iso_match_sid, ev2);
+		} else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
+			ev3 = hci_recv_event_data(hcon->hdev,
+						  HCI_EV_LE_PER_ADV_REPORT);
+			if (ev3)
+				parent = iso_get_sock(hdev, &hcon->src,
+						      &hcon->dst,
+						      BT_LISTEN,
+						      iso_match_sync_handle_pa_report,
+						      ev3);
+		}
+
+		if (!parent)
+			parent = iso_get_sock(hdev, &hcon->src, BDADDR_ANY,
+					      BT_LISTEN, iso_match_dst,
+					      BDADDR_ANY);
+
+		if (!parent)
+			return;
+
+		lock_sock(parent);
+
+		sk = iso_sock_alloc(sock_net(parent), NULL,
+				    BTPROTO_ISO, GFP_ATOMIC, 0);
+		if (!sk) {
+			release_sock(parent);
+			return;
+		}
+
+		iso_sock_init(sk, parent);
+
+		bacpy(&iso_pi(sk)->src, &hcon->src);
+
+		/* Convert from HCI to three-value type */
+		if (hcon->src_type == ADDR_LE_DEV_PUBLIC)
+			iso_pi(sk)->src_type = BDADDR_LE_PUBLIC;
+		else
+			iso_pi(sk)->src_type = BDADDR_LE_RANDOM;
+
+		/* If hcon has no destination address (BDADDR_ANY) it means it
+		 * was created by HCI_EVT_LE_BIG_SYNC_ESTABLISHED or
+		 * HCI_EV_LE_PA_SYNC_ESTABLISHED so we need to initialize using
+		 * the parent socket destination address.
+		 */
+		if (!bacmp(&hcon->dst, BDADDR_ANY)) {
+			bacpy(&hcon->dst, &iso_pi(parent)->dst);
+			hcon->dst_type = le_addr_type(iso_pi(parent)->dst_type);
+		}
+
+		if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
+			iso_pi(sk)->qos = iso_pi(parent)->qos;
+			hcon->iso_qos = iso_pi(sk)->qos;
+			iso_pi(sk)->bc_sid = iso_pi(parent)->bc_sid;
+			iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis;
+			memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis,
+			       ISO_MAX_NUM_BIS);
+			set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
+		}
+
+		bacpy(&iso_pi(sk)->dst, &hcon->dst);
+
+		/* Convert from HCI to three-value type */
+		if (hcon->dst_type == ADDR_LE_DEV_PUBLIC)
+			iso_pi(sk)->dst_type = BDADDR_LE_PUBLIC;
+		else
+			iso_pi(sk)->dst_type = BDADDR_LE_RANDOM;
+
+		iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle;
+		memcpy(iso_pi(sk)->base, iso_pi(parent)->base, iso_pi(parent)->base_len);
+		iso_pi(sk)->base_len = iso_pi(parent)->base_len;
+
+		hci_conn_hold(hcon);
+		iso_chan_add(conn, sk, parent);
+
+		if ((ev && ((struct hci_evt_le_big_sync_established *)ev)->status) ||
+		    (ev2 && ev2->status)) {
+			/* Trigger error signal on child socket */
+			sk->sk_err = ECONNREFUSED;
+			sk->sk_error_report(sk);
+		}
+
+		if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
+			sk->sk_state = BT_CONNECT2;
+		else
+			sk->sk_state = BT_CONNECTED;
+
+		/* Wake up parent */
+		parent->sk_data_ready(parent);
+
+		release_sock(parent);
+		sock_put(parent);
+	}
+}
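+
+/* For orientation, a rough user-space sketch (illustrative only, not a
+ * complete program; the variable names are made up) of the broadcast-sink
+ * flow that iso_conn_ready() above and the matchers below serve:
+ *
+ *	fd = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_ISO);
+ *	setsockopt(fd, SOL_BLUETOOTH, BT_DEFER_SETUP, &one, sizeof(one));
+ *	// bind() with struct sockaddr_iso plus sockaddr_iso_bc filled in
+ *	// (bc_bdaddr = broadcaster, bc_sid, bc_num_bis, bc_bis[])
+ *	bind(fd, (struct sockaddr *)&addr, sizeof(addr) + sizeof(addr_bc));
+ *	listen(fd, 1);			// kicks off PA create sync
+ *	pa_fd = accept(fd, NULL, NULL);	// child sk from iso_conn_ready()
+ *	read(pa_fd, buf, sizeof(buf));	// deferred setup: BIG create sync
+ *	bis_fd = accept(pa_fd, NULL, NULL); // one socket per synced BIS
+ */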
+
+static bool iso_match_sid(struct sock *sk, void *data)
+{
+	struct hci_ev_le_pa_sync_established *ev = data;
+
+	if (iso_pi(sk)->bc_sid == HCI_SID_INVALID)
+		return true;
+
+	return ev->sid == iso_pi(sk)->bc_sid;
+}
+
+static bool iso_match_sid_past(struct sock *sk, void *data)
+{
+	struct hci_ev_le_past_received *ev = data;
+
+	if (iso_pi(sk)->bc_sid == HCI_SID_INVALID)
+		return true;
+
+	return ev->sid == iso_pi(sk)->bc_sid;
+}
+
+static bool iso_match_sync_handle(struct sock *sk, void *data)
+{
+	struct hci_evt_le_big_info_adv_report *ev = data;
+
+	return le16_to_cpu(ev->sync_handle) == iso_pi(sk)->sync_handle;
+}
+
+static bool iso_match_sync_handle_pa_report(struct sock *sk, void *data)
+{
+	struct hci_ev_le_per_adv_report *ev = data;
+
+	return le16_to_cpu(ev->sync_handle) == iso_pi(sk)->sync_handle;
+}
+
+/* ----- ISO interface with lower layer (HCI) ----- */
+
+int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+{
+	struct hci_ev_le_pa_sync_established *ev1;
+	struct hci_ev_le_past_received *ev1a;
+	struct hci_evt_le_big_info_adv_report *ev2;
+	struct hci_ev_le_per_adv_report *ev3;
+	struct sock *sk;
+
+	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
+
+	/* Broadcast receiver requires handling of some events before it can
+	 * proceed to establishing a BIG sync:
+	 *
+	 * 1. HCI_EV_LE_PA_SYNC_ESTABLISHED: The socket may specify a specific
+	 * SID to listen to and once sync is established its handle needs to
+	 * be stored in iso_pi(sk)->sync_handle so it can be matched once
+	 * receiving the BIG Info.
+	 * 1a. HCI_EV_LE_PAST_RECEIVED: alternative to 1.
+	 * 2. HCI_EVT_LE_BIG_INFO_ADV_REPORT: When connect_ind is triggered by
+	 * a BIG Info it attempts to check if there is any listening socket
+	 * with the same sync_handle and, if so, attempts to create a sync.
+	 * 3. HCI_EV_LE_PER_ADV_REPORT: When a PA report is received, it is
+	 * stored in iso_pi(sk)->base so it can be passed up to user space in
+	 * the case of a broadcast sink.
+ */ + ev1 = hci_recv_event_data(hdev, HCI_EV_LE_PA_SYNC_ESTABLISHED); + if (ev1) { + sk = iso_get_sock(hdev, &hdev->bdaddr, bdaddr, BT_LISTEN, + iso_match_sid, ev1); + if (sk && !ev1->status) { + iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle); + iso_pi(sk)->bc_sid = ev1->sid; + } + + goto done; + } + + ev1a = hci_recv_event_data(hdev, HCI_EV_LE_PAST_RECEIVED); + if (ev1a) { + sk = iso_get_sock(hdev, &hdev->bdaddr, bdaddr, BT_LISTEN, + iso_match_sid_past, ev1a); + if (sk && !ev1a->status) { + iso_pi(sk)->sync_handle = le16_to_cpu(ev1a->sync_handle); + iso_pi(sk)->bc_sid = ev1a->sid; + } + + goto done; + } + + ev2 = hci_recv_event_data(hdev, HCI_EVT_LE_BIG_INFO_ADV_REPORT); + if (ev2) { + /* Check if BIGInfo report has already been handled */ + sk = iso_get_sock(hdev, &hdev->bdaddr, bdaddr, BT_CONNECTED, + iso_match_sync_handle, ev2); + if (sk) { + sock_put(sk); + sk = NULL; + goto done; + } + + /* Try to get PA sync socket, if it exists */ + sk = iso_get_sock(hdev, &hdev->bdaddr, bdaddr, BT_CONNECT2, + iso_match_sync_handle, ev2); + if (!sk) + sk = iso_get_sock(hdev, &hdev->bdaddr, bdaddr, + BT_LISTEN, + iso_match_sync_handle, + ev2); + + if (sk) { + int err; + struct hci_conn *hcon = iso_pi(sk)->conn->hcon; + + iso_pi(sk)->qos.bcast.encryption = ev2->encryption; + + if (ev2->num_bis < iso_pi(sk)->bc_num_bis) + iso_pi(sk)->bc_num_bis = ev2->num_bis; + + if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) && + !test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) { + err = hci_conn_big_create_sync(hdev, hcon, + &iso_pi(sk)->qos, + iso_pi(sk)->sync_handle, + iso_pi(sk)->bc_num_bis, + iso_pi(sk)->bc_bis); + if (err) { + bt_dev_err(hdev, "hci_le_big_create_sync: %d", + err); + sock_put(sk); + sk = NULL; + } + } + } + + goto done; + } + + ev3 = hci_recv_event_data(hdev, HCI_EV_LE_PER_ADV_REPORT); + if (ev3) { + size_t base_len = 0; + u8 *base; + struct hci_conn *hcon; + + sk = iso_get_sock(hdev, &hdev->bdaddr, bdaddr, BT_LISTEN, + iso_match_sync_handle_pa_report, ev3); + if (!sk) + goto done; + + hcon = iso_pi(sk)->conn->hcon; + if (!hcon) + goto done; + + if (ev3->data_status == LE_PA_DATA_TRUNCATED) { + /* The controller was unable to retrieve PA data. */ + memset(hcon->le_per_adv_data, 0, + HCI_MAX_PER_AD_TOT_LEN); + hcon->le_per_adv_data_len = 0; + hcon->le_per_adv_data_offset = 0; + goto done; + } + + if (hcon->le_per_adv_data_offset + ev3->length > + HCI_MAX_PER_AD_TOT_LEN) + goto done; + + memcpy(hcon->le_per_adv_data + hcon->le_per_adv_data_offset, + ev3->data, ev3->length); + hcon->le_per_adv_data_offset += ev3->length; + + if (ev3->data_status == LE_PA_DATA_COMPLETE) { + /* All PA data has been received. */ + hcon->le_per_adv_data_len = + hcon->le_per_adv_data_offset; + hcon->le_per_adv_data_offset = 0; + + /* Extract BASE */ + base = eir_get_service_data(hcon->le_per_adv_data, + hcon->le_per_adv_data_len, + EIR_BAA_SERVICE_UUID, + &base_len); + + if (!base || base_len > BASE_MAX_LENGTH) + goto done; + + memcpy(iso_pi(sk)->base, base, base_len); + iso_pi(sk)->base_len = base_len; + } else { + /* This is a PA data fragment. Keep pa_data_len set to 0 + * until all data has been reassembled. 
+ */ + hcon->le_per_adv_data_len = 0; + } + } else { + sk = iso_get_sock(hdev, &hdev->bdaddr, BDADDR_ANY, + BT_LISTEN, iso_match_dst, BDADDR_ANY); + } + +done: + if (!sk) + return 0; + + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) + *flags |= HCI_PROTO_DEFER; + + sock_put(sk); + + return HCI_LM_ACCEPT; +} + +static void iso_connect_cfm(struct hci_conn *hcon, __u8 status) +{ + if (hcon->type != CIS_LINK && hcon->type != BIS_LINK && + hcon->type != PA_LINK) { + if (hcon->type != LE_LINK) + return; + + /* Check if LE link has failed */ + if (status) { + struct hci_link *link, *t; + + list_for_each_entry_safe(link, t, &hcon->link_list, + list) + iso_conn_del(link->conn, bt_to_errno(status)); + + return; + } + + /* Create CIS if pending */ + hci_le_create_cis_pending(hcon->hdev); + return; + } + + BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); + + /* Similar to the success case, if HCI_CONN_BIG_SYNC_FAILED or + * HCI_CONN_PA_SYNC_FAILED is set, queue the failed connection + * into the accept queue of the listening socket and wake up + * userspace, to inform the user about the event. + */ + if (!status || test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags) || + test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) { + struct iso_conn *conn; + + conn = iso_conn_add(hcon); + if (conn) + iso_conn_ready(conn); + } else { + iso_conn_del(hcon, bt_to_errno(status)); + } +} + +static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason) +{ + if (hcon->type != CIS_LINK && hcon->type != BIS_LINK && + hcon->type != PA_LINK) + return; + + BT_DBG("hcon %p reason %d", hcon, reason); + + iso_conn_del(hcon, bt_to_errno(reason)); +} + +int iso_recv(struct hci_dev *hdev, u16 handle, struct sk_buff *skb, u16 flags) +{ + struct hci_conn *hcon; + struct iso_conn *conn; + struct skb_shared_hwtstamps *hwts; + __u16 pb, ts, len, sn; + + hci_dev_lock(hdev); + + hcon = hci_conn_hash_lookup_handle(hdev, handle); + if (!hcon) { + hci_dev_unlock(hdev); + kfree_skb(skb); + return -ENOENT; + } + + conn = iso_conn_hold_unless_zero(hcon->iso_data); + hcon = NULL; + + hci_dev_unlock(hdev); + + if (!conn) { + kfree_skb(skb); + return -EINVAL; + } + + pb = hci_iso_flags_pb(flags); + ts = hci_iso_flags_ts(flags); + + BT_DBG("conn %p len %d pb 0x%x ts 0x%x", conn, skb->len, pb, ts); + + switch (pb) { + case ISO_START: + case ISO_SINGLE: + if (conn->rx_len) { + BT_ERR("Unexpected start frame (len %d)", skb->len); + kfree_skb(conn->rx_skb); + conn->rx_skb = NULL; + conn->rx_len = 0; + } + + if (ts) { + struct hci_iso_ts_data_hdr *hdr; + + hdr = skb_pull_data(skb, HCI_ISO_TS_DATA_HDR_SIZE); + if (!hdr) { + BT_ERR("Frame is too short (len %d)", skb->len); + goto drop; + } + + /* Record the timestamp to skb */ + hwts = skb_hwtstamps(skb); + hwts->hwtstamp = us_to_ktime(le32_to_cpu(hdr->ts)); + + sn = __le16_to_cpu(hdr->sn); + len = __le16_to_cpu(hdr->slen); + } else { + struct hci_iso_data_hdr *hdr; + + hdr = skb_pull_data(skb, HCI_ISO_DATA_HDR_SIZE); + if (!hdr) { + BT_ERR("Frame is too short (len %d)", skb->len); + goto drop; + } + + sn = __le16_to_cpu(hdr->sn); + len = __le16_to_cpu(hdr->slen); + } + + flags = hci_iso_data_flags(len); + len = hci_iso_data_len(len); + + BT_DBG("Start: total len %d, frag len %d flags 0x%4.4x sn %d", + len, skb->len, flags, sn); + + if (len == skb->len) { + /* Complete frame received */ + hci_skb_pkt_status(skb) = flags & 0x03; + hci_skb_pkt_seqnum(skb) = sn; + iso_recv_frame(conn, skb); + goto done; + } + + if (pb == ISO_SINGLE) { + BT_ERR("Frame malformed (len %d, expected len 
%d)", + skb->len, len); + goto drop; + } + + if (skb->len > len) { + BT_ERR("Frame is too long (len %d, expected len %d)", + skb->len, len); + goto drop; + } + + /* Allocate skb for the complete frame (with header) */ + conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); + if (!conn->rx_skb) + goto drop; + + hci_skb_pkt_status(conn->rx_skb) = flags & 0x03; + hci_skb_pkt_seqnum(conn->rx_skb) = sn; + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); + conn->rx_len = len - skb->len; + + /* Copy hw timestamp from skb to rx_skb if present */ + if (ts) { + hwts = skb_hwtstamps(conn->rx_skb); + hwts->hwtstamp = skb_hwtstamps(skb)->hwtstamp; + } + + break; + + case ISO_CONT: + BT_DBG("Cont: frag len %d (expecting %d)", skb->len, + conn->rx_len); + + if (!conn->rx_len) { + BT_ERR("Unexpected continuation frame (len %d)", + skb->len); + goto drop; + } + + if (skb->len > conn->rx_len) { + BT_ERR("Fragment is too long (len %d, expected %d)", + skb->len, conn->rx_len); + kfree_skb(conn->rx_skb); + conn->rx_skb = NULL; + conn->rx_len = 0; + goto drop; + } + + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); + conn->rx_len -= skb->len; + break; + + case ISO_END: + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); + conn->rx_len -= skb->len; + + if (!conn->rx_len) { + struct sk_buff *rx_skb = conn->rx_skb; + + /* Complete frame received. iso_recv_frame + * takes ownership of the skb so set the global + * rx_skb pointer to NULL first. + */ + conn->rx_skb = NULL; + iso_recv_frame(conn, rx_skb); + } + break; + } + +drop: + kfree_skb(skb); +done: + iso_conn_put(conn); + return 0; +} + +static struct hci_cb iso_cb = { + .name = "ISO", + .connect_cfm = iso_connect_cfm, + .disconn_cfm = iso_disconn_cfm, +}; + +static int iso_debugfs_show(struct seq_file *f, void *p) +{ + struct sock *sk; + + read_lock(&iso_sk_list.lock); + + sk_for_each(sk, &iso_sk_list.head) { + seq_printf(f, "%pMR %pMR %d\n", &iso_pi(sk)->src, + &iso_pi(sk)->dst, sk->sk_state); + } + + read_unlock(&iso_sk_list.lock); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(iso_debugfs); + +static struct dentry *iso_debugfs; + +static const struct proto_ops iso_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = iso_sock_release, + .bind = iso_sock_bind, + .connect = iso_sock_connect, + .listen = iso_sock_listen, + .accept = iso_sock_accept, + .getname = iso_sock_getname, + .sendmsg = iso_sock_sendmsg, + .recvmsg = iso_sock_recvmsg, + .poll = bt_sock_poll, + .ioctl = bt_sock_ioctl, + .mmap = sock_no_mmap, + .socketpair = sock_no_socketpair, + .shutdown = iso_sock_shutdown, + .setsockopt = iso_sock_setsockopt, + .getsockopt = iso_sock_getsockopt +}; + +static const struct net_proto_family iso_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = iso_sock_create, +}; + +static bool inited; + +bool iso_inited(void) +{ + return inited; +} + +int iso_init(void) +{ + int err; + + BUILD_BUG_ON(sizeof(struct sockaddr_iso) > sizeof(struct sockaddr)); + + if (inited) + return -EALREADY; + + err = proto_register(&iso_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_ISO, &iso_sock_family_ops); + if (err < 0) { + BT_ERR("ISO socket registration failed"); + goto error; + } + + err = bt_procfs_init(&init_net, "iso", &iso_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create ISO proc file"); + bt_sock_unregister(BTPROTO_ISO); + goto error; + } + + BT_INFO("ISO socket layer initialized"); + + hci_register_cb(&iso_cb); 
+ + if (!IS_ERR_OR_NULL(bt_debugfs)) + iso_debugfs = debugfs_create_file("iso", 0444, bt_debugfs, + NULL, &iso_debugfs_fops); + + inited = true; + + return 0; + +error: + proto_unregister(&iso_proto); + return err; +} + +int iso_exit(void) +{ + if (!inited) + return -EALREADY; + + bt_procfs_cleanup(&init_net, "iso"); + + debugfs_remove(iso_debugfs); + iso_debugfs = NULL; + + hci_unregister_cb(&iso_cb); + + bt_sock_unregister(BTPROTO_ISO); + + proto_unregister(&iso_proto); + + inited = false; + + return 0; +} diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index e817ff0607a0..07b493331fd7 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -39,13 +39,11 @@ #include <net/bluetooth/l2cap.h> #include "smp.h" -#include "a2mp.h" -#include "amp.h" #define LE_FLOWCTL_MAX_CREDITS 65535 bool disable_ertm; -bool enable_ecred; +bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED); static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD; @@ -61,6 +59,9 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err); static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff_head *skbs, u8 event); +static void l2cap_retrans_timeout(struct work_struct *work); +static void l2cap_monitor_timeout(struct work_struct *work); +static void l2cap_ack_timeout(struct work_struct *work); static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type) { @@ -111,34 +112,39 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, } /* Find channel with given SCID. - * Returns locked channel. */ + * Returns a reference locked channel. + */ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; - mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_scid(conn, cid); - if (c) - l2cap_chan_lock(c); - mutex_unlock(&conn->chan_lock); + if (c) { + /* Only lock if chan reference is not 0 */ + c = l2cap_chan_hold_unless_zero(c); + if (c) + l2cap_chan_lock(c); + } return c; } /* Find channel with given DCID. - * Returns locked channel. + * Returns a reference locked channel. */ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; - mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_dcid(conn, cid); - if (c) - l2cap_chan_lock(c); - mutex_unlock(&conn->chan_lock); + if (c) { + /* Only lock if chan reference is not 0 */ + c = l2cap_chan_hold_unless_zero(c); + if (c) + l2cap_chan_lock(c); + } return c; } @@ -155,20 +161,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, return NULL; } -static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, - u8 ident) -{ - struct l2cap_chan *c; - - mutex_lock(&conn->chan_lock); - c = __l2cap_get_chan_by_ident(conn, ident); - if (c) - l2cap_chan_lock(c); - mutex_unlock(&conn->chan_lock); - - return c; -} - static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src, u8 src_type) { @@ -419,7 +411,10 @@ static void l2cap_chan_timeout(struct work_struct *work) BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); - mutex_lock(&conn->chan_lock); + if (!conn) + return; + + mutex_lock(&conn->lock); /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling * this work. No need to call l2cap_chan_hold(chan) here again. 
*/ @@ -440,7 +435,7 @@ static void l2cap_chan_timeout(struct work_struct *work) l2cap_chan_unlock(chan); l2cap_chan_put(chan); - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); } struct l2cap_chan *l2cap_chan_create(void) @@ -458,11 +453,17 @@ struct l2cap_chan *l2cap_chan_create(void) /* Set default lock nesting level */ atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL); + /* Available receive buffer space is initially unknown */ + chan->rx_avail = -1; + write_lock(&chan_list_lock); list_add(&chan->global_l, &chan_list); write_unlock(&chan_list_lock); INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout); + INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); + INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); + INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); chan->state = BT_OPEN; @@ -496,6 +497,17 @@ void l2cap_chan_hold(struct l2cap_chan *c) kref_get(&c->kref); } +EXPORT_SYMBOL_GPL(l2cap_chan_hold); + +struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c) +{ + BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); + + if (!kref_get_unless_zero(&c->kref)) + return NULL; + + return c; +} void l2cap_chan_put(struct l2cap_chan *c) { @@ -526,6 +538,28 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan) } EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults); +static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan) +{ + size_t sdu_len = chan->sdu ? chan->sdu->len : 0; + + if (chan->mps == 0) + return 0; + + /* If we don't know the available space in the receiver buffer, give + * enough credits for a full packet. + */ + if (chan->rx_avail == -1) + return (chan->imtu / chan->mps) + 1; + + /* If we know how much space is available in the receive buffer, give + * out as many credits as would fill the buffer. 
+ */ + if (chan->rx_avail <= sdu_len) + return 0; + + return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps); +} + static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits) { chan->sdu = NULL; @@ -534,8 +568,7 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits) chan->tx_credits = tx_credits; /* Derive MPS from connection MTU to stop HCI fragmentation */ chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE); - /* Give enough credits for a full packet */ - chan->rx_credits = (chan->imtu / chan->mps) + 1; + chan->rx_credits = l2cap_le_rx_credits(chan); skb_queue_head_init(&chan->tx_q); } @@ -547,7 +580,7 @@ static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits) /* L2CAP implementations shall support a minimum MPS of 64 octets */ if (chan->mps < L2CAP_ECRED_MIN_MPS) { chan->mps = L2CAP_ECRED_MIN_MPS; - chan->rx_credits = (chan->imtu / chan->mps) + 1; + chan->rx_credits = l2cap_le_rx_credits(chan); } } @@ -600,14 +633,15 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) hci_conn_hold(conn->hcon); - list_add(&chan->list, &conn->chan_l); + /* Append to the list since the order matters for ECRED */ + list_add_tail(&chan->list, &conn->chan_l); } void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); __l2cap_chan_add(conn, chan); - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); } void l2cap_chan_del(struct l2cap_chan *chan, int err) @@ -622,7 +656,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) chan->ops->teardown(chan, err); if (conn) { - struct amp_mgr *mgr = conn->hcon->amp_mgr; /* Delete from channel list */ list_del(&chan->list); @@ -637,16 +670,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) if (chan->chan_type != L2CAP_CHAN_FIXED || test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) hci_conn_drop(conn->hcon); - - if (mgr && mgr->bredr_chan == chan) - mgr->bredr_chan = NULL; - } - - if (chan->hs_hchan) { - struct hci_chan *hs_hchan = chan->hs_hchan; - - BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan); - amp_disconnect_logical_link(hs_hchan); } if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) @@ -679,6 +702,17 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) } EXPORT_SYMBOL_GPL(l2cap_chan_del); +static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id, + l2cap_chan_func_t func, void *data) +{ + struct l2cap_chan *chan, *l; + + list_for_each_entry_safe(chan, l, &conn->chan_l, list) { + if (chan->ident == id) + func(chan, data); + } +} + static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, void *data) { @@ -695,9 +729,9 @@ void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, if (!conn) return; - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); __l2cap_chan_list(conn, func, data); - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); } EXPORT_SYMBOL_GPL(l2cap_chan_list); @@ -705,11 +739,11 @@ EXPORT_SYMBOL_GPL(l2cap_chan_list); static void l2cap_conn_update_id_addr(struct work_struct *work) { struct l2cap_conn *conn = container_of(work, struct l2cap_conn, - id_addr_update_work); + id_addr_timer.work); struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan; - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); @@ -718,7 +752,7 @@ static void l2cap_conn_update_id_addr(struct work_struct 
*work) l2cap_chan_unlock(chan); } - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); } static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan) @@ -746,23 +780,9 @@ static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan) static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan) { - struct l2cap_conn *conn = chan->conn; - struct l2cap_ecred_conn_rsp rsp; - u16 result; - - if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) - result = L2CAP_CR_LE_AUTHORIZATION; - else - result = L2CAP_CR_LE_BAD_PSM; - l2cap_state_change(chan, BT_DISCONN); - memset(&rsp, 0, sizeof(rsp)); - - rsp.result = cpu_to_le16(result); - - l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), - &rsp); + __l2cap_ecred_conn_rsp_defer(chan); } static void l2cap_chan_connect_reject(struct l2cap_chan *chan) @@ -817,7 +837,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) break; case L2CAP_MODE_EXT_FLOWCTL: l2cap_chan_ecred_connect_reject(chan); - break; + return; } } } @@ -926,6 +946,16 @@ static u8 l2cap_get_ident(struct l2cap_conn *conn) return id; } +static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb, + u8 flags) +{ + /* Check if the hcon still valid before attempting to send */ + if (hci_conn_valid(conn->hcon->hdev, conn->hcon)) + hci_send_acl(conn->hchan, skb, flags); + else + kfree_skb(skb); +} + static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) { @@ -948,13 +978,7 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; skb->priority = HCI_PRIO_MAX; - hci_send_acl(conn->hchan, skb, flags); -} - -static bool __chan_is_moving(struct l2cap_chan *chan) -{ - return chan->move_state != L2CAP_MOVE_STABLE && - chan->move_state != L2CAP_MOVE_WAIT_PREPARE; + l2cap_send_acl(conn, skb, flags); } static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) @@ -965,15 +989,6 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, skb->priority); - if (chan->hs_hcon && !__chan_is_moving(chan)) { - if (chan->hs_hchan) - hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE); - else - kfree_skb(skb); - - return; - } - /* Use NO_FLUSH for LE links (where this is the only option) or * if the BR/EDR link supports it and flushing has not been * explicitly requested (through FLAG_FLUSHABLE). 
@@ -1154,9 +1169,6 @@ static void l2cap_send_sframe(struct l2cap_chan *chan, if (!control->sframe) return; - if (__chan_is_moving(chan)) - return; - if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) && !control->poll) control->final = 1; @@ -1211,40 +1223,6 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); } -static bool __amp_capable(struct l2cap_chan *chan) -{ - struct l2cap_conn *conn = chan->conn; - struct hci_dev *hdev; - bool amp_available = false; - - if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) - return false; - - if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP)) - return false; - - read_lock(&hci_dev_list_lock); - list_for_each_entry(hdev, &hci_dev_list, list) { - if (hdev->amp_type != AMP_TYPE_BREDR && - test_bit(HCI_UP, &hdev->flags)) { - amp_available = true; - break; - } - } - read_unlock(&hci_dev_list_lock); - - if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED) - return amp_available; - - return false; -} - -static bool l2cap_check_efs(struct l2cap_chan *chan) -{ - /* Check EFS parameters */ - return true; -} - void l2cap_send_conn_req(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; @@ -1260,76 +1238,6 @@ void l2cap_send_conn_req(struct l2cap_chan *chan) l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req); } -static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id) -{ - struct l2cap_create_chan_req req; - req.scid = cpu_to_le16(chan->scid); - req.psm = chan->psm; - req.amp_id = amp_id; - - chan->ident = l2cap_get_ident(chan->conn); - - l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ, - sizeof(req), &req); -} - -static void l2cap_move_setup(struct l2cap_chan *chan) -{ - struct sk_buff *skb; - - BT_DBG("chan %p", chan); - - if (chan->mode != L2CAP_MODE_ERTM) - return; - - __clear_retrans_timer(chan); - __clear_monitor_timer(chan); - __clear_ack_timer(chan); - - chan->retry_count = 0; - skb_queue_walk(&chan->tx_q, skb) { - if (bt_cb(skb)->l2cap.retries) - bt_cb(skb)->l2cap.retries = 1; - else - break; - } - - chan->expected_tx_seq = chan->buffer_seq; - - clear_bit(CONN_REJ_ACT, &chan->conn_state); - clear_bit(CONN_SREJ_ACT, &chan->conn_state); - l2cap_seq_list_clear(&chan->retrans_list); - l2cap_seq_list_clear(&chan->srej_list); - skb_queue_purge(&chan->srej_q); - - chan->tx_state = L2CAP_TX_STATE_XMIT; - chan->rx_state = L2CAP_RX_STATE_MOVE; - - set_bit(CONN_REMOTE_BUSY, &chan->conn_state); -} - -static void l2cap_move_done(struct l2cap_chan *chan) -{ - u8 move_role = chan->move_role; - BT_DBG("chan %p", chan); - - chan->move_state = L2CAP_MOVE_STABLE; - chan->move_role = L2CAP_MOVE_ROLE_NONE; - - if (chan->mode != L2CAP_MODE_ERTM) - return; - - switch (move_role) { - case L2CAP_MOVE_ROLE_INITIATOR: - l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL); - chan->rx_state = L2CAP_RX_STATE_WAIT_F; - break; - case L2CAP_MOVE_ROLE_RESPONDER: - chan->rx_state = L2CAP_RX_STATE_WAIT_P; - break; - } -} - static void l2cap_chan_ready(struct l2cap_chan *chan) { /* The channel may have already been flagged as connected in @@ -1369,6 +1277,7 @@ static void l2cap_le_connect(struct l2cap_chan *chan) l2cap_le_flowctl_init(chan, 0); + memset(&req, 0, sizeof(req)); req.psm = chan->psm; req.scid = cpu_to_le16(chan->scid); req.mtu = cpu_to_le16(chan->imtu); @@ -1383,7 +1292,7 @@ static void l2cap_le_connect(struct l2cap_chan *chan) struct l2cap_ecred_conn_data { struct { - struct l2cap_ecred_conn_req req; + struct l2cap_ecred_conn_req_hdr req; 
__le16 scid[5]; } __packed pdu; struct l2cap_chan *chan; @@ -1436,6 +1345,7 @@ static void l2cap_ecred_connect(struct l2cap_chan *chan) l2cap_ecred_init(chan, 0); + memset(&data, 0, sizeof(data)); data.pdu.req.psm = chan->psm; data.pdu.req.mtu = cpu_to_le16(chan->imtu); data.pdu.req.mps = cpu_to_le16(chan->mps); @@ -1443,7 +1353,6 @@ static void l2cap_ecred_connect(struct l2cap_chan *chan) data.pdu.scid[0] = cpu_to_le16(chan->scid); chan->ident = l2cap_get_ident(conn); - data.pid = chan->ops->get_peer_pid(chan); data.count = 1; data.chan = chan; @@ -1478,10 +1387,7 @@ static void l2cap_le_start(struct l2cap_chan *chan) static void l2cap_start_connection(struct l2cap_chan *chan) { - if (__amp_capable(chan)) { - BT_DBG("chan %p AMP capable: discover AMPs", chan); - a2mp_discover_amp(chan); - } else if (chan->conn->hcon->type == LE_LINK) { + if (chan->conn->hcon->type == LE_LINK) { l2cap_le_start(chan); } else { l2cap_send_conn_req(chan); @@ -1506,7 +1412,8 @@ static void l2cap_request_info(struct l2cap_conn *conn) sizeof(req), &req); } -static bool l2cap_check_enc_key_size(struct hci_conn *hcon) +static bool l2cap_check_enc_key_size(struct hci_conn *hcon, + struct l2cap_chan *chan) { /* The minimum encryption key size needs to be enforced by the * host stack before establishing any L2CAP connections. The @@ -1520,7 +1427,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon) int min_key_size = hcon->hdev->min_enc_key_size; /* On FIPS security level, key size must be 16 bytes */ - if (hcon->sec_level == BT_SECURITY_FIPS) + if (chan->sec_level == BT_SECURITY_FIPS) min_key_size = 16; return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || @@ -1548,7 +1455,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) !__l2cap_no_conn_pending(chan)) return; - if (l2cap_check_enc_key_size(conn->hcon)) + if (l2cap_check_enc_key_size(conn->hcon, chan)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); @@ -1584,11 +1491,6 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err) __clear_ack_timer(chan); } - if (chan->scid == L2CAP_CID_A2MP) { - l2cap_state_change(chan, BT_DISCONN); - return; - } - req.dcid = cpu_to_le16(chan->dcid); req.scid = cpu_to_le16(chan->scid); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ, @@ -1604,8 +1506,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn) BT_DBG("conn %p", conn); - mutex_lock(&conn->chan_lock); - list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { l2cap_chan_lock(chan); @@ -1630,7 +1530,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) continue; } - if (l2cap_check_enc_key_size(conn->hcon)) + if (l2cap_check_enc_key_size(conn->hcon, chan)) l2cap_start_connection(chan); else l2cap_chan_close(chan, ECONNREFUSED); @@ -1674,8 +1574,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn) l2cap_chan_unlock(chan); } - - mutex_unlock(&conn->chan_lock); } static void l2cap_le_conn_ready(struct l2cap_conn *conn) @@ -1721,17 +1619,12 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) if (hcon->type == ACL_LINK) l2cap_request_info(conn); - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); - if (chan->scid == L2CAP_CID_A2MP) { - l2cap_chan_unlock(chan); - continue; - } - if (hcon->type == LE_LINK) { l2cap_le_start(chan); } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { @@ -1744,7 +1637,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) l2cap_chan_unlock(chan); } - 
mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); if (hcon->type == LE_LINK) l2cap_le_conn_ready(conn); @@ -1759,14 +1652,10 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) BT_DBG("conn %p", conn); - mutex_lock(&conn->chan_lock); - list_for_each_entry(chan, &conn->chan_l, list) { if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) l2cap_chan_set_err(chan, err); } - - mutex_unlock(&conn->chan_lock); } static void l2cap_info_timeout(struct work_struct *work) @@ -1777,7 +1666,9 @@ static void l2cap_info_timeout(struct work_struct *work) conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; + mutex_lock(&conn->lock); l2cap_conn_start(conn); + mutex_unlock(&conn->lock); } /* @@ -1869,6 +1760,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); + mutex_lock(&conn->lock); + kfree_skb(conn->rx_skb); skb_queue_purge(&conn->pending_rx); @@ -1880,16 +1773,13 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) if (work_pending(&conn->pending_rx_work)) cancel_work_sync(&conn->pending_rx_work); - if (work_pending(&conn->id_addr_update_work)) - cancel_work_sync(&conn->id_addr_update_work); + cancel_delayed_work_sync(&conn->id_addr_timer); l2cap_unregister_all_users(conn); /* Force the connection to be immediately dropped */ hcon->disc_timeout = 0; - mutex_lock(&conn->chan_lock); - /* Kill channels */ list_for_each_entry_safe(chan, l, &conn->chan_l, list) { l2cap_chan_hold(chan); @@ -1903,15 +1793,14 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) l2cap_chan_put(chan); } - mutex_unlock(&conn->chan_lock); - - hci_chan_del(conn->hchan); - if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) cancel_delayed_work_sync(&conn->info_timer); - hcon->l2cap_data = NULL; + hci_chan_del(conn->hchan); conn->hchan = NULL; + + hcon->l2cap_data = NULL; + mutex_unlock(&conn->lock); l2cap_conn_put(conn); } @@ -1946,11 +1835,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *dst, u8 link_type) { - struct l2cap_chan *c, *c1 = NULL; + struct l2cap_chan *c, *tmp, *c1 = NULL; read_lock(&chan_list_lock); - list_for_each_entry(c, &chan_list, global_l) { + list_for_each_entry_safe(c, tmp, &chan_list, global_l) { if (state && c->state != state) continue; @@ -1960,7 +1849,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, if (link_type == LE_LINK && c->src_type == BDADDR_BREDR) continue; - if (c->psm == psm) { + if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) { int src_match, dst_match; int src_any, dst_any; @@ -1968,7 +1857,9 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, src_match = !bacmp(&c->src, src); dst_match = !bacmp(&c->dst, dst); if (src_match && dst_match) { - l2cap_chan_hold(c); + if (!l2cap_chan_hold_unless_zero(c)) + continue; + read_unlock(&chan_list_lock); return c; } @@ -1983,7 +1874,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, } if (c1) - l2cap_chan_hold(c1); + c1 = l2cap_chan_hold_unless_zero(c1); read_unlock(&chan_list_lock); @@ -2039,9 +1930,6 @@ static void l2cap_streaming_send(struct l2cap_chan *chan, BT_DBG("chan %p, skbs %p", chan, skbs); - if (__chan_is_moving(chan)) - return; - skb_queue_splice_tail_init(skbs, &chan->tx_q); while (!skb_queue_empty(&chan->tx_q)) { @@ -2084,9 +1972,6 @@ static int l2cap_ertm_send(struct l2cap_chan *chan) if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) return 0; - if (__chan_is_moving(chan)) - return 0; 
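Backing up to the l2cap_check_enc_key_size() change a few hunks above: the function now takes the channel, so the FIPS minimum of a 16-byte key is enforced from the channel's own sec_level rather than the ACL link's. A compact sketch of the resulting predicate, with illustrative constants standing in for BT_SECURITY_* and HCI_CONN_ENCRYPT:

#include <stdbool.h>
#include <stdio.h>

enum sec_level { SEC_LOW, SEC_MEDIUM, SEC_HIGH, SEC_FIPS }; /* illustrative */

/* A channel requesting FIPS security is held to a 16-byte key even if the
 * controller advertises a smaller minimum; an unencrypted link passes
 * because enforcement happens once encryption is actually enabled. */
static bool enc_key_size_ok(bool link_encrypted, int enc_key_size,
			    int hdev_min_key_size, enum sec_level chan_sec)
{
	int min_key_size = hdev_min_key_size;

	if (chan_sec == SEC_FIPS)
		min_key_size = 16;

	return !link_encrypted || enc_key_size >= min_key_size;
}

int main(void)
{
	printf("%d\n", enc_key_size_ok(true, 7, 7, SEC_MEDIUM)); /* 1 */
	printf("%d\n", enc_key_size_ok(true, 7, 7, SEC_FIPS));   /* 0 */
	return 0;
}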
- while (chan->tx_send_head && chan->unacked_frames < chan->remote_tx_win && chan->tx_state == L2CAP_TX_STATE_XMIT) { @@ -2152,9 +2037,6 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan) if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) return; - if (__chan_is_moving(chan)) - return; - while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { seq = l2cap_seq_list_pop(&chan->retrans_list); @@ -2494,8 +2376,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, pdu_len = chan->conn->mtu; /* Constrain PDU size for BR/EDR connections */ - if (!chan->hs_hcon) - pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); + pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); /* Adjust for largest possible L2CAP overhead. */ if (chan->fcs) @@ -2636,7 +2517,33 @@ static void l2cap_le_flowctl_send(struct l2cap_chan *chan) skb_queue_len(&chan->tx_q)); } -int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) +static void l2cap_tx_timestamp(struct sk_buff *skb, + const struct sockcm_cookie *sockc, + size_t len) +{ + struct sock *sk = skb ? skb->sk : NULL; + + if (sk && sk->sk_type == SOCK_STREAM) + hci_setup_tx_timestamp(skb, len, sockc); + else + hci_setup_tx_timestamp(skb, 1, sockc); +} + +static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue, + const struct sockcm_cookie *sockc, + size_t len) +{ + struct sk_buff *skb = skb_peek(queue); + struct sock *sk = skb ? skb->sk : NULL; + + if (sk && sk->sk_type == SOCK_STREAM) + l2cap_tx_timestamp(skb_peek_tail(queue), sockc, len); + else + l2cap_tx_timestamp(skb, sockc, len); +} + +int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, + const struct sockcm_cookie *sockc) { struct sk_buff *skb; int err; @@ -2651,13 +2558,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) if (IS_ERR(skb)) return PTR_ERR(skb); - /* Channel lock is released before requesting new skb and then - * reacquired thus we need to recheck channel state. - */ - if (chan->state != BT_CONNECTED) { - kfree_skb(skb); - return -ENOTCONN; - } + l2cap_tx_timestamp(skb, sockc, len); l2cap_do_send(chan, skb); return len; @@ -2682,6 +2583,8 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) if (err) return err; + l2cap_tx_timestamp_seg(&seg_queue, sockc, len); + skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); l2cap_le_flowctl_send(chan); @@ -2703,13 +2606,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) if (IS_ERR(skb)) return PTR_ERR(skb); - /* Channel lock is released before requesting new skb and then - * reacquired thus we need to recheck channel state. - */ - if (chan->state != BT_CONNECTED) { - kfree_skb(skb); - return -ENOTCONN; - } + l2cap_tx_timestamp(skb, sockc, len); l2cap_do_send(chan, skb); err = len; @@ -2731,21 +2628,16 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) */ err = l2cap_segment_sdu(chan, &seg_queue, msg, len); - /* The channel could have been closed while segmenting, - * check that it is still connected. 
- */ - if (chan->state != BT_CONNECTED) { - __skb_queue_purge(&seg_queue); - err = -ENOTCONN; - } - if (err) break; - if (chan->mode == L2CAP_MODE_ERTM) + if (chan->mode == L2CAP_MODE_ERTM) { + /* TODO: ERTM mode timestamping */ l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST); - else + } else { + l2cap_tx_timestamp_seg(&seg_queue, sockc, len); l2cap_streaming_send(chan, &seg_queue); + } err = len; @@ -3061,8 +2953,6 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) BT_DBG("conn %p", conn); - mutex_lock(&conn->chan_lock); - list_for_each_entry(chan, &conn->chan_l, list) { if (chan->chan_type != L2CAP_CHAN_RAW) continue; @@ -3077,8 +2967,6 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) if (chan->ops->recv(chan, nskb)) kfree_skb(nskb); } - - mutex_unlock(&conn->chan_lock); } /* ---- L2CAP signalling commands ---- */ @@ -3283,21 +3171,12 @@ int l2cap_ertm_init(struct l2cap_chan *chan) skb_queue_head_init(&chan->tx_q); - chan->local_amp_id = AMP_ID_BREDR; - chan->move_id = AMP_ID_BREDR; - chan->move_state = L2CAP_MOVE_STABLE; - chan->move_role = L2CAP_MOVE_ROLE_NONE; - if (chan->mode != L2CAP_MODE_ERTM) return 0; chan->rx_state = L2CAP_RX_STATE_RECV; chan->tx_state = L2CAP_TX_STATE_XMIT; - INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); - INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); - INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); - skb_queue_head_init(&chan->srej_q); err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); @@ -3326,52 +3205,19 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) static inline bool __l2cap_ews_supported(struct l2cap_conn *conn) { - return ((conn->local_fixed_chan & L2CAP_FC_A2MP) && - (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)); + return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW); } static inline bool __l2cap_efs_supported(struct l2cap_conn *conn) { - return ((conn->local_fixed_chan & L2CAP_FC_A2MP) && - (conn->feat_mask & L2CAP_FEAT_EXT_FLOW)); + return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW); } static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan, struct l2cap_conf_rfc *rfc) { - if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) { - u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to; - - /* Class 1 devices have must have ERTM timeouts - * exceeding the Link Supervision Timeout. The - * default Link Supervision Timeout for AMP - * controllers is 10 seconds. - * - * Class 1 devices use 0xffffffff for their - * best-effort flush timeout, so the clamping logic - * will result in a timeout that meets the above - * requirement. ERTM timeouts are 16-bit values, so - * the maximum timeout is 65.535 seconds. - */ - - /* Convert timeout to milliseconds and round */ - ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000); - - /* This is the recommended formula for class 2 devices - * that start ERTM timers when packets are sent to the - * controller. 
- */ - ertm_to = 3 * ertm_to + 500; - - if (ertm_to > 0xffff) - ertm_to = 0xffff; - - rfc->retrans_timeout = cpu_to_le16((u16) ertm_to); - rfc->monitor_timeout = rfc->retrans_timeout; - } else { - rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); - rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); - } + rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); + rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); } static inline void l2cap_txwin_setup(struct l2cap_chan *chan) @@ -3570,7 +3416,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; struct l2cap_conf_efs efs; u8 remote_efs = 0; - u16 mtu = L2CAP_DEFAULT_MTU; + u16 mtu = 0; u16 result = L2CAP_CONF_SUCCESS; u16 size; @@ -3623,13 +3469,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data case L2CAP_CONF_EWS: if (olen != 2) break; - if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP)) - return -ECONNREFUSED; - set_bit(FLAG_EXT_CTRL, &chan->flags); - set_bit(CONF_EWS_RECV, &chan->conf_state); - chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; - chan->remote_tx_win = val; - break; + return -ECONNREFUSED; default: if (hint) @@ -3681,6 +3521,29 @@ done: /* Configure output options and let the other side know * which ones we don't like. */ + /* If MTU is not provided in configure request, try adjusting it + * to the current output MTU if it has been set + * + * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5 + * + * Each configuration parameter value (if any is present) in an + * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a + * configuration parameter value that has been sent (or, in case + * of default values, implied) in the corresponding + * L2CAP_CONFIGURATION_REQ packet. + */ + if (!mtu) { + /* Only adjust for ERTM channels as for older modes the + * remote stack may not be able to detect that the + * adjustment causing it to silently drop packets. 
+ */ + if (chan->mode == L2CAP_MODE_ERTM && + chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU) + mtu = chan->omtu; + else + mtu = L2CAP_DEFAULT_MTU; + } + if (mtu < L2CAP_DEFAULT_MIN_MTU) result = L2CAP_CONF_UNACCEPT; else { @@ -3736,7 +3599,8 @@ done: l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); - if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { + if (remote_efs && + test_bit(FLAG_EFS_ENABLE, &chan->flags)) { chan->remote_id = efs.id; chan->remote_stype = efs.stype; chan->remote_msdu = le16_to_cpu(efs.msdu); @@ -3933,43 +3797,92 @@ void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan) &rsp); } -void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan) +static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data) { + int *result = data; + + if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) + return; + + switch (chan->state) { + case BT_CONNECT2: + /* If channel still pending accept add to result */ + (*result)++; + return; + case BT_CONNECTED: + return; + default: + /* If not connected or pending accept it has been refused */ + *result = -ECONNREFUSED; + return; + } +} + +struct l2cap_ecred_rsp_data { struct { - struct l2cap_ecred_conn_rsp rsp; - __le16 dcid[5]; + struct l2cap_ecred_conn_rsp_hdr rsp; + __le16 scid[L2CAP_ECRED_MAX_CID]; } __packed pdu; + int count; +}; + +static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data) +{ + struct l2cap_ecred_rsp_data *rsp = data; + struct l2cap_ecred_conn_rsp *rsp_flex = + container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr); + + /* Check if channel for outgoing connection or if it wasn't deferred + * since in those cases it must be skipped. + */ + if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) || + !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags)) + return; + + /* Reset ident so only one response is sent */ + chan->ident = 0; + + /* Include all channels pending with the same ident */ + if (!rsp->pdu.rsp.result) + rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid); + else + l2cap_chan_del(chan, ECONNRESET); +} + +void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan) +{ struct l2cap_conn *conn = chan->conn; - u16 ident = chan->ident; - int i = 0; + struct l2cap_ecred_rsp_data data; + u16 id = chan->ident; + int result = 0; - if (!ident) + if (!id) return; - BT_DBG("chan %p ident %d", chan, ident); + BT_DBG("chan %p id %d", chan, id); - pdu.rsp.mtu = cpu_to_le16(chan->imtu); - pdu.rsp.mps = cpu_to_le16(chan->mps); - pdu.rsp.credits = cpu_to_le16(chan->rx_credits); - pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS); + memset(&data, 0, sizeof(data)); - mutex_lock(&conn->chan_lock); + data.pdu.rsp.mtu = cpu_to_le16(chan->imtu); + data.pdu.rsp.mps = cpu_to_le16(chan->mps); + data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits); + data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS); - list_for_each_entry(chan, &conn->chan_l, list) { - if (chan->ident != ident) - continue; + /* Verify that all channels are ready */ + __l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result); - /* Reset ident so only one response is sent */ - chan->ident = 0; + if (result > 0) + return; - /* Include all channels pending with the same ident */ - pdu.dcid[i++] = cpu_to_le16(chan->scid); - } + if (result < 0) + data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION); - mutex_unlock(&conn->chan_lock); + /* Build response */ + __l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data); - l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP, - 
sizeof(pdu.rsp) + i * sizeof(__le16), &pdu); + l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP, + sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)), + &data.pdu); } void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) @@ -3983,11 +3896,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) rsp.dcid = cpu_to_le16(chan->scid); rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); - - if (chan->hs_hcon) - rsp_code = L2CAP_CREATE_CHAN_RSP; - else - rsp_code = L2CAP_CONN_RSP; + rsp_code = L2CAP_CONN_RSP; BT_DBG("chan %p rsp_code %u", chan, rsp_code); @@ -4082,13 +3991,12 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, return 0; } -static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, - u8 *data, u8 rsp_code, u8 amp_id) +static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, + u8 *data, u8 rsp_code) { struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; struct l2cap_conn_rsp rsp; - struct l2cap_chan *chan = NULL, *pchan; + struct l2cap_chan *chan = NULL, *pchan = NULL; int result, status = L2CAP_CS_NO_INFO; u16 dcid = 0, scid = __le16_to_cpu(req->scid); @@ -4101,15 +4009,15 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, &conn->hcon->dst, ACL_LINK); if (!pchan) { result = L2CAP_CR_BAD_PSM; - goto sendresp; + goto response; } - mutex_lock(&conn->chan_lock); l2cap_chan_lock(pchan); /* Check if the ACL is secure enough (if not SDP) */ if (psm != cpu_to_le16(L2CAP_PSM_SDP) && - !hci_conn_check_link_mode(conn->hcon)) { + (!hci_conn_check_link_mode(conn->hcon) || + !l2cap_check_enc_key_size(conn->hcon, pchan))) { conn->disc_reason = HCI_ERROR_AUTH_FAILURE; result = L2CAP_CR_SEC_BLOCK; goto response; @@ -4146,7 +4054,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, chan->dst_type = bdaddr_dst_type(conn->hcon); chan->psm = psm; chan->dcid = scid; - chan->local_amp_id = amp_id; __l2cap_chan_add(conn, chan); @@ -4164,17 +4071,8 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, status = L2CAP_CS_AUTHOR_PEND; chan->ops->defer(chan); } else { - /* Force pending result for AMP controllers. - * The connection will succeed after the - * physical link is up. 
- */ - if (amp_id == AMP_ID_BREDR) { - l2cap_state_change(chan, BT_CONFIG); - result = L2CAP_CR_SUCCESS; - } else { - l2cap_state_change(chan, BT_CONNECT2); - result = L2CAP_CR_PEND; - } + l2cap_state_change(chan, BT_CONFIG); + result = L2CAP_CR_SUCCESS; status = L2CAP_CS_NO_INFO; } } else { @@ -4189,17 +4087,15 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, } response: - l2cap_chan_unlock(pchan); - mutex_unlock(&conn->chan_lock); - l2cap_chan_put(pchan); - -sendresp: rsp.scid = cpu_to_le16(scid); rsp.dcid = cpu_to_le16(dcid); rsp.result = cpu_to_le16(result); rsp.status = cpu_to_le16(status); l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp); + if (!pchan) + return; + if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { struct l2cap_info_req info; info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); @@ -4222,25 +4118,17 @@ sendresp: chan->num_conf_req++; } - return chan; + l2cap_chan_unlock(pchan); + l2cap_chan_put(pchan); } static int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { - struct hci_dev *hdev = conn->hcon->hdev; - struct hci_conn *hcon = conn->hcon; - if (cmd_len < sizeof(struct l2cap_conn_req)) return -EPROTO; - hci_dev_lock(hdev); - if (hci_dev_test_flag(hdev, HCI_MGMT) && - !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags)) - mgmt_device_connected(hdev, hcon, NULL, 0); - hci_dev_unlock(hdev); - - l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0); + l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP); return 0; } @@ -4262,31 +4150,38 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn, result = __le16_to_cpu(rsp->result); status = __le16_to_cpu(rsp->status); + if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START || + dcid > L2CAP_CID_DYN_END)) + return -EPROTO; + BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status); - mutex_lock(&conn->chan_lock); - if (scid) { chan = __l2cap_get_chan_by_scid(conn, scid); - if (!chan) { - err = -EBADSLT; - goto unlock; - } + if (!chan) + return -EBADSLT; } else { chan = __l2cap_get_chan_by_ident(conn, cmd->ident); - if (!chan) { - err = -EBADSLT; - goto unlock; - } + if (!chan) + return -EBADSLT; } + chan = l2cap_chan_hold_unless_zero(chan); + if (!chan) + return -EBADSLT; + err = 0; l2cap_chan_lock(chan); switch (result) { case L2CAP_CR_SUCCESS: + if (__l2cap_get_chan_by_dcid(conn, dcid)) { + err = -EBADSLT; + break; + } + l2cap_state_change(chan, BT_CONFIG); chan->ident = 0; chan->dcid = dcid; @@ -4310,9 +4205,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn, } l2cap_chan_unlock(chan); - -unlock: - mutex_unlock(&conn->chan_lock); + l2cap_chan_put(chan); return err; } @@ -4417,7 +4310,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, chan->ident = cmd->ident; l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); - chan->num_conf_rsp++; + if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP) + chan->num_conf_rsp++; /* Reset config buffer. 
*/ chan->conf_len = 0; @@ -4455,14 +4349,12 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, /* check compatibility */ /* Send rsp for BR/EDR channel */ - if (!chan->hs_hcon) - l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags); - else - chan->ident = cmd->ident; + l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags); } unlock: l2cap_chan_unlock(chan); + l2cap_chan_put(chan); return err; } @@ -4509,15 +4401,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, goto done; } - if (!chan->hs_hcon) { - l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, - 0); - } else { - if (l2cap_check_efs(chan)) { - amp_create_logical_link(chan); - chan->ident = cmd->ident; - } - } + l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0); } goto done; @@ -4577,6 +4461,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, done: l2cap_chan_unlock(chan); + l2cap_chan_put(chan); return err; } @@ -4597,18 +4482,12 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); - mutex_lock(&conn->chan_lock); - - chan = __l2cap_get_chan_by_scid(conn, dcid); + chan = l2cap_get_chan_by_scid(conn, dcid); if (!chan) { - mutex_unlock(&conn->chan_lock); cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid); return 0; } - l2cap_chan_hold(chan); - l2cap_chan_lock(chan); - rsp.dcid = cpu_to_le16(chan->scid); rsp.scid = cpu_to_le16(chan->dcid); l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp); @@ -4622,8 +4501,6 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, l2cap_chan_unlock(chan); l2cap_chan_put(chan); - mutex_unlock(&conn->chan_lock); - return 0; } @@ -4643,21 +4520,14 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); - mutex_lock(&conn->chan_lock); - - chan = __l2cap_get_chan_by_scid(conn, scid); + chan = l2cap_get_chan_by_scid(conn, scid); if (!chan) { - mutex_unlock(&conn->chan_lock); return 0; } - l2cap_chan_hold(chan); - l2cap_chan_lock(chan); - if (chan->state != BT_DISCONN) { l2cap_chan_unlock(chan); l2cap_chan_put(chan); - mutex_unlock(&conn->chan_lock); return 0; } @@ -4668,8 +4538,6 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, l2cap_chan_unlock(chan); l2cap_chan_put(chan); - mutex_unlock(&conn->chan_lock); - return 0; } @@ -4696,9 +4564,6 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, if (!disable_ertm) feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | L2CAP_FEAT_FCS; - if (conn->local_fixed_chan & L2CAP_FC_A2MP) - feat_mask |= L2CAP_FEAT_EXT_FLOW - | L2CAP_FEAT_EXT_WINDOW; put_unaligned_le32(feat_mask, rsp->data); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), @@ -4787,746 +4652,6 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, return 0; } -static int l2cap_create_channel_req(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, - u16 cmd_len, void *data) -{ - struct l2cap_create_chan_req *req = data; - struct l2cap_create_chan_rsp rsp; - struct l2cap_chan *chan; - struct hci_dev *hdev; - u16 psm, scid; - - if (cmd_len != sizeof(*req)) - return -EPROTO; - - if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) - return -EINVAL; - - psm = le16_to_cpu(req->psm); - scid = le16_to_cpu(req->scid); - - BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id); - - /* For controller id 0 make BR/EDR connection */ - if (req->amp_id == AMP_ID_BREDR) { - l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP, - req->amp_id); - return 0; - } - - /* 
Validate AMP controller id */ - hdev = hci_dev_get(req->amp_id); - if (!hdev) - goto error; - - if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) { - hci_dev_put(hdev); - goto error; - } - - chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP, - req->amp_id); - if (chan) { - struct amp_mgr *mgr = conn->hcon->amp_mgr; - struct hci_conn *hs_hcon; - - hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, - &conn->hcon->dst); - if (!hs_hcon) { - hci_dev_put(hdev); - cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, - chan->dcid); - return 0; - } - - BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon); - - mgr->bredr_chan = chan; - chan->hs_hcon = hs_hcon; - chan->fcs = L2CAP_FCS_NONE; - conn->mtu = hdev->block_mtu; - } - - hci_dev_put(hdev); - - return 0; - -error: - rsp.dcid = 0; - rsp.scid = cpu_to_le16(scid); - rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP); - rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); - - l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP, - sizeof(rsp), &rsp); - - return 0; -} - -static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id) -{ - struct l2cap_move_chan_req req; - u8 ident; - - BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id); - - ident = l2cap_get_ident(chan->conn); - chan->ident = ident; - - req.icid = cpu_to_le16(chan->scid); - req.dest_amp_id = dest_amp_id; - - l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), - &req); - - __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT); -} - -static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result) -{ - struct l2cap_move_chan_rsp rsp; - - BT_DBG("chan %p, result 0x%4.4x", chan, result); - - rsp.icid = cpu_to_le16(chan->dcid); - rsp.result = cpu_to_le16(result); - - l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP, - sizeof(rsp), &rsp); -} - -static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result) -{ - struct l2cap_move_chan_cfm cfm; - - BT_DBG("chan %p, result 0x%4.4x", chan, result); - - chan->ident = l2cap_get_ident(chan->conn); - - cfm.icid = cpu_to_le16(chan->scid); - cfm.result = cpu_to_le16(result); - - l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM, - sizeof(cfm), &cfm); - - __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT); -} - -static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid) -{ - struct l2cap_move_chan_cfm cfm; - - BT_DBG("conn %p, icid 0x%4.4x", conn, icid); - - cfm.icid = cpu_to_le16(icid); - cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED); - - l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM, - sizeof(cfm), &cfm); -} - -static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, - u16 icid) -{ - struct l2cap_move_chan_cfm_rsp rsp; - - BT_DBG("icid 0x%4.4x", icid); - - rsp.icid = cpu_to_le16(icid); - l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp); -} - -static void __release_logical_link(struct l2cap_chan *chan) -{ - chan->hs_hchan = NULL; - chan->hs_hcon = NULL; - - /* Placeholder - release the logical link */ -} - -static void l2cap_logical_fail(struct l2cap_chan *chan) -{ - /* Logical link setup failed */ - if (chan->state != BT_CONNECTED) { - /* Create channel failure, disconnect */ - l2cap_send_disconn_req(chan, ECONNRESET); - return; - } - - switch (chan->move_role) { - case L2CAP_MOVE_ROLE_RESPONDER: - l2cap_move_done(chan); - l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP); - break; - case L2CAP_MOVE_ROLE_INITIATOR: - if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP || - chan->move_state == 
L2CAP_MOVE_WAIT_LOGICAL_CFM) { - /* Remote has only sent pending or - * success responses, clean up - */ - l2cap_move_done(chan); - } - - /* Other amp move states imply that the move - * has already aborted - */ - l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); - break; - } -} - -static void l2cap_logical_finish_create(struct l2cap_chan *chan, - struct hci_chan *hchan) -{ - struct l2cap_conf_rsp rsp; - - chan->hs_hchan = hchan; - chan->hs_hcon->l2cap_data = chan->conn; - - l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0); - - if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { - int err; - - set_default_fcs(chan); - - err = l2cap_ertm_init(chan); - if (err < 0) - l2cap_send_disconn_req(chan, -err); - else - l2cap_chan_ready(chan); - } -} - -static void l2cap_logical_finish_move(struct l2cap_chan *chan, - struct hci_chan *hchan) -{ - chan->hs_hcon = hchan->conn; - chan->hs_hcon->l2cap_data = chan->conn; - - BT_DBG("move_state %d", chan->move_state); - - switch (chan->move_state) { - case L2CAP_MOVE_WAIT_LOGICAL_COMP: - /* Move confirm will be sent after a success - * response is received - */ - chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; - break; - case L2CAP_MOVE_WAIT_LOGICAL_CFM: - if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; - } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) { - chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP; - l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); - } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) { - chan->move_state = L2CAP_MOVE_WAIT_CONFIRM; - l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS); - } - break; - default: - /* Move was not in expected state, free the channel */ - __release_logical_link(chan); - - chan->move_state = L2CAP_MOVE_STABLE; - } -} - -/* Call with chan locked */ -void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan, - u8 status) -{ - BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status); - - if (status) { - l2cap_logical_fail(chan); - __release_logical_link(chan); - return; - } - - if (chan->state != BT_CONNECTED) { - /* Ignore logical link if channel is on BR/EDR */ - if (chan->local_amp_id != AMP_ID_BREDR) - l2cap_logical_finish_create(chan, hchan); - } else { - l2cap_logical_finish_move(chan, hchan); - } -} - -void l2cap_move_start(struct l2cap_chan *chan) -{ - BT_DBG("chan %p", chan); - - if (chan->local_amp_id == AMP_ID_BREDR) { - if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED) - return; - chan->move_role = L2CAP_MOVE_ROLE_INITIATOR; - chan->move_state = L2CAP_MOVE_WAIT_PREPARE; - /* Placeholder - start physical link setup */ - } else { - chan->move_role = L2CAP_MOVE_ROLE_INITIATOR; - chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; - chan->move_id = 0; - l2cap_move_setup(chan); - l2cap_send_move_chan_req(chan, 0); - } -} - -static void l2cap_do_create(struct l2cap_chan *chan, int result, - u8 local_amp_id, u8 remote_amp_id) -{ - BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state), - local_amp_id, remote_amp_id); - - chan->fcs = L2CAP_FCS_NONE; - - /* Outgoing channel on AMP */ - if (chan->state == BT_CONNECT) { - if (result == L2CAP_CR_SUCCESS) { - chan->local_amp_id = local_amp_id; - l2cap_send_create_chan_req(chan, remote_amp_id); - } else { - /* Revert to BR/EDR connect */ - l2cap_send_conn_req(chan); - } - - return; - } - - /* Incoming channel on AMP */ - if (__l2cap_no_conn_pending(chan)) { - struct l2cap_conn_rsp rsp; - char buf[128]; - rsp.scid = cpu_to_le16(chan->dcid); - rsp.dcid = 
cpu_to_le16(chan->scid); - - if (result == L2CAP_CR_SUCCESS) { - /* Send successful response */ - rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); - rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); - } else { - /* Send negative response */ - rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM); - rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); - } - - l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP, - sizeof(rsp), &rsp); - - if (result == L2CAP_CR_SUCCESS) { - l2cap_state_change(chan, BT_CONFIG); - set_bit(CONF_REQ_SENT, &chan->conf_state); - l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn), - L2CAP_CONF_REQ, - l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); - chan->num_conf_req++; - } - } -} - -static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id, - u8 remote_amp_id) -{ - l2cap_move_setup(chan); - chan->move_id = local_amp_id; - chan->move_state = L2CAP_MOVE_WAIT_RSP; - - l2cap_send_move_chan_req(chan, remote_amp_id); -} - -static void l2cap_do_move_respond(struct l2cap_chan *chan, int result) -{ - struct hci_chan *hchan = NULL; - - /* Placeholder - get hci_chan for logical link */ - - if (hchan) { - if (hchan->state == BT_CONNECTED) { - /* Logical link is ready to go */ - chan->hs_hcon = hchan->conn; - chan->hs_hcon->l2cap_data = chan->conn; - chan->move_state = L2CAP_MOVE_WAIT_CONFIRM; - l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS); - - l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS); - } else { - /* Wait for logical link to be ready */ - chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; - } - } else { - /* Logical link not available */ - l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED); - } -} - -static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result) -{ - if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) { - u8 rsp_result; - if (result == -EINVAL) - rsp_result = L2CAP_MR_BAD_ID; - else - rsp_result = L2CAP_MR_NOT_ALLOWED; - - l2cap_send_move_chan_rsp(chan, rsp_result); - } - - chan->move_role = L2CAP_MOVE_ROLE_NONE; - chan->move_state = L2CAP_MOVE_STABLE; - - /* Restart data transmission */ - l2cap_ertm_send(chan); -} - -/* Invoke with locked chan */ -void __l2cap_physical_cfm(struct l2cap_chan *chan, int result) -{ - u8 local_amp_id = chan->local_amp_id; - u8 remote_amp_id = chan->remote_amp_id; - - BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d", - chan, result, local_amp_id, remote_amp_id); - - if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) - return; - - if (chan->state != BT_CONNECTED) { - l2cap_do_create(chan, result, local_amp_id, remote_amp_id); - } else if (result != L2CAP_MR_SUCCESS) { - l2cap_do_move_cancel(chan, result); - } else { - switch (chan->move_role) { - case L2CAP_MOVE_ROLE_INITIATOR: - l2cap_do_move_initiate(chan, local_amp_id, - remote_amp_id); - break; - case L2CAP_MOVE_ROLE_RESPONDER: - l2cap_do_move_respond(chan, result); - break; - default: - l2cap_do_move_cancel(chan, result); - break; - } - } -} - -static inline int l2cap_move_channel_req(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, - u16 cmd_len, void *data) -{ - struct l2cap_move_chan_req *req = data; - struct l2cap_move_chan_rsp rsp; - struct l2cap_chan *chan; - u16 icid = 0; - u16 result = L2CAP_MR_NOT_ALLOWED; - - if (cmd_len != sizeof(*req)) - return -EPROTO; - - icid = le16_to_cpu(req->icid); - - BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id); - - if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) - return -EINVAL; - - chan = l2cap_get_chan_by_dcid(conn, icid); - if (!chan) { - rsp.icid = cpu_to_le16(icid); - 
rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED); - l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP, - sizeof(rsp), &rsp); - return 0; - } - - chan->ident = cmd->ident; - - if (chan->scid < L2CAP_CID_DYN_START || - chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY || - (chan->mode != L2CAP_MODE_ERTM && - chan->mode != L2CAP_MODE_STREAMING)) { - result = L2CAP_MR_NOT_ALLOWED; - goto send_move_response; - } - - if (chan->local_amp_id == req->dest_amp_id) { - result = L2CAP_MR_SAME_ID; - goto send_move_response; - } - - if (req->dest_amp_id != AMP_ID_BREDR) { - struct hci_dev *hdev; - hdev = hci_dev_get(req->dest_amp_id); - if (!hdev || hdev->dev_type != HCI_AMP || - !test_bit(HCI_UP, &hdev->flags)) { - if (hdev) - hci_dev_put(hdev); - - result = L2CAP_MR_BAD_ID; - goto send_move_response; - } - hci_dev_put(hdev); - } - - /* Detect a move collision. Only send a collision response - * if this side has "lost", otherwise proceed with the move. - * The winner has the larger bd_addr. - */ - if ((__chan_is_moving(chan) || - chan->move_role != L2CAP_MOVE_ROLE_NONE) && - bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) { - result = L2CAP_MR_COLLISION; - goto send_move_response; - } - - chan->move_role = L2CAP_MOVE_ROLE_RESPONDER; - l2cap_move_setup(chan); - chan->move_id = req->dest_amp_id; - - if (req->dest_amp_id == AMP_ID_BREDR) { - /* Moving to BR/EDR */ - if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; - result = L2CAP_MR_PEND; - } else { - chan->move_state = L2CAP_MOVE_WAIT_CONFIRM; - result = L2CAP_MR_SUCCESS; - } - } else { - chan->move_state = L2CAP_MOVE_WAIT_PREPARE; - /* Placeholder - uncomment when amp functions are available */ - /*amp_accept_physical(chan, req->dest_amp_id);*/ - result = L2CAP_MR_PEND; - } - -send_move_response: - l2cap_send_move_chan_rsp(chan, result); - - l2cap_chan_unlock(chan); - - return 0; -} - -static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result) -{ - struct l2cap_chan *chan; - struct hci_chan *hchan = NULL; - - chan = l2cap_get_chan_by_scid(conn, icid); - if (!chan) { - l2cap_send_move_chan_cfm_icid(conn, icid); - return; - } - - __clear_chan_timer(chan); - if (result == L2CAP_MR_PEND) - __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT); - - switch (chan->move_state) { - case L2CAP_MOVE_WAIT_LOGICAL_COMP: - /* Move confirm will be sent when logical link - * is complete. - */ - chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; - break; - case L2CAP_MOVE_WAIT_RSP_SUCCESS: - if (result == L2CAP_MR_PEND) { - break; - } else if (test_bit(CONN_LOCAL_BUSY, - &chan->conn_state)) { - chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; - } else { - /* Logical link is up or moving to BR/EDR, - * proceed with move - */ - chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP; - l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); - } - break; - case L2CAP_MOVE_WAIT_RSP: - /* Moving to AMP */ - if (result == L2CAP_MR_SUCCESS) { - /* Remote is ready, send confirm immediately - * after logical link is ready - */ - chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; - } else { - /* Both logical link and move success - * are required to confirm - */ - chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP; - } - - /* Placeholder - get hci_chan for logical link */ - if (!hchan) { - /* Logical link not available */ - l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); - break; - } - - /* If the logical link is not yet connected, do not - * send confirmation. 
- */ - if (hchan->state != BT_CONNECTED) - break; - - /* Logical link is already ready to go */ - - chan->hs_hcon = hchan->conn; - chan->hs_hcon->l2cap_data = chan->conn; - - if (result == L2CAP_MR_SUCCESS) { - /* Can confirm now */ - l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); - } else { - /* Now only need move success - * to confirm - */ - chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; - } - - l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS); - break; - default: - /* Any other amp move state means the move failed. */ - chan->move_id = chan->local_amp_id; - l2cap_move_done(chan); - l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); - } - - l2cap_chan_unlock(chan); -} - -static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid, - u16 result) -{ - struct l2cap_chan *chan; - - chan = l2cap_get_chan_by_ident(conn, ident); - if (!chan) { - /* Could not locate channel, icid is best guess */ - l2cap_send_move_chan_cfm_icid(conn, icid); - return; - } - - __clear_chan_timer(chan); - - if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) { - if (result == L2CAP_MR_COLLISION) { - chan->move_role = L2CAP_MOVE_ROLE_RESPONDER; - } else { - /* Cleanup - cancel move */ - chan->move_id = chan->local_amp_id; - l2cap_move_done(chan); - } - } - - l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); - - l2cap_chan_unlock(chan); -} - -static int l2cap_move_channel_rsp(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, - u16 cmd_len, void *data) -{ - struct l2cap_move_chan_rsp *rsp = data; - u16 icid, result; - - if (cmd_len != sizeof(*rsp)) - return -EPROTO; - - icid = le16_to_cpu(rsp->icid); - result = le16_to_cpu(rsp->result); - - BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); - - if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND) - l2cap_move_continue(conn, icid, result); - else - l2cap_move_fail(conn, cmd->ident, icid, result); - - return 0; -} - -static int l2cap_move_channel_confirm(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, - u16 cmd_len, void *data) -{ - struct l2cap_move_chan_cfm *cfm = data; - struct l2cap_chan *chan; - u16 icid, result; - - if (cmd_len != sizeof(*cfm)) - return -EPROTO; - - icid = le16_to_cpu(cfm->icid); - result = le16_to_cpu(cfm->result); - - BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); - - chan = l2cap_get_chan_by_dcid(conn, icid); - if (!chan) { - /* Spec requires a response even if the icid was not found */ - l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); - return 0; - } - - if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) { - if (result == L2CAP_MC_CONFIRMED) { - chan->local_amp_id = chan->move_id; - if (chan->local_amp_id == AMP_ID_BREDR) - __release_logical_link(chan); - } else { - chan->move_id = chan->local_amp_id; - } - - l2cap_move_done(chan); - } - - l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); - - l2cap_chan_unlock(chan); - - return 0; -} - -static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, - u16 cmd_len, void *data) -{ - struct l2cap_move_chan_cfm_rsp *rsp = data; - struct l2cap_chan *chan; - u16 icid; - - if (cmd_len != sizeof(*rsp)) - return -EPROTO; - - icid = le16_to_cpu(rsp->icid); - - BT_DBG("icid 0x%4.4x", icid); - - chan = l2cap_get_chan_by_scid(conn, icid); - if (!chan) - return 0; - - __clear_chan_timer(chan); - - if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) { - chan->local_amp_id = chan->move_id; - - if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan) - __release_logical_link(chan); - - l2cap_move_done(chan); - } 
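In the l2cap_le_credits() hunk below, a peer that returns more credits than tx_credits has room for is treated as a protocol violation and disconnected rather than letting the counter wrap. A standalone sketch of that guard; the kernel computes max_credits in a part of the function not quoted here, so the cap used below is illustrative:

#include <stdio.h>

#define LE_MAX_CREDITS 65535U	/* credits are a u16 on the wire */

/* Returns -1 on overflow (kernel: BT_ERR + disconnect with ECONNRESET),
 * otherwise tops up tx_credits and reports whether sending may resume. */
static int le_credits(unsigned int *tx_credits, unsigned int credits)
{
	unsigned int max_credits = LE_MAX_CREDITS - *tx_credits;

	if (credits > max_credits)
		return -1;

	*tx_credits += credits;
	return *tx_credits ? 1 : 0;
}

int main(void)
{
	unsigned int tx = 65000;

	printf("%d tx=%u\n", le_credits(&tx, 500), tx);	/* 1 tx=65500 */
	printf("%d tx=%u\n", le_credits(&tx, 500), tx);	/* -1, unchanged */
	return 0;
}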
- - l2cap_chan_unlock(chan); - - return 0; -} - static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) @@ -5604,13 +4729,9 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn, BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x", dcid, mtu, mps, credits, result); - mutex_lock(&conn->chan_lock); - chan = __l2cap_get_chan_by_ident(conn, cmd->ident); - if (!chan) { - err = -EBADSLT; - goto unlock; - } + if (!chan) + return -EBADSLT; err = 0; @@ -5658,9 +4779,6 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn, l2cap_chan_unlock(chan); -unlock: - mutex_unlock(&conn->chan_lock); - return err; } @@ -5680,7 +4798,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, break; case L2CAP_CONN_RSP: - case L2CAP_CREATE_CHAN_RSP: l2cap_connect_create_rsp(conn, cmd, cmd_len, data); break; @@ -5715,26 +4832,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, l2cap_information_rsp(conn, cmd, cmd_len, data); break; - case L2CAP_CREATE_CHAN_REQ: - err = l2cap_create_channel_req(conn, cmd, cmd_len, data); - break; - - case L2CAP_MOVE_CHAN_REQ: - err = l2cap_move_channel_req(conn, cmd, cmd_len, data); - break; - - case L2CAP_MOVE_CHAN_RSP: - l2cap_move_channel_rsp(conn, cmd, cmd_len, data); - break; - - case L2CAP_MOVE_CHAN_CFM: - err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data); - break; - - case L2CAP_MOVE_CHAN_CFM_RSP: - l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data); - break; - default: BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); err = -EINVAL; @@ -5771,6 +4868,19 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm), scid, mtu, mps); + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A + * page 1059: + * + * Valid range: 0x0001-0x00ff + * + * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges + */ + if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { + result = L2CAP_CR_LE_BAD_PSM; + chan = NULL; + goto response; + } + /* Check if we have socket listening on psm */ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, &conn->hcon->dst, LE_LINK); @@ -5780,12 +4890,12 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, goto response; } - mutex_lock(&conn->chan_lock); l2cap_chan_lock(pchan); if (!smp_sufficient_security(conn->hcon, pchan->sec_level, SMP_ALLOW_STK)) { - result = L2CAP_CR_LE_AUTHENTICATION; + result = pchan->sec_level == BT_SECURITY_MEDIUM ? + L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION; chan = NULL; goto response_unlock; } @@ -5846,7 +4956,6 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, response_unlock: l2cap_chan_unlock(pchan); - mutex_unlock(&conn->chan_lock); l2cap_chan_put(pchan); if (result == L2CAP_CR_PEND) @@ -5895,12 +5004,11 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn, if (credits > max_credits) { BT_ERR("LE credits overflow"); l2cap_send_disconn_req(chan, ECONNRESET); - l2cap_chan_unlock(chan); /* Return 0 so that we don't trigger an unnecessary * command reject packet. 
*/ - return 0; + goto unlock; } chan->tx_credits += credits; @@ -5911,7 +5019,9 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn, if (chan->tx_credits) chan->ops->resume(chan); +unlock: l2cap_chan_unlock(chan); + l2cap_chan_put(chan); return 0; } @@ -5921,10 +5031,7 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, u8 *data) { struct l2cap_ecred_conn_req *req = (void *) data; - struct { - struct l2cap_ecred_conn_rsp rsp; - __le16 dcid[L2CAP_ECRED_MAX_CID]; - } __packed pdu; + DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID); struct l2cap_chan *chan, *pchan; u16 mtu, mps; __le16 psm; @@ -5943,7 +5050,7 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, cmd_len -= sizeof(*req); num_scid = cmd_len / sizeof(u16); - if (num_scid > ARRAY_SIZE(pdu.dcid)) { + if (num_scid > L2CAP_ECRED_MAX_CID) { result = L2CAP_CR_LE_INVALID_PARAMS; goto response; } @@ -5958,9 +5065,21 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, psm = req->psm; + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A + * page 1059: + * + * Valid range: 0x0001-0x00ff + * + * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges + */ + if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { + result = L2CAP_CR_LE_BAD_PSM; + goto response; + } + BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps); - memset(&pdu, 0, sizeof(pdu)); + memset(pdu, 0, sizeof(*pdu)); /* Check if we have socket listening on psm */ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, @@ -5970,7 +5089,6 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, goto response; } - mutex_lock(&conn->chan_lock); l2cap_chan_lock(pchan); if (!smp_sufficient_security(conn->hcon, pchan->sec_level, @@ -5986,8 +5104,8 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, BT_DBG("scid[%d] 0x%4.4x", i, scid); - pdu.dcid[i] = 0x0000; - len += sizeof(*pdu.dcid); + pdu->dcid[i] = 0x0000; + len += sizeof(*pdu->dcid); /* Check for valid dynamic CID range */ if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { @@ -6021,17 +5139,18 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, l2cap_ecred_init(chan, __le16_to_cpu(req->credits)); /* Init response */ - if (!pdu.rsp.credits) { - pdu.rsp.mtu = cpu_to_le16(chan->imtu); - pdu.rsp.mps = cpu_to_le16(chan->mps); - pdu.rsp.credits = cpu_to_le16(chan->rx_credits); + if (!pdu->credits) { + pdu->mtu = cpu_to_le16(chan->imtu); + pdu->mps = cpu_to_le16(chan->mps); + pdu->credits = cpu_to_le16(chan->rx_credits); } - pdu.dcid[i] = cpu_to_le16(chan->scid); + pdu->dcid[i] = cpu_to_le16(chan->scid); __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); chan->ident = cmd->ident; + chan->mode = L2CAP_MODE_EXT_FLOWCTL; if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { l2cap_state_change(chan, BT_CONNECT2); @@ -6044,17 +5163,16 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, unlock: l2cap_chan_unlock(pchan); - mutex_unlock(&conn->chan_lock); l2cap_chan_put(pchan); response: - pdu.rsp.result = cpu_to_le16(result); + pdu->result = cpu_to_le16(result); if (defer) return 0; l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP, - sizeof(pdu.rsp) + len, &pdu); + sizeof(*pdu) + len, pdu); return 0; } @@ -6081,8 +5199,6 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn, BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits, result); - mutex_lock(&conn->chan_lock); - cmd_len -= sizeof(*rsp); list_for_each_entry_safe(chan, 
tmp, &conn->chan_l, list) { @@ -6168,8 +5284,6 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn, l2cap_chan_unlock(chan); } - mutex_unlock(&conn->chan_lock); - return err; } @@ -6282,18 +5396,20 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn, if (cmd_len < sizeof(*rej)) return -EPROTO; - mutex_lock(&conn->chan_lock); - chan = __l2cap_get_chan_by_ident(conn, cmd->ident); if (!chan) goto done; + chan = l2cap_chan_hold_unless_zero(chan); + if (!chan) + goto done; + l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNREFUSED); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); done: - mutex_unlock(&conn->chan_lock); return 0; } @@ -6401,6 +5517,14 @@ drop: kfree_skb(skb); } +static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident) +{ + struct l2cap_cmd_rej_unk rej; + + rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); + l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); +} + static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) { @@ -6426,23 +5550,25 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, if (len > skb->len || !cmd->ident) { BT_DBG("corrupted command"); - break; + l2cap_sig_send_rej(conn, cmd->ident); + skb_pull(skb, len > skb->len ? skb->len : len); + continue; } err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data); if (err) { - struct l2cap_cmd_rej_unk rej; - BT_ERR("Wrong link type (%d)", err); - - rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); - l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, - sizeof(rej), &rej); + l2cap_sig_send_rej(conn, cmd->ident); } skb_pull(skb, len); } + if (skb->len > 0) { + BT_DBG("corrupted command"); + l2cap_sig_send_rej(conn, 0); + } + drop: kfree_skb(skb); } @@ -6842,6 +5968,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { + struct l2cap_ctrl local_control; int err = 0; bool skb_in_use = false; @@ -6866,15 +5993,32 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan, chan->buffer_seq = chan->expected_tx_seq; skb_in_use = true; + /* l2cap_reassemble_sdu may free skb, hence invalidate + * control, so make a copy in advance to use it after + * l2cap_reassemble_sdu returns and to avoid the race + * condition, for example: + * + * The current thread calls: + * l2cap_reassemble_sdu + * chan->ops->recv == l2cap_sock_recv_cb + * __sock_queue_rcv_skb + * Another thread calls: + * bt_sock_recvmsg + * skb_recv_datagram + * skb_free_datagram + * Then the current thread tries to access control, but + * it was freed by skb_free_datagram. 
+ */ + local_control = *control; err = l2cap_reassemble_sdu(chan, skb, control); if (err) break; - if (control->final) { + if (local_control.final) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) { - control->final = 0; - l2cap_retransmit_all(chan, control); + local_control.final = 0; + l2cap_retransmit_all(chan, &local_control); l2cap_ertm_send(chan); } } @@ -6926,8 +6070,8 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan, if (control->final) { clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); - if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) && - !__chan_is_moving(chan)) { + if (!test_and_clear_bit(CONN_REJ_ACT, + &chan->conn_state)) { control->final = 0; l2cap_retransmit_all(chan, control); } @@ -7120,11 +6264,7 @@ static int l2cap_finish_move(struct l2cap_chan *chan) BT_DBG("chan %p", chan); chan->rx_state = L2CAP_RX_STATE_RECV; - - if (chan->hs_hcon) - chan->conn->mtu = chan->hs_hcon->hdev->block_mtu; - else - chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; + chan->conn->mtu = chan->conn->hcon->mtu; return l2cap_resegment(chan); } @@ -7191,11 +6331,7 @@ static int l2cap_rx_state_wait_f(struct l2cap_chan *chan, */ chan->next_tx_seq = control->reqseq; chan->unacked_frames = 0; - - if (chan->hs_hcon) - chan->conn->mtu = chan->hs_hcon->hdev->block_mtu; - else - chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; + chan->conn->mtu = chan->conn->hcon->mtu; err = l2cap_resegment(chan); @@ -7254,11 +6390,27 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb) { + /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store + * the txseq field in advance to use it after l2cap_reassemble_sdu + * returns and to avoid the race condition, for example: + * + * The current thread calls: + * l2cap_reassemble_sdu + * chan->ops->recv == l2cap_sock_recv_cb + * __sock_queue_rcv_skb + * Another thread calls: + * bt_sock_recvmsg + * skb_recv_datagram + * skb_free_datagram + * Then the current thread tries to access control, but it was freed by + * skb_free_datagram. 
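+ * Caching txseq by value means the sequence bookkeeping below never + * touches control after the skb may have been freed.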
+ */ + u16 txseq = control->txseq; + BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, chan->rx_state); - if (l2cap_classify_txseq(chan, control->txseq) == - L2CAP_TXSEQ_EXPECTED) { + if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) { l2cap_pass_to_tx(chan, control); BT_DBG("buffer_seq %u->%u", chan->buffer_seq, @@ -7281,8 +6433,8 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, } } - chan->last_acked_seq = control->txseq; - chan->expected_tx_seq = __next_seq(chan, control->txseq); + chan->last_acked_seq = txseq; + chan->expected_tx_seq = __next_seq(chan, txseq); return 0; } @@ -7384,9 +6536,7 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_le_credits pkt; - u16 return_credits; - - return_credits = (chan->imtu / chan->mps) + 1; + u16 return_credits = l2cap_le_rx_credits(chan); if (chan->rx_credits >= return_credits) return; @@ -7405,6 +6555,19 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan) l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt); } +void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail) +{ + if (chan->rx_avail == rx_avail) + return; + + BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail); + + chan->rx_avail = rx_avail; + + if (chan->state == BT_CONNECTED) + l2cap_chan_le_send_credits(chan); +} + static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb) { int err; @@ -7414,6 +6577,12 @@ static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb) /* Wait recv to confirm reception before updating the credits */ err = chan->ops->recv(chan, skb); + if (err < 0 && chan->rx_avail != -1) { + BT_ERR("Queueing received LE L2CAP data failed"); + l2cap_send_disconn_req(chan, ECONNRESET); + return err; + } + /* Update credits whenever an SDU is received */ l2cap_chan_le_send_credits(chan); @@ -7436,7 +6605,8 @@ static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) } chan->rx_credits--; - BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits); + BT_DBG("chan %p: rx_credits %u -> %u", + chan, chan->rx_credits + 1, chan->rx_credits); /* Update if remote had run out of credits, this should only happen * if the remote is not using the entire MPS.
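+ * A peer that sends one small SDU per PDU, for example, spends one + * credit per PDU regardless of payload size, so its credits can + * drain faster than the byte count alone would suggest.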
@@ -7531,20 +6701,10 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, chan = l2cap_get_chan_by_scid(conn, cid); if (!chan) { - if (cid == L2CAP_CID_A2MP) { - chan = a2mp_channel_create(conn, skb); - if (!chan) { - kfree_skb(skb); - return; - } - - l2cap_chan_lock(chan); - } else { - BT_DBG("unknown cid 0x%4.4x", cid); - /* Drop packet and return */ - kfree_skb(skb); - return; - } + BT_DBG("unknown cid 0x%4.4x", cid); + /* Drop packet and return */ + kfree_skb(skb); + return; } BT_DBG("chan %p, len %d", chan, skb->len); @@ -7597,6 +6757,7 @@ drop: done: l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, @@ -7615,6 +6776,8 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, BT_DBG("chan %p, len %d", chan, skb->len); + l2cap_chan_lock(chan); + if (chan->state != BT_BOUND && chan->state != BT_CONNECTED) goto drop; @@ -7626,11 +6789,13 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, bt_cb(skb)->l2cap.psm = psm; if (!chan->ops->recv(chan, skb)) { + l2cap_chan_unlock(chan); l2cap_chan_put(chan); return; } drop: + l2cap_chan_unlock(chan); l2cap_chan_put(chan); free_skb: kfree_skb(skb); @@ -7699,8 +6864,12 @@ static void process_pending_rx(struct work_struct *work) BT_DBG(""); + mutex_lock(&conn->lock); + while ((skb = skb_dequeue(&conn->pending_rx))) l2cap_recv_frame(conn, skb); + + mutex_unlock(&conn->lock); } static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) @@ -7728,33 +6897,18 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); - switch (hcon->type) { - case LE_LINK: - if (hcon->hdev->le_mtu) { - conn->mtu = hcon->hdev->le_mtu; - break; - } - fallthrough; - default: - conn->mtu = hcon->hdev->acl_mtu; - break; - } - + conn->mtu = hcon->mtu; conn->feat_mask = 0; conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS; - if (hcon->type == ACL_LINK && - hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED)) - conn->local_fixed_chan |= L2CAP_FC_A2MP; - if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) && (bredr_sc_enabled(hcon->hdev) || hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP))) conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR; mutex_init(&conn->ident_lock); - mutex_init(&conn->chan_lock); + mutex_init(&conn->lock); INIT_LIST_HEAD(&conn->chan_l); INIT_LIST_HEAD(&conn->users); @@ -7763,7 +6917,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) skb_queue_head_init(&conn->pending_rx); INIT_WORK(&conn->pending_rx_work, process_pending_rx); - INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr); + INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr); conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; @@ -7810,7 +6964,7 @@ static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data) } int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, - bdaddr_t *dst, u8 dst_type) + bdaddr_t *dst, u8 dst_type, u16 timeout) { struct l2cap_conn *conn; struct hci_conn *hcon; @@ -7903,19 +7057,17 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) hcon = hci_connect_le(hdev, dst, dst_type, false, - chan->sec_level, - HCI_LE_CONN_TIMEOUT, - HCI_ROLE_SLAVE); + chan->sec_level, timeout, + HCI_ROLE_SLAVE, 0, 0); else hcon = hci_connect_le_scan(hdev, dst, dst_type, - chan->sec_level, - HCI_LE_CONN_TIMEOUT, + chan->sec_level, timeout, CONN_REASON_L2CAP_CHAN); } else { u8 
auth_type = l2cap_get_auth_type(chan); hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type, - CONN_REASON_L2CAP_CHAN); + CONN_REASON_L2CAP_CHAN, timeout); } if (IS_ERR(hcon)) { @@ -7947,7 +7099,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, } } - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); l2cap_chan_lock(chan); if (cid && __l2cap_get_chan_by_dcid(conn, cid)) { @@ -7988,7 +7140,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, chan_unlock: l2cap_chan_unlock(chan); - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); done: hci_dev_unlock(hdev); hci_dev_put(hdev); @@ -7999,14 +7151,11 @@ EXPORT_SYMBOL_GPL(l2cap_chan_connect); static void l2cap_ecred_reconfigure(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; - struct { - struct l2cap_ecred_reconf_req req; - __le16 scid; - } pdu; + DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1); - pdu.req.mtu = cpu_to_le16(chan->imtu); - pdu.req.mps = cpu_to_le16(chan->mps); - pdu.scid = cpu_to_le16(chan->scid); + pdu->mtu = cpu_to_le16(chan->imtu); + pdu->mps = cpu_to_le16(chan->mps); + pdu->scid[0] = cpu_to_le16(chan->scid); chan->ident = l2cap_get_ident(conn); @@ -8085,7 +7234,7 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, if (src_type != c->src_type) continue; - l2cap_chan_hold(c); + c = l2cap_chan_hold_unless_zero(c); read_unlock(&chan_list_lock); return c; } @@ -8204,7 +7353,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt); - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); @@ -8212,11 +7361,6 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid, state_to_string(chan->state)); - if (chan->scid == L2CAP_CID_A2MP) { - l2cap_chan_unlock(chan); - continue; - } - if (!status && encrypt) chan->sec_level = hcon->sec_level; @@ -8234,7 +7378,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) } if (chan->state == BT_CONNECT) { - if (!status && l2cap_check_enc_key_size(hcon)) + if (!status && l2cap_check_enc_key_size(hcon, chan)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); @@ -8244,7 +7388,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) struct l2cap_conn_rsp rsp; __u16 res, stat; - if (!status && l2cap_check_enc_key_size(hcon)) { + if (!status && l2cap_check_enc_key_size(hcon, chan)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { res = L2CAP_CR_PEND; stat = L2CAP_CS_AUTHOR_PEND; @@ -8283,7 +7427,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) l2cap_chan_unlock(chan); } - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); } /* Append fragment into frame respecting the maximum len of rx_skb */ @@ -8297,6 +7441,9 @@ static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb, return -ENOMEM; /* Init rx_len */ conn->rx_len = len; + + skb_set_delivery_time(conn->rx_skb, skb->tstamp, + skb->tstamp_type); } /* Copy as much as the rx_skb can hold */ @@ -8350,23 +7497,57 @@ static void l2cap_recv_reset(struct l2cap_conn *conn) conn->rx_len = 0; } -void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) +struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c) { - struct l2cap_conn 
*conn = hcon->l2cap_data; + if (!c) + return NULL; + + BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref)); + + if (!kref_get_unless_zero(&c->ref)) + return NULL; + + return c; +} + +int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle, + struct sk_buff *skb, u16 flags) +{ + struct hci_conn *hcon; + struct l2cap_conn *conn; int len; - /* For AMP controller do not create l2cap conn */ - if (!conn && hcon->hdev->dev_type != HCI_PRIMARY) - goto drop; + /* Lock hdev to look up the hci_conn and to close the race between + * l2cap_data and l2cap_conn_del */ + hci_dev_lock(hdev); + + hcon = hci_conn_hash_lookup_handle(hdev, handle); + if (!hcon) { + hci_dev_unlock(hdev); + kfree_skb(skb); + return -ENOENT; + } + + hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_OFF); + + conn = hcon->l2cap_data; if (!conn) conn = l2cap_conn_add(hcon); - if (!conn) - goto drop; + conn = l2cap_conn_hold_unless_zero(conn); + hcon = NULL; + + hci_dev_unlock(hdev); + + if (!conn) { + kfree_skb(skb); + return -EINVAL; + } BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags); + mutex_lock(&conn->lock); + switch (flags) { case ACL_START: case ACL_START_NO_FLUSH: @@ -8382,9 +7563,8 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) * expected length. */ if (skb->len < L2CAP_LEN_SIZE) { - if (l2cap_recv_frag(conn, skb, conn->mtu) < 0) - goto drop; - return; + l2cap_recv_frag(conn, skb, conn->mtu); + break; } len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE; @@ -8392,7 +7572,7 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) if (len == skb->len) { /* Complete frame received */ l2cap_recv_frame(conn, skb); - return; + goto unlock; } BT_DBG("Start: total len %d, frag len %u", len, skb->len); @@ -8400,8 +7580,24 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) if (skb->len > len) { BT_ERR("Frame is too long (len %u, expected len %d)", skb->len, len); + /* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C + * (Multiple Signaling Command in one PDU, Data + * Truncated, BR/EDR) send a C-frame to the IUT with + * PDU Length set to 8 and Channel ID set to the + * correct signaling channel for the logical link. + * The Information payload contains one L2CAP_ECHO_REQ + * packet with Data Length set to 0 with 0 octets of + * echo data and one invalid command packet due to + * data truncated in PDU but present in HCI packet. + * + * Shorten the socket buffer to the PDU length to + * allow processing of valid commands from the PDU before + * setting the socket unreliable.
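+ * This way the embedded L2CAP_ECHO_REQ described above is still + * handled before the link is flagged.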
+ */ + skb->len = len; + l2cap_recv_frame(conn, skb); l2cap_conn_unreliable(conn, ECOMM); - goto drop; + goto unlock; } /* Append fragment into frame (with header) */ @@ -8428,7 +7624,7 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) /* Header still could not be read just continue */ if (conn->rx_skb->len < L2CAP_LEN_SIZE) - return; + break; } if (skb->len > conn->rx_len) { @@ -8456,6 +7652,10 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) drop: kfree_skb(skb); +unlock: + mutex_unlock(&conn->lock); + l2cap_conn_put(conn); + return 0; } static struct hci_cb l2cap_cb = { diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index ca8f07f3542b..9ee189c815d4 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -46,6 +46,7 @@ static const struct proto_ops l2cap_sock_ops; static void l2cap_sock_init(struct sock *sk, struct sock *parent); static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern); +static void l2cap_sock_cleanup_listen(struct sock *parent); bool l2cap_is_socket(struct socket *sock) { @@ -79,7 +80,7 @@ static int l2cap_validate_le_psm(u16 psm) return 0; } -static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) +static int l2cap_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int alen) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; @@ -177,22 +178,7 @@ done: return err; } -static void l2cap_sock_init_pid(struct sock *sk) -{ - struct l2cap_chan *chan = l2cap_pi(sk)->chan; - - /* Only L2CAP_MODE_EXT_FLOWCTL ever need to access the PID in order to - * group the channels being requested. - */ - if (chan->mode != L2CAP_MODE_EXT_FLOWCTL) - return; - - spin_lock(&sk->sk_peer_lock); - sk->sk_peer_pid = get_pid(task_tgid(current)); - spin_unlock(&sk->sk_peer_lock); -} - -static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, +static int l2cap_sock_connect(struct socket *sock, struct sockaddr_unsized *addr, int alen, int flags) { struct sock *sk = sock->sk; @@ -267,10 +253,9 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, chan->mode != L2CAP_MODE_EXT_FLOWCTL) chan->mode = L2CAP_MODE_LE_FLOWCTL; - l2cap_sock_init_pid(sk); - err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), - &la.l2_bdaddr, la.l2_bdaddr_type); + &la.l2_bdaddr, la.l2_bdaddr_type, + READ_ONCE(sk->sk_sndtimeo)); if (err) return err; @@ -324,8 +309,6 @@ static int l2cap_sock_listen(struct socket *sock, int backlog) goto done; } - l2cap_sock_init_pid(sk); - sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; @@ -344,7 +327,7 @@ done: } static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, - int flags, bool kern) + struct proto_accept_arg *arg) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *nsk; @@ -353,7 +336,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, lock_sock_nested(sk, L2CAP_NESTING_PARENT); - timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); @@ -456,7 +439,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, struct l2cap_chan *chan = l2cap_pi(sk)->chan; struct l2cap_options opts; struct l2cap_conninfo cinfo; - int len, err = 0; + int err = 0; + size_t len; u32 opt; BT_DBG("sk %p", sk); @@ -503,7 +487,7 @@ static int 
l2cap_sock_getsockopt_old(struct socket *sock, int optname, BT_DBG("mode 0x%2.2x", chan->mode); - len = min_t(unsigned int, len, sizeof(opts)); + len = min(len, sizeof(opts)); if (copy_to_user(optval, (char *) &opts, len)) err = -EFAULT; @@ -553,7 +537,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, cinfo.hci_handle = chan->conn->hcon->handle; memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3); - len = min_t(unsigned int, len, sizeof(cinfo)); + len = min(len, sizeof(cinfo)); if (copy_to_user(optval, (char *) &cinfo, len)) err = -EFAULT; @@ -726,12 +710,12 @@ static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu) { switch (chan->scid) { case L2CAP_CID_ATT: - if (mtu < L2CAP_LE_MIN_MTU) + if (mtu && mtu < L2CAP_LE_MIN_MTU) return false; break; default: - if (mtu < L2CAP_DEFAULT_MIN_MTU) + if (mtu && mtu < L2CAP_DEFAULT_MIN_MTU) return false; } @@ -744,7 +728,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; struct l2cap_options opts; - int len, err = 0; + int err = 0; u32 opt; BT_DBG("sk %p", sk); @@ -771,11 +755,10 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, opts.max_tx = chan->max_tx; opts.txwin_size = chan->tx_win; - len = min_t(unsigned int, sizeof(opts), optlen); - if (copy_from_sockptr(&opts, optval, len)) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opts, sizeof(opts), optval, + optlen); + if (err) break; - } if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) { err = -EINVAL; @@ -818,10 +801,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, break; case L2CAP_LM: - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) break; - } if (opt & L2CAP_LM_FIPS) { err = -EINVAL; @@ -902,7 +884,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, struct bt_security sec; struct bt_power pwr; struct l2cap_conn *conn; - int len, err = 0; + int err = 0; u32 opt; u16 mtu; u8 mode; @@ -928,11 +910,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, sec.level = BT_SECURITY_LOW; - len = min_t(unsigned int, sizeof(sec), optlen); - if (copy_from_sockptr(&sec, optval, len)) { - err = -EFAULT; + err = copy_safe_from_sockptr(&sec, sizeof(sec), optval, optlen); + if (err) break; - } if (sec.level < BT_SECURITY_LOW || sec.level > BT_SECURITY_FIPS) { @@ -977,10 +957,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) break; - } if (opt) { set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); @@ -992,10 +971,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; case BT_FLUSHABLE: - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) break; - } if (opt > BT_FLUSHABLE_ON) { err = -EINVAL; @@ -1027,11 +1005,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, pwr.force_active = BT_POWER_FORCE_ACTIVE_ON; - len = min_t(unsigned int, sizeof(pwr), optlen); - if (copy_from_sockptr(&pwr, optval, len)) { - err = -EFAULT; + err = copy_safe_from_sockptr(&pwr, sizeof(pwr), optval, optlen); + if (err) break; - } if (pwr.force_active) 
set_bit(FLAG_FORCE_ACTIVE, &chan->flags); @@ -1040,28 +1016,11 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; case BT_CHANNEL_POLICY: - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; - break; - } - - if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) { - err = -EINVAL; - break; - } - - if (chan->mode != L2CAP_MODE_ERTM && - chan->mode != L2CAP_MODE_STREAMING) { - err = -EOPNOTSUPP; + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) break; - } - - chan->chan_policy = (u8) opt; - - if (sk->sk_state == BT_CONNECTED && - chan->move_role == L2CAP_MOVE_ROLE_NONE) - l2cap_move_start(chan); + err = -EOPNOTSUPP; break; case BT_SNDMTU: @@ -1088,10 +1047,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&mtu, optval, sizeof(u16))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&mtu, sizeof(mtu), optval, optlen); + if (err) break; - } if (chan->mode == L2CAP_MODE_EXT_FLOWCTL && sk->sk_state == BT_CONNECTED) @@ -1119,10 +1077,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&mode, optval, sizeof(u8))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&mode, sizeof(mode), optval, + optlen); + if (err) break; - } BT_DBG("mode %u", mode); @@ -1148,6 +1106,7 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg, { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; + struct sockcm_cookie sockc; int err; BT_DBG("sock %p, sk %p", sock, sk); @@ -1162,6 +1121,14 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg, if (sk->sk_state != BT_CONNECTED) return -ENOTCONN; + hci_sockcm_init(&sockc, sk); + + if (msg->msg_controllen) { + err = sock_cmsg_send(sk, msg, &sockc); + if (err) + return err; + } + lock_sock(sk); err = bt_sock_wait_ready(sk, msg->msg_flags); release_sock(sk); @@ -1169,12 +1136,40 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg, return err; l2cap_chan_lock(chan); - err = l2cap_chan_send(chan, msg, len); + err = l2cap_chan_send(chan, msg, len, &sockc); l2cap_chan_unlock(chan); return err; } +static void l2cap_publish_rx_avail(struct l2cap_chan *chan) +{ + struct sock *sk = chan->data; + ssize_t avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc); + int expected_skbs, skb_overhead; + + if (avail <= 0) { + l2cap_chan_rx_avail(chan, 0); + return; + } + + if (!chan->mps) { + l2cap_chan_rx_avail(chan, -1); + return; + } + + /* Correct available memory by estimated sk_buff overhead. + * This is significant due to small transfer sizes. However, accept + * at least one full packet if receive space is non-zero. 
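+ * As a rough illustration (struct sk_buff is a few hundred bytes): + * ~208 KiB available with an MPS of 672 predicts ~317 skbs, i.e. + * tens of kilobytes deducted from the advertised rx budget.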
+ */ + expected_skbs = DIV_ROUND_UP(avail, chan->mps); + skb_overhead = expected_skbs * sizeof(struct sk_buff); + if (skb_overhead < avail) + l2cap_chan_rx_avail(chan, avail - skb_overhead); + else + l2cap_chan_rx_avail(chan, -1); +} + static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { @@ -1182,6 +1177,10 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg, struct l2cap_pinfo *pi = l2cap_pi(sk); int err; + if (unlikely(flags & MSG_ERRQUEUE)) + return sock_recv_errqueue(sk, msg, len, SOL_BLUETOOTH, + BT_SCM_ERROR); + lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, @@ -1211,28 +1210,33 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg, else err = bt_sock_recvmsg(sock, msg, len, flags); - if (pi->chan->mode != L2CAP_MODE_ERTM) + if (pi->chan->mode != L2CAP_MODE_ERTM && + pi->chan->mode != L2CAP_MODE_LE_FLOWCTL && + pi->chan->mode != L2CAP_MODE_EXT_FLOWCTL) return err; - /* Attempt to put pending rx data in the socket buffer */ - lock_sock(sk); - if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state)) - goto done; + l2cap_publish_rx_avail(pi->chan); - if (pi->rx_busy_skb) { - if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb)) - pi->rx_busy_skb = NULL; - else + /* Attempt to put pending rx data in the socket buffer */ + while (!list_empty(&pi->rx_busy)) { + struct l2cap_rx_busy *rx_busy = + list_first_entry(&pi->rx_busy, + struct l2cap_rx_busy, + list); + if (__sock_queue_rcv_skb(sk, rx_busy->skb) < 0) goto done; + list_del(&rx_busy->list); + kfree(rx_busy); } /* Restore data flow when half of the receive buffer is * available. This avoids resending large numbers of * frames. */ - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1) + if (test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state) && + atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1) l2cap_chan_busy(pi->chan, 0); done: @@ -1250,6 +1254,10 @@ static void l2cap_sock_kill(struct sock *sk) BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state)); + /* Sock is dead, so set chan data to NULL to keep any other task + * from using the stale sock pointer.
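+ * Callbacks that may still race in, e.g. l2cap_sock_recv_cb() and + * l2cap_sock_resume_cb(), check chan->data for NULL before touching + * the socket.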
+ */ + l2cap_pi(sk)->chan->data = NULL; /* Kill poor orphan */ l2cap_chan_put(l2cap_pi(sk)->chan); @@ -1331,9 +1339,10 @@ static int l2cap_sock_shutdown(struct socket *sock, int how) /* prevent sk structure from being freed whilst unlocked */ sock_hold(sk); - chan = l2cap_pi(sk)->chan; /* prevent chan structure from being freed whilst unlocked */ - l2cap_chan_hold(chan); + chan = l2cap_chan_hold_unless_zero(l2cap_pi(sk)->chan); + if (!chan) + goto shutdown_already; BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); @@ -1363,22 +1372,20 @@ static int l2cap_sock_shutdown(struct socket *sock, int how) release_sock(sk); l2cap_chan_lock(chan); - conn = chan->conn; - if (conn) - /* prevent conn structure from being freed */ - l2cap_conn_get(conn); + /* prevent conn structure from being freed */ + conn = l2cap_conn_hold_unless_zero(chan->conn); l2cap_chan_unlock(chan); if (conn) /* mutex lock must be taken before l2cap_chan_lock() */ - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); l2cap_chan_lock(chan); l2cap_chan_close(chan, 0); l2cap_chan_unlock(chan); if (conn) { - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); l2cap_conn_put(conn); } @@ -1415,6 +1422,10 @@ static int l2cap_sock_release(struct socket *sock) if (!sk) return 0; + lock_sock_nested(sk, L2CAP_NESTING_PARENT); + l2cap_sock_cleanup_listen(sk); + release_sock(sk); + bt_sock_unlink(&l2cap_sk_list, sk); err = l2cap_sock_shutdown(sock, SHUT_RDWR); @@ -1491,18 +1502,25 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan) static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) { - struct sock *sk = chan->data; + struct sock *sk; + struct l2cap_pinfo *pi; int err; - lock_sock(sk); + sk = chan->data; + if (!sk) + return -ENXIO; - if (l2cap_pi(sk)->rx_busy_skb) { + pi = l2cap_pi(sk); + lock_sock(sk); + if (chan->mode == L2CAP_MODE_ERTM && !list_empty(&pi->rx_busy)) { err = -ENOMEM; goto done; } if (chan->mode != L2CAP_MODE_ERTM && - chan->mode != L2CAP_MODE_STREAMING) { + chan->mode != L2CAP_MODE_STREAMING && + chan->mode != L2CAP_MODE_LE_FLOWCTL && + chan->mode != L2CAP_MODE_EXT_FLOWCTL) { /* Even if no filter is attached, we could potentially * get errors from security modules, etc. */ @@ -1513,7 +1531,9 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) err = __sock_queue_rcv_skb(sk, skb); - /* For ERTM, handle one skb that doesn't fit into the recv + l2cap_publish_rx_avail(chan); + + /* For ERTM and LE, handle a skb that doesn't fit into the recv * buffer. This is important to do because the data frames * have already been acked, so the skb cannot be discarded. * @@ -1522,8 +1542,18 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) * acked and reassembled until there is buffer space * available. 
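+ * Deferred skbs go on the pi->rx_busy list below, and + * l2cap_sock_recvmsg() retries queueing them once the application + * has drained the receive queue.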
*/ - if (err < 0 && chan->mode == L2CAP_MODE_ERTM) { - l2cap_pi(sk)->rx_busy_skb = skb; + if (err < 0 && + (chan->mode == L2CAP_MODE_ERTM || + chan->mode == L2CAP_MODE_LE_FLOWCTL || + chan->mode == L2CAP_MODE_EXT_FLOWCTL)) { + struct l2cap_rx_busy *rx_busy = + kmalloc(sizeof(*rx_busy), GFP_KERNEL); + if (!rx_busy) { + err = -ENOMEM; + goto done; + } + rx_busy->skb = skb; + list_add_tail(&rx_busy->list, &pi->rx_busy); l2cap_chan_busy(chan, 1); err = 0; } @@ -1624,7 +1654,15 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, if (!skb) return ERR_PTR(err); - skb->priority = sk->sk_priority; + /* Channel lock is released before requesting new skb and then + * reacquired thus we need to recheck channel state. + */ + if (chan->state != BT_CONNECTED) { + kfree_skb(skb); + return ERR_PTR(-ENOTCONN); + } + + skb->priority = READ_ONCE(sk->sk_priority); bt_cb(skb)->l2cap.chan = chan; @@ -1668,6 +1706,9 @@ static void l2cap_sock_resume_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; + if (!sk) + return; + if (test_and_clear_bit(FLAG_PENDING_SECURITY, &chan->flags)) { sk->sk_state = BT_CONNECTED; chan->state = BT_CONNECTED; @@ -1690,7 +1731,7 @@ static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; - return sk->sk_sndtimeo; + return READ_ONCE(sk->sk_sndtimeo); } static struct pid *l2cap_sock_get_peer_pid_cb(struct l2cap_chan *chan) @@ -1741,6 +1782,8 @@ static const struct l2cap_ops l2cap_chan_ops = { static void l2cap_sock_destruct(struct sock *sk) { + struct l2cap_rx_busy *rx_busy, *next; + BT_DBG("sk %p", sk); if (l2cap_pi(sk)->chan) { @@ -1748,9 +1791,10 @@ static void l2cap_sock_destruct(struct sock *sk) l2cap_chan_put(l2cap_pi(sk)->chan); } - if (l2cap_pi(sk)->rx_busy_skb) { - kfree_skb(l2cap_pi(sk)->rx_busy_skb); - l2cap_pi(sk)->rx_busy_skb = NULL; + list_for_each_entry_safe(rx_busy, next, &l2cap_pi(sk)->rx_busy, list) { + kfree_skb(rx_busy->skb); + list_del(&rx_busy->list); + kfree(rx_busy); } skb_queue_purge(&sk->sk_receive_queue); @@ -1834,6 +1878,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) chan->data = sk; chan->ops = &l2cap_chan_ops; + + l2cap_publish_rx_avail(chan); } static struct proto l2cap_proto = { @@ -1848,24 +1894,20 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, struct sock *sk; struct l2cap_chan *chan; - sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, kern); + sk = bt_sock_alloc(net, sock, &l2cap_proto, proto, prio, kern); if (!sk) return NULL; - sock_init_data(sock, sk); - INIT_LIST_HEAD(&bt_sk(sk)->accept_q); - sk->sk_destruct = l2cap_sock_destruct; sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT; - sock_reset_flag(sk, SOCK_ZAPPED); - - sk->sk_protocol = proto; - sk->sk_state = BT_OPEN; + INIT_LIST_HEAD(&l2cap_pi(sk)->rx_busy); chan = l2cap_chan_create(); if (!chan) { sk_free(sk); + if (sock) + sock->sk = NULL; return NULL; } diff --git a/net/bluetooth/leds.c b/net/bluetooth/leds.c index f46847632ffa..6e349704efe4 100644 --- a/net/bluetooth/leds.c +++ b/net/bluetooth/leds.c @@ -48,7 +48,7 @@ static int power_activate(struct led_classdev *led_cdev) htrig = to_hci_basic_led_trigger(led_cdev->trigger); powered = test_bit(HCI_UP, &htrig->hdev->flags); - led_trigger_event(led_cdev->trigger, powered ? LED_FULL : LED_OFF); + led_set_brightness(led_cdev, powered ? 
LED_FULL : LED_OFF); return 0; } diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c index 5326f41a58b7..305044a84478 100644 --- a/net/bluetooth/lib.c +++ b/net/bluetooth/lib.c @@ -30,6 +30,15 @@ #include <net/bluetooth/bluetooth.h> +/** + * baswap() - Swaps the order of a bd address + * @dst: Pointer to a bdaddr_t struct that will store the swapped + * bd address. + * @src: Pointer to the bdaddr_t struct to be swapped. + * + * This function reverses the byte order of a Bluetooth device + * address. + */ void baswap(bdaddr_t *dst, const bdaddr_t *src) { const unsigned char *s = (const unsigned char *)src; @@ -41,7 +50,19 @@ void baswap(bdaddr_t *dst, const bdaddr_t *src) } EXPORT_SYMBOL(baswap); -/* Bluetooth error codes to Unix errno mapping */ +/** + * bt_to_errno() - Bluetooth error codes to standard errno + * @code: Bluetooth error code to be converted + * + * This function takes a Bluetooth error code as input and converts + * it to an equivalent Unix/standard errno value. + * + * Return: + * + * If the bt error code is known, an equivalent Unix errno value + * is returned. + * If the given bt error code is not known, ENOSYS is returned. + */ int bt_to_errno(__u16 code) { switch (code) { @@ -135,6 +156,93 @@ int bt_to_errno(__u16 code) } EXPORT_SYMBOL(bt_to_errno); +/** + * bt_status() - Standard errno value to Bluetooth error code + * @err: Unix/standard errno value to be converted + * + * This function converts a standard/Unix errno value to an + * equivalent Bluetooth error code. + * + * Return: Bluetooth error code. + * + * If the given errno is not found, 0x1f is returned by default + * which indicates an unspecified error. + * For err >= 0, no conversion is performed, and the same value + * is immediately returned. + */ +__u8 bt_status(int err) +{ + if (err >= 0) + return err; + + switch (err) { + case -EBADRQC: + return 0x01; + + case -ENOTCONN: + return 0x02; + + case -EIO: + return 0x03; + + case -EHOSTDOWN: + return 0x04; + + case -EACCES: + return 0x05; + + case -EBADE: + return 0x06; + + case -ENOMEM: + return 0x07; + + case -ETIMEDOUT: + return 0x08; + + case -EMLINK: + return 0x09; + + case -EALREADY: + return 0x0b; + + case -EBUSY: + return 0x0c; + + case -ECONNREFUSED: + return 0x0d; + + case -EOPNOTSUPP: + return 0x11; + + case -EINVAL: + return 0x12; + + case -ECONNRESET: + return 0x13; + + case -ECONNABORTED: + return 0x16; + + case -ELOOP: + return 0x17; + + case -EPROTONOSUPPORT: + return 0x1a; + + case -EPROTO: + return 0x19; + + default: + return 0x1f; + } +} +EXPORT_SYMBOL(bt_status); + +/** + * bt_info() - Log Bluetooth information message + * @format: Message's format string + */ void bt_info(const char *format, ...) { struct va_format vaf; @@ -151,6 +259,10 @@ void bt_info(const char *format, ...) } EXPORT_SYMBOL(bt_info); +/** + * bt_warn() - Log Bluetooth warning message + * @format: Message's format string + */ void bt_warn(const char *format, ...) { struct va_format vaf; @@ -167,6 +279,10 @@ void bt_warn(const char *format, ...) } EXPORT_SYMBOL(bt_warn); +/** + * bt_err() - Log Bluetooth error message + * @format: Message's format string + */ void bt_err(const char *format, ...) { struct va_format vaf; @@ -196,6 +312,10 @@ bool bt_dbg_get(void) return debug_enable; } +/** + * bt_dbg() - Log Bluetooth debugging message + * @format: Message's format string + */ void bt_dbg(const char *format, ...) { struct va_format vaf; @@ -216,6 +336,13 @@ void bt_dbg(const char *format, ...) 
EXPORT_SYMBOL(bt_dbg); #endif +/** + * bt_warn_ratelimited() - Log rate-limited Bluetooth warning message + * @format: Message's format string + * + * This function works like bt_warn, but it uses rate limiting + * to prevent the message from being logged too often. + */ void bt_warn_ratelimited(const char *format, ...) { struct va_format vaf; @@ -232,6 +359,13 @@ void bt_warn_ratelimited(const char *format, ...) } EXPORT_SYMBOL(bt_warn_ratelimited); +/** + * bt_err_ratelimited() - Log rate-limited Bluetooth error message + * @format: Message's format string + * + * This function works like bt_err, but it uses rate limiting + * to prevent the message from being logged too often. + */ void bt_err_ratelimited(const char *format, ...) { struct va_format vaf; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 37087cf7dc5a..c11cdef42b6f 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -25,7 +25,7 @@ /* Bluetooth HCI Management interface */ #include <linux/module.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> @@ -33,7 +33,6 @@ #include <net/bluetooth/l2cap.h> #include <net/bluetooth/mgmt.h> -#include "hci_request.h" #include "smp.h" #include "mgmt_util.h" #include "mgmt_config.h" @@ -42,7 +41,7 @@ #include "aosp.h" #define MGMT_VERSION 1 -#define MGMT_REVISION 21 +#define MGMT_REVISION 23 static const u16 mgmt_commands[] = { MGMT_OP_READ_INDEX_LIST, @@ -129,6 +128,11 @@ static const u16 mgmt_commands[] = { MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, + MGMT_OP_SET_MESH_RECEIVER, + MGMT_OP_MESH_READ_FEATURES, + MGMT_OP_MESH_SEND, + MGMT_OP_MESH_SEND_CANCEL, + MGMT_OP_HCI_CMD_SYNC, }; static const u16 mgmt_events[] = { @@ -174,6 +178,8 @@ static const u16 mgmt_events[] = { MGMT_EV_ADV_MONITOR_REMOVED, MGMT_EV_CONTROLLER_SUSPEND, MGMT_EV_CONTROLLER_RESUME, + MGMT_EV_ADV_MONITOR_DEVICE_FOUND, + MGMT_EV_ADV_MONITOR_DEVICE_LOST, }; static const u16 mgmt_untrusted_commands[] = { @@ -204,7 +210,7 @@ static const u16 mgmt_untrusted_events[] = { MGMT_EV_EXP_FEATURE_CHANGED, }; -#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) +#define CACHE_TIMEOUT secs_to_jiffies(2) #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ "\x00\x00\x00\x00\x00\x00\x00\x00" @@ -437,8 +443,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, count = 0; list_for_each_entry(d, &hci_dev_list, list) { - if (d->dev_type == HCI_PRIMARY && - !hci_dev_test_flag(d, HCI_UNCONFIGURED)) + if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) count++; } @@ -459,11 +464,10 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, /* Devices marked as raw-only are neither configured * nor unconfigured controllers.
*/ - if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) + if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE)) continue; - if (d->dev_type == HCI_PRIMARY && - !hci_dev_test_flag(d, HCI_UNCONFIGURED)) { + if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) { rp->index[count++] = cpu_to_le16(d->id); bt_dev_dbg(hdev, "Added hci%u", d->id); } @@ -497,8 +501,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev, count = 0; list_for_each_entry(d, &hci_dev_list, list) { - if (d->dev_type == HCI_PRIMARY && - hci_dev_test_flag(d, HCI_UNCONFIGURED)) + if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) count++; } @@ -519,11 +522,10 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev, /* Devices marked as raw-only are neither configured * nor unconfigured controllers. */ - if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) + if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE)) continue; - if (d->dev_type == HCI_PRIMARY && - hci_dev_test_flag(d, HCI_UNCONFIGURED)) { + if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) { rp->index[count++] = cpu_to_le16(d->id); bt_dev_dbg(hdev, "Added hci%u", d->id); } @@ -555,10 +557,8 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev, read_lock(&hci_dev_list_lock); count = 0; - list_for_each_entry(d, &hci_dev_list, list) { - if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP) - count++; - } + list_for_each_entry(d, &hci_dev_list, list) + count++; rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC); if (!rp) { @@ -576,19 +576,13 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev, /* Devices marked as raw-only are neither configured * nor unconfigured controllers. */ - if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) + if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE)) continue; - if (d->dev_type == HCI_PRIMARY) { - if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) - rp->entry[count].type = 0x01; - else - rp->entry[count].type = 0x00; - } else if (d->dev_type == HCI_AMP) { - rp->entry[count].type = 0x02; - } else { - continue; - } + if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) + rp->entry[count].type = 0x01; + else + rp->entry[count].type = 0x00; rp->entry[count].bus = d->bus; rp->entry[count++].index = cpu_to_le16(d->id); @@ -618,12 +612,12 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev, static bool is_configured(struct hci_dev *hdev) { - if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) && + if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) && !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED)) return false; - if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) || - test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) && + if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) || + hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) && !bacmp(&hdev->public_addr, BDADDR_ANY)) return false; @@ -634,12 +628,12 @@ static __le32 get_missing_options(struct hci_dev *hdev) { u32 options = 0; - if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) && + if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) && !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED)) options |= MGMT_OPTION_EXTERNAL_CONFIG; - if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) || - test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) && + if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) || + hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) && !bacmp(&hdev->public_addr, BDADDR_ANY)) options |= MGMT_OPTION_PUBLIC_ADDRESS; @@ -675,7 +669,7 @@ static int read_config_info(struct sock *sk, struct hci_dev *hdev, memset(&rp, 0, 
sizeof(rp)); rp.manufacturer = cpu_to_le16(hdev->manufacturer); - if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks)) + if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG)) options |= MGMT_OPTION_EXTERNAL_CONFIG; if (hdev->set_bdaddr) @@ -829,15 +823,12 @@ static u32 get_supported_settings(struct hci_dev *hdev) if (lmp_ssp_capable(hdev)) { settings |= MGMT_SETTING_SSP; - if (IS_ENABLED(CONFIG_BT_HS)) - settings |= MGMT_SETTING_HS; } if (lmp_sc_capable(hdev)) settings |= MGMT_SETTING_SECURE_CONN; - if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, - &hdev->quirks)) + if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED)) settings |= MGMT_SETTING_WIDEBAND_SPEECH; } @@ -849,10 +840,24 @@ static u32 get_supported_settings(struct hci_dev *hdev) settings |= MGMT_SETTING_ADVERTISING; } - if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || - hdev->set_bdaddr) + if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr) settings |= MGMT_SETTING_CONFIGURATION; + if (cis_central_capable(hdev)) + settings |= MGMT_SETTING_CIS_CENTRAL; + + if (cis_peripheral_capable(hdev)) + settings |= MGMT_SETTING_CIS_PERIPHERAL; + + if (ll_privacy_capable(hdev)) + settings |= MGMT_SETTING_LL_PRIVACY; + + if (past_sender_capable(hdev)) + settings |= MGMT_SETTING_PAST_SENDER; + + if (past_receiver_capable(hdev)) + settings |= MGMT_SETTING_PAST_RECEIVER; + settings |= MGMT_SETTING_PHY_CONFIGURATION; return settings; @@ -889,9 +894,6 @@ static u32 get_current_settings(struct hci_dev *hdev) if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) settings |= MGMT_SETTING_SSP; - if (hci_dev_test_flag(hdev, HCI_HS_ENABLED)) - settings |= MGMT_SETTING_HS; - if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) settings |= MGMT_SETTING_ADVERTISING; @@ -926,6 +928,27 @@ static u32 get_current_settings(struct hci_dev *hdev) if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED)) settings |= MGMT_SETTING_WIDEBAND_SPEECH; + if (cis_central_enabled(hdev)) + settings |= MGMT_SETTING_CIS_CENTRAL; + + if (cis_peripheral_enabled(hdev)) + settings |= MGMT_SETTING_CIS_PERIPHERAL; + + if (bis_enabled(hdev)) + settings |= MGMT_SETTING_ISO_BROADCASTER; + + if (sync_recv_enabled(hdev)) + settings |= MGMT_SETTING_ISO_SYNC_RECEIVER; + + if (ll_privacy_enabled(hdev)) + settings |= MGMT_SETTING_LL_PRIVACY; + + if (past_sender_enabled(hdev)) + settings |= MGMT_SETTING_PAST_SENDER; + + if (past_receiver_enabled(hdev)) + settings |= MGMT_SETTING_PAST_RECEIVER; + return settings; } @@ -1021,13 +1044,102 @@ static void rpa_expired(struct work_struct *work) hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL); } +static int set_discoverable_sync(struct hci_dev *hdev, void *data); + +static void discov_off(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + discov_off.work); + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + /* When discoverable timeout triggers, then just make sure + * the limited discoverable flag is cleared. Even in the case + * of a timeout triggered from general discoverable, it is + * safe to unconditionally clear the flag. 
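+ * The timer itself is (re)armed from set_discoverable() and its + * completion handler via queue_delayed_work() on req_workqueue.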
+ */ + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); + hdev->discov_timeout = 0; + + hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL); + + mgmt_new_settings(hdev); + + hci_dev_unlock(hdev); +} + +static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev); + +static void mesh_send_complete(struct hci_dev *hdev, + struct mgmt_mesh_tx *mesh_tx, bool silent) +{ + u8 handle = mesh_tx->handle; + + if (!silent) + mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle, + sizeof(handle), NULL); + + mgmt_mesh_remove(mesh_tx); +} + +static int mesh_send_done_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_mesh_tx *mesh_tx; + + hci_dev_clear_flag(hdev, HCI_MESH_SENDING); + if (list_empty(&hdev->adv_instances)) + hci_disable_advertising_sync(hdev); + mesh_tx = mgmt_mesh_next(hdev, NULL); + + if (mesh_tx) + mesh_send_complete(hdev, mesh_tx, false); + + return 0; +} + +static int mesh_send_sync(struct hci_dev *hdev, void *data); +static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err); +static void mesh_next(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL); + + if (!mesh_tx) + return; + + err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx, + mesh_send_start_complete); + + if (err < 0) + mesh_send_complete(hdev, mesh_tx, false); + else + hci_dev_set_flag(hdev, HCI_MESH_SENDING); +} + +static void mesh_send_done(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + mesh_send_done.work); + + if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING)) + return; + + hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next); +} + static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) { - if (hci_dev_test_and_set_flag(hdev, HCI_MGMT)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) return; + BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION); + + INIT_DELAYED_WORK(&hdev->discov_off, discov_off); INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off); INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired); + INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done); /* Non-mgmt controlled devices get this bit set * implicitly so that pairing works for them, however @@ -1035,6 +1147,8 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) * it */ hci_dev_clear_flag(hdev, HCI_BONDABLE); + + hci_dev_set_flag(hdev, HCI_MGMT); } static int read_controller_info(struct sock *sk, struct hci_dev *hdev, @@ -1080,11 +1194,11 @@ static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir) eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE, hdev->appearance); - name_len = strlen(hdev->dev_name); + name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name)); eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE, hdev->dev_name, name_len); - name_len = strlen(hdev->short_name); + name_len = strnlen(hdev->short_name, sizeof(hdev->short_name)); eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT, hdev->short_name, name_len); @@ -1191,15 +1305,15 @@ static void restart_le_actions(struct hci_dev *hdev) /* Needed for AUTO_OFF case where might not "really" * have been powered off. 
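+ * Each param is dropped from its current list first so the switch + * below can re-file it on pend_le_conns or pend_le_reports.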
*/ - list_del_init(&p->action); + hci_pend_le_list_del_init(p); switch (p->auto_connect) { case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: - list_add(&p->action, &hdev->pend_le_conns); + hci_pend_le_list_add(p, &hdev->pend_le_conns); break; case HCI_AUTO_CONN_REPORT: - list_add(&p->action, &hdev->pend_le_reports); + hci_pend_le_list_add(p, &hdev->pend_le_reports); break; default: break; @@ -1218,7 +1332,13 @@ static int new_settings(struct hci_dev *hdev, struct sock *skip) static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; - struct mgmt_mode *cp = cmd->param; + struct mgmt_mode *cp; + + /* Make sure cmd still outstanding. */ + if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd)) + return; + + cp = cmd->param; bt_dev_dbg(hdev, "err %d", err); @@ -1248,11 +1368,23 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err) static int set_powered_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; - struct mgmt_mode *cp = cmd->param; + struct mgmt_mode cp; + + mutex_lock(&hdev->mgmt_pending_lock); + + /* Make sure cmd still outstanding. */ + if (!__mgmt_pending_listed(hdev, cmd)) { + mutex_unlock(&hdev->mgmt_pending_lock); + return -ECANCELED; + } + + memcpy(&cp, cmd->param, sizeof(cp)); + + mutex_unlock(&hdev->mgmt_pending_lock); BT_DBG("%s", hdev->name); - return hci_set_powered_sync(hdev, cp->val); + return hci_set_powered_sync(hdev, cp.val); } static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, @@ -1270,6 +1402,14 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_lock(hdev); + if (!cp->val) { + if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, + MGMT_STATUS_BUSY); + goto failed; + } + } + if (pending_find(MGMT_OP_SET_POWERED, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, MGMT_STATUS_BUSY); @@ -1281,14 +1421,25 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } - err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd, - mgmt_set_powered_complete); + /* Cancel potentially blocking sync operation before power off */ + if (cp->val == 0x00) { + hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN); + err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd, + mgmt_set_powered_complete); + } else { + /* Use hci_cmd_sync_submit since hdev might not be running */ + err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd, + mgmt_set_powered_complete); + } + + if (err < 0) + mgmt_pending_remove(cmd); failed: hci_dev_unlock(hdev); @@ -1312,32 +1463,30 @@ static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data) send_settings_rsp(cmd->sk, cmd->opcode, match->hdev); - list_del(&cmd->list); - if (match->sk == NULL) { match->sk = cmd->sk; sock_hold(match->sk); } - - mgmt_pending_free(cmd); } static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data) { u8 *status = data; - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); - mgmt_pending_remove(cmd); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status); } static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) { - if (cmd->cmd_complete) { - u8 *status = data; + struct cmd_lookup *match = data; - cmd->cmd_complete(cmd, *status); - 
mgmt_pending_remove(cmd); + /* dequeue cmd_sync entries using cmd as data as that is about to be + * removed/freed. + */ + hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL); + if (cmd->cmd_complete) { + cmd->cmd_complete(cmd, match->mgmt_status); return; } @@ -1346,13 +1495,13 @@ static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { - return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, + return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, cmd->param, cmd->param_len); } static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { - return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, + return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, cmd->param, sizeof(struct mgmt_addr_info)); } @@ -1383,18 +1532,22 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "err %d", err); + /* Make sure cmd still outstanding. */ + if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd)) + return; + hci_dev_lock(hdev); if (err) { u8 mgmt_err = mgmt_status(err); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); goto done; } if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && hdev->discov_timeout > 0) { - int to = msecs_to_jiffies(hdev->discov_timeout * 1000); + int to = secs_to_jiffies(hdev->discov_timeout); queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to); } @@ -1408,6 +1561,9 @@ done: static int set_discoverable_sync(struct hci_dev *hdev, void *data) { + if (!mgmt_pending_listed(hdev, data)) + return -ECANCELED; + BT_DBG("%s", hdev->name); return hci_update_discoverable_sync(hdev); @@ -1502,7 +1658,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, hdev->discov_timeout = timeout; if (cp->val && hdev->discov_timeout > 0) { - int to = msecs_to_jiffies(hdev->discov_timeout * 1000); + int to = secs_to_jiffies(hdev->discov_timeout); queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to); } @@ -1511,7 +1667,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - cmd = mgmt_pending_new(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -1538,6 +1694,9 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd, mgmt_set_discoverable_complete); + if (err < 0) + mgmt_pending_remove(cmd); + failed: hci_dev_unlock(hdev); return err; @@ -1550,11 +1709,15 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "err %d", err); + /* Make sure cmd still outstanding. 
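+ * (the request may have been canceled and the pending command + * torn down by another thread while the sync operation was in + * flight)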
*/ + if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd)) + return; + hci_dev_lock(hdev); if (err) { u8 mgmt_err = mgmt_status(err); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); goto done; } @@ -1563,6 +1726,7 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data, done: mgmt_pending_free(cmd); + hci_dev_unlock(hdev); } @@ -1587,7 +1751,7 @@ static int set_connectable_update_settings(struct hci_dev *hdev, return err; if (changed) { - hci_req_update_scan(hdev); + hci_update_scan(hdev); hci_update_passive_scan(hdev); return new_settings(hdev, sk); } @@ -1597,6 +1761,9 @@ static int set_connectable_update_settings(struct hci_dev *hdev, static int set_connectable_sync(struct hci_dev *hdev, void *data) { + if (!mgmt_pending_listed(hdev, data)) + return -ECANCELED; + BT_DBG("%s", hdev->name); return hci_update_connectable_sync(hdev); @@ -1634,7 +1801,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - cmd = mgmt_pending_new(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -1654,6 +1821,9 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd, mgmt_set_connectable_complete); + if (err < 0) + mgmt_pending_remove(cmd); + failed: hci_dev_unlock(hdev); return err; @@ -1770,21 +1940,26 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) { struct cmd_lookup match = { NULL, hdev }; struct mgmt_pending_cmd *cmd = data; - struct mgmt_mode *cp = cmd->param; - u8 enable = cp->val; + struct mgmt_mode *cp; + u8 enable; bool changed; + /* Make sure cmd still outstanding. 
*/ + if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd)) + return; + + cp = cmd->param; + enable = cp->val; + if (err) { u8 mgmt_err = mgmt_status(err); if (enable && hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED)) { - hci_dev_clear_flag(hdev, HCI_HS_ENABLED); new_settings(hdev, NULL); } - mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp, - &mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); return; } @@ -1792,15 +1967,9 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); - - if (!changed) - changed = hci_dev_test_and_clear_flag(hdev, - HCI_HS_ENABLED); - else - hci_dev_clear_flag(hdev, HCI_HS_ENABLED); } - mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); + settings_rsp(cmd, &match); if (changed) new_settings(hdev, match.sk); @@ -1814,14 +1983,25 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) static int set_ssp_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; - struct mgmt_mode *cp = cmd->param; + struct mgmt_mode cp; bool changed = false; int err; - if (cp->val) + mutex_lock(&hdev->mgmt_pending_lock); + + if (!__mgmt_pending_listed(hdev, cmd)) { + mutex_unlock(&hdev->mgmt_pending_lock); + return -ECANCELED; + } + + memcpy(&cp, cmd->param, sizeof(cp)); + + mutex_unlock(&hdev->mgmt_pending_lock); + + if (cp.val) changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); - err = hci_write_ssp_mode_sync(hdev, cp->val); + err = hci_write_ssp_mode_sync(hdev, cp.val); if (!err && changed) hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); @@ -1861,11 +2041,6 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); - if (!changed) - changed = hci_dev_test_and_clear_flag(hdev, - HCI_HS_ENABLED); - else - hci_dev_clear_flag(hdev, HCI_HS_ENABLED); } err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); @@ -1911,94 +2086,61 @@ failed: static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { - struct mgmt_mode *cp = data; - bool changed; - u8 status; - int err; - bt_dev_dbg(hdev, "sock %p", sk); - if (!IS_ENABLED(CONFIG_BT_HS)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, - MGMT_STATUS_NOT_SUPPORTED); - - status = mgmt_bredr_support(hdev); - if (status) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); - - if (!lmp_ssp_capable(hdev)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_NOT_SUPPORTED); - - if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, - MGMT_STATUS_REJECTED); - - if (cp->val != 0x00 && cp->val != 0x01) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, - MGMT_STATUS_INVALID_PARAMS); - - hci_dev_lock(hdev); - - if (pending_find(MGMT_OP_SET_SSP, hdev)) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, - MGMT_STATUS_BUSY); - goto unlock; - } - - if (cp->val) { - changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED); - } else { - if (hdev_is_powered(hdev)) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, - MGMT_STATUS_REJECTED); - goto unlock; - } - - changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED); - } - - err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev); - if (err < 0) - goto unlock; - - if (changed) - err = new_settings(hdev, 
sk); - -unlock: - hci_dev_unlock(hdev); - return err; } static void set_le_complete(struct hci_dev *hdev, void *data, int err) { + struct mgmt_pending_cmd *cmd = data; struct cmd_lookup match = { NULL, hdev }; u8 status = mgmt_status(err); bt_dev_dbg(hdev, "err %d", err); - if (status) { - mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp, - &status); + if (err == -ECANCELED || !mgmt_pending_valid(hdev, data)) return; + + if (status) { + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status); + goto done; } - mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match); + settings_rsp(cmd, &match); new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); + +done: + mgmt_pending_free(cmd); } static int set_le_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; - struct mgmt_mode *cp = cmd->param; - u8 val = !!cp->val; + struct mgmt_mode cp; + u8 val; int err; + mutex_lock(&hdev->mgmt_pending_lock); + + if (!__mgmt_pending_listed(hdev, cmd)) { + mutex_unlock(&hdev->mgmt_pending_lock); + return -ECANCELED; + } + + memcpy(&cp, cmd->param, sizeof(cp)); + val = !!cp.val; + + mutex_unlock(&hdev->mgmt_pending_lock); + if (!val) { + hci_clear_adv_instance_sync(hdev, NULL, 0x00, true); + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) hci_disable_advertising_sync(hdev); @@ -2033,6 +2175,360 @@ static int set_le_sync(struct hci_dev *hdev, void *data) return err; } +static void set_mesh_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + u8 status = mgmt_status(err); + struct sock *sk; + + if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd)) + return; + + sk = cmd->sk; + + if (status) { + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + status); + mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true, + cmd_status_rsp, &status); + goto done; + } + + mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0); + +done: + mgmt_pending_free(cmd); +} + +static int set_mesh_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types, + sizeof(hdev->mesh_ad_types)); + size_t len; + + mutex_lock(&hdev->mgmt_pending_lock); + + if (!__mgmt_pending_listed(hdev, cmd)) { + mutex_unlock(&hdev->mgmt_pending_lock); + return -ECANCELED; + } + + len = cmd->param_len; + memcpy(cp, cmd->param, min(__struct_size(cp), len)); + + mutex_unlock(&hdev->mgmt_pending_lock); + + memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types)); + + if (cp->enable) + hci_dev_set_flag(hdev, HCI_MESH); + else + hci_dev_clear_flag(hdev, HCI_MESH); + + hdev->le_scan_interval = __le16_to_cpu(cp->period); + hdev->le_scan_window = __le16_to_cpu(cp->window); + + len -= sizeof(struct mgmt_cp_set_mesh); + + /* If filters don't fit, forward all adv pkts */ + if (len <= sizeof(hdev->mesh_ad_types)) + memcpy(hdev->mesh_ad_types, cp->ad_types, len); + + hci_update_passive_scan_sync(hdev); + return 0; +} + +static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) +{ + struct mgmt_cp_set_mesh *cp = data; + struct mgmt_pending_cmd *cmd; + __u16 period, window; + int err = 0; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_le_capable(hdev) || + !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + MGMT_STATUS_NOT_SUPPORTED); + + if (cp->enable != 0x00 && cp->enable != 0x01) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + 
MGMT_STATUS_INVALID_PARAMS); + + /* Keep allowed ranges in sync with set_scan_params() */ + period = __le16_to_cpu(cp->period); + + if (period < 0x0004 || period > 0x4000) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + MGMT_STATUS_INVALID_PARAMS); + + window = __le16_to_cpu(cp->window); + + if (window < 0x0004 || window > 0x4000) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + MGMT_STATUS_INVALID_PARAMS); + + if (window > period) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd, + set_mesh_complete); + + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_remove(cmd); + } + + hci_dev_unlock(hdev); + return err; +} + +static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_mesh_tx *mesh_tx = data; + struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param; + unsigned long mesh_send_interval; + u8 mgmt_err = mgmt_status(err); + + /* Report any errors here, but don't report completion */ + + if (mgmt_err) { + hci_dev_clear_flag(hdev, HCI_MESH_SENDING); + /* Send Complete Error Code for handle */ + mesh_send_complete(hdev, mesh_tx, false); + return; + } + + mesh_send_interval = msecs_to_jiffies((send->cnt) * 25); + queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done, + mesh_send_interval); +} + +static int mesh_send_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_mesh_tx *mesh_tx = data; + struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param; + struct adv_info *adv, *next_instance; + u8 instance = hdev->le_num_of_adv_sets + 1; + u16 timeout, duration; + int err = 0; + + if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt) + return MGMT_STATUS_BUSY; + + timeout = 1000; + duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval); + adv = hci_add_adv_instance(hdev, instance, 0, + send->adv_data_len, send->adv_data, + 0, NULL, + timeout, duration, + HCI_ADV_TX_POWER_NO_PREFERENCE, + hdev->le_adv_min_interval, + hdev->le_adv_max_interval, + mesh_tx->handle); + + if (!IS_ERR(adv)) + mesh_tx->instance = instance; + else + err = PTR_ERR(adv); + + if (hdev->cur_adv_instance == instance) { + /* If the currently advertised instance is being changed then + * cancel the current advertising and schedule the next + * instance. If there is only one instance then the overridden + * advertising data will be visible right away. 
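+ * Worked numbers for the duration computed above, assuming the stack
+ * default le_adv_max_interval of 0x0800 (2048 * 0.625 ms = 1280 ms):
+ * a request with send->cnt == 3 advertises for 3 * 1280 = 3840 ms
+ * before the instance times out. The instance number itself is
+ * le_num_of_adv_sets + 1, deliberately outside the range userspace can
+ * claim through MGMT_OP_ADD_ADVERTISING, so a mesh transmission never
+ * collides with a configured advertising instance.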
+ */ + cancel_adv_timeout(hdev); + + next_instance = hci_get_next_instance(hdev, instance); + if (next_instance) + instance = next_instance->instance; + else + instance = 0; + } else if (hdev->adv_instance_timeout) { + /* Immediately advertise the new instance if no other, or + * let it go naturally from queue if ADV is already happening + */ + instance = 0; + } + + if (instance) + return hci_schedule_adv_instance_sync(hdev, instance, true); + + return err; +} + +static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data) +{ + struct mgmt_rp_mesh_read_features *rp = data; + + if (rp->used_handles >= rp->max_handles) + return; + + rp->handles[rp->used_handles++] = mesh_tx->handle; +} + +static int mesh_features(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_rp_mesh_read_features rp; + + if (!lmp_le_capable(hdev) || + !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, + MGMT_STATUS_NOT_SUPPORTED); + + memset(&rp, 0, sizeof(rp)); + rp.index = cpu_to_le16(hdev->id); + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + rp.max_handles = MESH_HANDLES_MAX; + + hci_dev_lock(hdev); + + if (rp.max_handles) + mgmt_mesh_foreach(hdev, send_count, &rp, sk); + + mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp, + rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX); + + hci_dev_unlock(hdev); + return 0; +} + +static int send_cancel(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param; + struct mgmt_mesh_tx *mesh_tx; + + if (!cancel->handle) { + do { + mesh_tx = mgmt_mesh_next(hdev, cmd->sk); + + if (mesh_tx) + mesh_send_complete(hdev, mesh_tx, false); + } while (mesh_tx); + } else { + mesh_tx = mgmt_mesh_find(hdev, cancel->handle); + + if (mesh_tx && mesh_tx->sk == cmd->sk) + mesh_send_complete(hdev, mesh_tx, false); + } + + mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, + 0, NULL, 0); + mgmt_pending_free(cmd); + + return 0; +} + +static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_pending_cmd *cmd; + int err; + + if (!lmp_le_capable(hdev) || + !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, + MGMT_STATUS_NOT_SUPPORTED); + + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, + MGMT_STATUS_REJECTED); + + hci_dev_lock(hdev); + cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL); + + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_free(cmd); + } + + hci_dev_unlock(hdev); + return err; +} + +static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) +{ + struct mgmt_mesh_tx *mesh_tx; + struct mgmt_cp_mesh_send *send = data; + struct mgmt_rp_mesh_read_features rp; + bool sending; + int err = 0; + + if (!lmp_le_capable(hdev) || + !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, + MGMT_STATUS_NOT_SUPPORTED); + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || + len <= MGMT_MESH_SEND_SIZE || + len > (MGMT_MESH_SEND_SIZE + 31)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, + MGMT_STATUS_REJECTED); + + hci_dev_lock(hdev); + + memset(&rp, 0, 
sizeof(rp)); + rp.max_handles = MESH_HANDLES_MAX; + + mgmt_mesh_foreach(hdev, send_count, &rp, sk); + + if (rp.max_handles <= rp.used_handles) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, + MGMT_STATUS_BUSY); + goto done; + } + + sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING); + mesh_tx = mgmt_mesh_add(sk, hdev, send, len); + + if (!mesh_tx) + err = -ENOMEM; + else if (!sending) + err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx, + mesh_send_start_complete); + + if (err < 0) { + bt_dev_err(hdev, "Send Mesh Failed %d", err); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, + MGMT_STATUS_FAILED); + + if (mesh_tx) { + if (sending) + mgmt_mesh_remove(mesh_tx); + } + } else { + hci_dev_set_flag(hdev, HCI_MESH_SENDING); + + mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0, + &mesh_tx->handle, 1); + } + +done: + hci_dev_unlock(hdev); + return err; +} + static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; @@ -2072,9 +2568,6 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) val = !!cp->val; enabled = lmp_host_le_capable(hdev); - if (!val) - hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true); - if (!hdev_is_powered(hdev) || val == enabled) { bool changed = false; @@ -2125,6 +2618,65 @@ unlock: return err; } +static int send_hci_cmd_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_hci_cmd_sync *cp = cmd->param; + struct sk_buff *skb; + + skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode), + le16_to_cpu(cp->params_len), cp->params, + cp->event, cp->timeout ? + secs_to_jiffies(cp->timeout) : + HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, + mgmt_status(PTR_ERR(skb))); + goto done; + } + + mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0, + skb->data, skb->len); + + kfree_skb(skb); + +done: + mgmt_pending_free(cmd); + + return 0; +} + +static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_hci_cmd_sync *cp = data; + struct mgmt_pending_cmd *cmd; + int err; + + if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) + + le16_to_cpu(cp->params_len))) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL); + + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_free(cmd); + } + + hci_dev_unlock(hdev); + return err; +} + /* This is a helper function to test for pending mgmt commands that can * cause CoD or EIR HCI commands. 
We can only allow one such pending * mgmt command at a time since otherwise we cannot easily track what @@ -2173,7 +2725,7 @@ static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err) bt_dev_dbg(hdev, "err %d", err); - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), hdev->dev_class, 3); mgmt_pending_free(cmd); @@ -2225,7 +2777,11 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) goto failed; } - err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete); + /* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use + * hci_cmd_sync_submit instead of hci_cmd_sync_queue. + */ + err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd, + mgmt_class_complete); if (err < 0) { mgmt_pending_free(cmd); goto failed; @@ -2267,7 +2823,9 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, struct mgmt_cp_remove_uuid *cp = data; struct mgmt_pending_cmd *cmd; struct bt_uuid *match, *tmp; - u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + static const u8 bt_uuid_any[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; int err, found; bt_dev_dbg(hdev, "sock %p", sk); @@ -2317,8 +2875,11 @@ update_class: goto unlock; } - err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd, - mgmt_class_complete); + /* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use + * hci_cmd_sync_submit instead of hci_cmd_sync_queue. + */ + err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd, + mgmt_class_complete); if (err < 0) mgmt_pending_free(cmd); @@ -2384,8 +2945,11 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - err = hci_cmd_sync_queue(hdev, set_class_sync, cmd, - mgmt_class_complete); + /* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use + * hci_cmd_sync_submit instead of hci_cmd_sync_queue. + */ + err = hci_cmd_sync_submit(hdev, set_class_sync, cmd, + mgmt_class_complete); if (err < 0) mgmt_pending_free(cmd); @@ -2433,15 +2997,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys, key_count); - for (i = 0; i < key_count; i++) { - struct mgmt_link_key_info *key = &cp->keys[i]; - - if (key->addr.type != BDADDR_BREDR || key->type > 0x08) - return mgmt_cmd_status(sk, hdev->id, - MGMT_OP_LOAD_LINK_KEYS, - MGMT_STATUS_INVALID_PARAMS); - } - hci_dev_lock(hdev); hci_link_keys_clear(hdev); @@ -2466,6 +3021,19 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, continue; } + if (key->addr.type != BDADDR_BREDR) { + bt_dev_warn(hdev, + "Invalid link address type %u for %pMR", + key->addr.type, &key->addr.bdaddr); + continue; + } + + if (key->type > 0x08) { + bt_dev_warn(hdev, "Invalid link key type %u for %pMR", + key->type, &key->addr.bdaddr); + continue; + } + /* Always ignore debug keys and require a new pairing if * the user wants to use them. 
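 * The same leniency now applies to malformed entries: where the old
 * code rejected the entire MGMT_OP_LOAD_LINK_KEYS request on the first
 * invalid address type or key type (the up-front validation loop
 * removed above), the import loop instead warns and skips just the
 * offending key, so one corrupt record no longer throws away an
 * otherwise valid key list.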
*/ @@ -2495,6 +3063,42 @@ static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr, skip_sk); } +static void unpair_device_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_unpair_device *cp = cmd->param; + + if (!err) + device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); + + cmd->cmd_complete(cmd, err); + mgmt_pending_free(cmd); +} + +static int unpair_device_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_unpair_device *cp = cmd->param; + struct hci_conn *conn; + + if (cp->addr.type == BDADDR_BREDR) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + + if (!conn) + return 0; + + /* Disregard any possible error since the likes of hci_abort_conn_sync + * will clean up the connection no matter the error. + */ + hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); + + return 0; +} + static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -2605,7 +3209,7 @@ done: goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp, + cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp, sizeof(*cp)); if (!cmd) { err = -ENOMEM; @@ -2614,22 +3218,54 @@ done: cmd->cmd_complete = addr_cmd_complete; - err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); + err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd, + unpair_device_complete); if (err < 0) - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); return err; } +static void disconnect_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + + cmd->cmd_complete(cmd, mgmt_status(err)); + mgmt_pending_free(cmd); +} + +static int disconnect_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_disconnect *cp = cmd->param; + struct hci_conn *conn; + + if (cp->addr.type == BDADDR_BREDR) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + + if (!conn) + return -ENOTCONN; + + /* Disregard any possible error since the likes of hci_abort_conn_sync + * will clean up the connection no matter the error. 
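+ * Returning 0 unconditionally is deliberate: unpair_device_complete()
+ * then sees err == 0 and emits the Device Unpaired event right away,
+ * while the link itself is torn down asynchronously by the abort
+ * issued above.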
+ */ + hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); + + return 0; +} + static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_disconnect *cp = data; struct mgmt_rp_disconnect rp; struct mgmt_pending_cmd *cmd; - struct hci_conn *conn; int err; bt_dev_dbg(hdev, "sock %p", sk); @@ -2652,27 +3288,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - if (pending_find(MGMT_OP_DISCONNECT, hdev)) { - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, - MGMT_STATUS_BUSY, &rp, sizeof(rp)); - goto failed; - } - - if (cp->addr.type == BDADDR_BREDR) - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, - &cp->addr.bdaddr); - else - conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, - le_addr_type(cp->addr.type)); - - if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) { - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, - MGMT_STATUS_NOT_CONNECTED, &rp, - sizeof(rp)); - goto failed; - } - - cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -2680,9 +3296,10 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, cmd->cmd_complete = generic_cmd_complete; - err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM); + err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd, + disconnect_complete); if (err < 0) - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); failed: hci_dev_unlock(hdev); @@ -2692,6 +3309,9 @@ failed: static u8 link_to_bdaddr(u8 link_type, u8 addr_type) { switch (link_type) { + case CIS_LINK: + case BIS_LINK: + case PA_LINK: case LE_LINK: switch (addr_type) { case ADDR_LE_DEV_PUBLIC: @@ -2895,7 +3515,7 @@ static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status) bacpy(&rp.addr.bdaddr, &conn->dst); rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type); - err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, + err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE, status, &rp, sizeof(rp)); /* So we don't get further callbacks for this connection */ @@ -3009,7 +3629,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, if (cp->addr.type == BDADDR_BREDR) { conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, - auth_type, CONN_REASON_PAIR_DEVICE); + auth_type, CONN_REASON_PAIR_DEVICE, + HCI_ACL_CONN_TIMEOUT); } else { u8 addr_type = le_addr_type(cp->addr.type); struct hci_conn_params *p; @@ -3024,6 +3645,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, * will be kept and this function does nothing. 
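 * One behavioural fix rides along here: hci_conn_params_add() can
 * return NULL on allocation failure, and the new !p check below bails
 * out with -EIO instead of dereferencing the NULL pointer as the old
 * code would have.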
*/ p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type); + if (!p) { + err = -EIO; + goto unlock; + } if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT) p->auto_connect = HCI_AUTO_CONN_DISABLED; @@ -3316,11 +3941,16 @@ static int name_changed_sync(struct hci_dev *hdev, void *data) static void set_name_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; - struct mgmt_cp_set_local_name *cp = cmd->param; + struct mgmt_cp_set_local_name *cp; u8 status = mgmt_status(err); bt_dev_dbg(hdev, "err %d", err); + if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd)) + return; + + cp = cmd->param; + if (status) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, status); @@ -3332,13 +3962,27 @@ static void set_name_complete(struct hci_dev *hdev, void *data, int err) hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL); } - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); } static int set_name_sync(struct hci_dev *hdev, void *data) { + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_set_local_name cp; + + mutex_lock(&hdev->mgmt_pending_lock); + + if (!__mgmt_pending_listed(hdev, cmd)) { + mutex_unlock(&hdev->mgmt_pending_lock); + return -ECANCELED; + } + + memcpy(&cp, cmd->param, sizeof(cp)); + + mutex_unlock(&hdev->mgmt_pending_lock); + if (lmp_bredr_capable(hdev)) { - hci_update_name_sync(hdev); + hci_update_name_sync(hdev, cp.name); hci_update_eir_sync(hdev); } @@ -3490,9 +4134,11 @@ int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip) static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; - struct sk_buff *skb = cmd->skb; + struct sk_buff *skb; u8 status = mgmt_status(err); + skb = cmd->skb; + if (!status) { if (!skb) status = MGMT_STATUS_FAILED; @@ -3518,7 +4164,7 @@ static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err) if (skb && !IS_ERR(skb)) kfree_skb(skb); - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); } static int set_default_phy_sync(struct hci_dev *hdev, void *data) @@ -3526,7 +4172,9 @@ static int set_default_phy_sync(struct hci_dev *hdev, void *data) struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_set_phy_configuration *cp = cmd->param; struct hci_cp_le_set_default_phy cp_phy; - u32 selected_phys = __le32_to_cpu(cp->selected_phys); + u32 selected_phys; + + selected_phys = __le32_to_cpu(cp->selected_phys); memset(&cp_phy, 0, sizeof(cp_phy)); @@ -3666,7 +4314,7 @@ static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev, goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data, + cmd = mgmt_pending_new(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data, len); if (!cmd) err = -ENOMEM; @@ -3720,7 +4368,7 @@ static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data, hci_blocked_keys_clear(hdev); - for (i = 0; i < keys->key_count; ++i) { + for (i = 0; i < key_count; ++i) { struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL); if (!b) { @@ -3747,7 +4395,7 @@ static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev, bt_dev_dbg(hdev, "sock %p", sk); - if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks)) + if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_WIDEBAND_SPEECH, MGMT_STATUS_NOT_SUPPORTED); @@ -3759,13 +4407,6 @@ static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev, hci_dev_lock(hdev); - if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) { 
- err = mgmt_cmd_status(sk, hdev->id, - MGMT_OP_SET_WIDEBAND_SPEECH, - MGMT_STATUS_BUSY); - goto unlock; - } - if (hdev_is_powered(hdev) && !!cp->val != hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED)) { @@ -3887,32 +4528,41 @@ static const u8 le_simultaneous_roles_uuid[16] = { 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67, }; -/* 15c0a148-c273-11ea-b3de-0242ac130004 */ -static const u8 rpa_resolution_uuid[16] = { - 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3, - 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15, +/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */ +static const u8 iso_socket_uuid[16] = { + 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98, + 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f, +}; + +/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */ +static const u8 mgmt_mesh_uuid[16] = { + 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf, + 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c, }; static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { - char buf[102]; /* Enough space for 5 features: 2 + 20 * 5 */ - struct mgmt_rp_read_exp_features_info *rp = (void *)buf; + struct mgmt_rp_read_exp_features_info *rp; + size_t len; u16 idx = 0; u32 flags; + int status; bt_dev_dbg(hdev, "sock %p", sk); - memset(&buf, 0, sizeof(buf)); + /* Enough space for 7 features */ + len = sizeof(*rp) + (sizeof(rp->features[0]) * 7); + rp = kzalloc(len, GFP_KERNEL); + if (!rp) + return -ENOMEM; #ifdef CONFIG_BT_FEATURE_DEBUG - if (!hdev) { - flags = bt_dbg_get() ? BIT(0) : 0; + flags = bt_dbg_get() ? BIT(0) : 0; - memcpy(rp->features[idx].uuid, debug_uuid, 16); - rp->features[idx].flags = cpu_to_le32(flags); - idx++; - } + memcpy(rp->features[idx].uuid, debug_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; #endif if (hdev && hci_dev_le_state_simultaneous(hdev)) { @@ -3926,17 +4576,6 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, idx++; } - if (hdev && ll_privacy_capable(hdev)) { - if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) - flags = BIT(0) | BIT(1); - else - flags = BIT(1); - - memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16); - rp->features[idx].flags = cpu_to_le32(flags); - idx++; - } - if (hdev && (aosp_has_quality_report(hdev) || hdev->set_quality_report)) { if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)) @@ -3960,6 +4599,24 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, idx++; } + if (IS_ENABLED(CONFIG_BT_LE)) { + flags = iso_inited() ? BIT(0) : 0; + memcpy(rp->features[idx].uuid, iso_socket_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + + if (hdev && lmp_le_capable(hdev)) { + if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) + flags = BIT(0); + else + flags = 0; + + memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + rp->feature_count = cpu_to_le16(idx); /* After reading the experimental features information, enable @@ -3967,29 +4624,12 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, */ hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); - return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, - MGMT_OP_READ_EXP_FEATURES_INFO, - 0, rp, sizeof(*rp) + (20 * idx)); -} - -static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev, - struct sock *skip) -{ - struct mgmt_ev_exp_feature_changed ev; - - memset(&ev, 0, sizeof(ev)); - memcpy(ev.uuid, rpa_resolution_uuid, 16); - ev.flags = cpu_to_le32((enabled ? 
BIT(0) : 0) | BIT(1)); - - if (enabled && privacy_mode_capable(hdev)) - set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags); - else - clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags); - - return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, - &ev, sizeof(ev), - HCI_MGMT_EXP_FEATURE_EVENTS, skip); + status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, + MGMT_OP_READ_EXP_FEATURES_INFO, + 0, rp, sizeof(*rp) + (20 * idx)); + kfree(rp); + return status; } static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid, @@ -4032,16 +4672,6 @@ static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev, } #endif - if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) { - bool changed; - - changed = hci_dev_test_and_clear_flag(hdev, - HCI_ENABLE_LL_PRIVACY); - if (changed) - exp_feature_changed(hdev, rpa_resolution_uuid, false, - sk); - } - hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, @@ -4096,14 +4726,12 @@ static int set_debug_func(struct sock *sk, struct hci_dev *hdev, } #endif -static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev, - struct mgmt_cp_set_exp_feature *cp, - u16 data_len) +static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, u16 data_len) { struct mgmt_rp_set_exp_feature rp; bool val, changed; int err; - u32 flags; /* Command requires to use the controller index */ if (!hdev) @@ -4111,12 +4739,6 @@ static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_INDEX); - /* Changes can only be made when controller is powered down */ - if (hdev_is_powered(hdev)) - return mgmt_cmd_status(sk, hdev->id, - MGMT_OP_SET_EXP_FEATURE, - MGMT_STATUS_REJECTED); - /* Parameters are limited to a single octet */ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) return mgmt_cmd_status(sk, hdev->id, @@ -4133,21 +4755,15 @@ static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev, if (val) { changed = !hci_dev_test_and_set_flag(hdev, - HCI_ENABLE_LL_PRIVACY); - hci_dev_clear_flag(hdev, HCI_ADVERTISING); - - /* Enable LL privacy + supported settings changed */ - flags = BIT(0) | BIT(1); + HCI_MESH_EXPERIMENTAL); } else { + hci_dev_clear_flag(hdev, HCI_MESH); changed = hci_dev_test_and_clear_flag(hdev, - HCI_ENABLE_LL_PRIVACY); - - /* Disable LL privacy + supported settings changed */ - flags = BIT(1); + HCI_MESH_EXPERIMENTAL); } - memcpy(rp.uuid, rpa_resolution_uuid, 16); - rp.flags = cpu_to_le32(flags); + memcpy(rp.uuid, mgmt_mesh_uuid, 16); + rp.flags = cpu_to_le32(val ? 
BIT(0) : 0); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); @@ -4156,7 +4772,7 @@ static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev, &rp, sizeof(rp)); if (changed) - exp_ll_privacy_feature_changed(val, hdev, sk); + exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk); return err; } @@ -4351,6 +4967,57 @@ static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev, return err; } +#ifdef CONFIG_BT_LE +static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, u16 data_len) +{ + struct mgmt_rp_set_exp_feature rp; + bool val, changed = false; + int err; + + /* Command requires to use the non-controller index */ + if (hdev) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + val = cp->param[0] ? true : false; + if (val) + err = iso_init(); + else + err = iso_exit(); + + if (!err) + changed = true; + + memcpy(rp.uuid, iso_socket_uuid, 16); + rp.flags = cpu_to_le32(val ? BIT(0) : 0); + + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_feature_changed(hdev, iso_socket_uuid, val, sk); + + return err; +} +#endif + static const struct mgmt_exp_feature { const u8 *uuid; int (*set_func)(struct sock *sk, struct hci_dev *hdev, @@ -4360,10 +5027,13 @@ static const struct mgmt_exp_feature { #ifdef CONFIG_BT_FEATURE_DEBUG EXP_FEAT(debug_uuid, set_debug_func), #endif - EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func), + EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func), EXP_FEAT(quality_report_uuid, set_quality_report_func), EXP_FEAT(offload_codecs_uuid, set_offload_codec_func), EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func), +#ifdef CONFIG_BT_LE + EXP_FEAT(iso_socket_uuid, set_iso_socket_func), +#endif /* end with a null feature */ EXP_FEAT(NULL, NULL) @@ -4403,8 +5073,7 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_lock(hdev); - bitmap_to_arr32(&supported_flags, hdev->conn_flags, - __HCI_CONN_NUM_FLAGS); + supported_flags = hdev->conn_flags; memset(&rp, 0, sizeof(rp)); @@ -4415,17 +5084,14 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, if (!br_params) goto done; - bitmap_to_arr32(¤t_flags, br_params->flags, - __HCI_CONN_NUM_FLAGS); + current_flags = br_params->flags; } else { params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, le_addr_type(cp->addr.type)); - if (!params) goto done; - bitmap_to_arr32(¤t_flags, params->flags, - __HCI_CONN_NUM_FLAGS); + current_flags = params->flags; } bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); @@ -4456,6 +5122,69 @@ static void device_flags_changed(struct sock *sk, struct hci_dev *hdev, mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk); } +static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) +{ + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr); + if (!conn) + return false; + + if (conn->dst_type != type) + return false; + + if (conn->state != 
BT_CONNECTED) + return false; + + return true; +} + +/* This function requires the caller holds hdev->lock */ +static struct hci_conn_params *hci_conn_params_set(struct hci_dev *hdev, + bdaddr_t *addr, u8 addr_type, + u8 auto_connect) +{ + struct hci_conn_params *params; + + params = hci_conn_params_add(hdev, addr, addr_type); + if (!params) + return NULL; + + if (params->auto_connect == auto_connect) + return params; + + hci_pend_le_list_del_init(params); + + switch (auto_connect) { + case HCI_AUTO_CONN_DISABLED: + case HCI_AUTO_CONN_LINK_LOSS: + /* If auto connect is being disabled when we're trying to + * connect to device, keep connecting. + */ + if (params->explicit_connect) + hci_pend_le_list_add(params, &hdev->pend_le_conns); + break; + case HCI_AUTO_CONN_REPORT: + if (params->explicit_connect) + hci_pend_le_list_add(params, &hdev->pend_le_conns); + else + hci_pend_le_list_add(params, &hdev->pend_le_reports); + break; + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + if (!is_connected(hdev, addr, addr_type)) + hci_pend_le_list_add(params, &hdev->pend_le_conns); + break; + } + + params->auto_connect = auto_connect; + + bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u", + addr, addr_type, auto_connect); + + return params; +} + static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -4467,11 +5196,10 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, u32 current_flags = __le32_to_cpu(cp->current_flags); bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x", - &cp->addr.bdaddr, cp->addr.type, - __le32_to_cpu(current_flags)); + &cp->addr.bdaddr, cp->addr.type, current_flags); - bitmap_to_arr32(&supported_flags, hdev->conn_flags, - __HCI_CONN_NUM_FLAGS); + supported_flags = hdev->conn_flags; if ((supported_flags | current_flags) != supported_flags) { bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)", current_flags, supported_flags); @@ -4487,35 +5215,52 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, cp->addr.type); if (br_params) { - bitmap_from_u64(br_params->flags, current_flags); + br_params->flags = current_flags; status = MGMT_STATUS_SUCCESS; } else { bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)", &cp->addr.bdaddr, cp->addr.type); } - } else { - params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, - le_addr_type(cp->addr.type)); - if (params) { - bitmap_from_u64(params->flags, current_flags); - status = MGMT_STATUS_SUCCESS; - /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY - * has been set.
- */ - if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, - params->flags)) - hci_update_passive_scan(hdev); - } else { + goto unlock; + } + + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + if (!params) { + /* Create a new hci_conn_params if it doesn't exist */ + params = hci_conn_params_set(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type), + HCI_AUTO_CONN_DISABLED); + if (!params) { bt_dev_warn(hdev, "No such LE device %pMR (0x%x)", &cp->addr.bdaddr, le_addr_type(cp->addr.type)); + goto unlock; } } -done: + supported_flags = hdev->conn_flags; + + if ((supported_flags | current_flags) != supported_flags) { + bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)", + current_flags, supported_flags); + goto unlock; + } + + WRITE_ONCE(params->flags, current_flags); + status = MGMT_STATUS_SUCCESS; + + /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY + * has been set. + */ + if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY) + hci_update_passive_scan(hdev); + +unlock: hci_dev_unlock(hdev); +done: if (status == MGMT_STATUS_SUCCESS) device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type, supported_flags, current_flags); @@ -4534,24 +5279,14 @@ static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev, mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk); } -void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle) +static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev, + __le16 handle) { struct mgmt_ev_adv_monitor_removed ev; - struct mgmt_pending_cmd *cmd; - struct sock *sk_skip = NULL; - struct mgmt_cp_remove_adv_monitor *cp; - cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev); - if (cmd) { - cp = cmd->param; - - if (cp->monitor_handle) - sk_skip = cmd->sk; - } - - ev.monitor_handle = cpu_to_le16(handle); + ev.monitor_handle = handle; - mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip); + mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk); } static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, @@ -4603,23 +5338,25 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, return err; } -int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status) +static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, + void *data, int status) { struct mgmt_rp_add_adv_patterns_monitor rp; - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd = data; struct adv_monitor *monitor; - int err = 0; - - hci_dev_lock(hdev); - cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev); - if (!cmd) { - cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev); - if (!cmd) - goto done; - } + /* This is likely the result of hdev being closed and mgmt_index_removed + * is attempting to clean up any pending command so + * hci_adv_monitors_clear is about to be called which will take care of + * freeing the adv_monitor instances. 
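+ * So the guard below distinguishes two cancellation flavours: a
+ * command that was cancelled but is still listed gets a normal
+ * response, while one already unlinked by mgmt_index_removed() must
+ * not be touched at all, because both the command and its monitor are
+ * being freed elsewhere.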
+ */ + if (status == -ECANCELED && !mgmt_pending_valid(hdev, cmd)) + return; monitor = cmd->user_data; + + hci_dev_lock(hdev); + rp.monitor_handle = cpu_to_le16(monitor->handle); if (!status) { @@ -4630,26 +5367,40 @@ int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status) hci_update_passive_scan(hdev); } - err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status), &rp, sizeof(rp)); + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, + mgmt_status(status), &rp, sizeof(rp)); mgmt_pending_remove(cmd); -done: hci_dev_unlock(hdev); - bt_dev_dbg(hdev, "add monitor %d complete, status %u", + bt_dev_dbg(hdev, "add monitor %d complete, status %d", rp.monitor_handle, status); +} - return err; +static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct adv_monitor *mon; + + mutex_lock(&hdev->mgmt_pending_lock); + + if (!__mgmt_pending_listed(hdev, cmd)) { + mutex_unlock(&hdev->mgmt_pending_lock); + return -ECANCELED; + } + + mon = cmd->user_data; + + mutex_unlock(&hdev->mgmt_pending_lock); + + return hci_add_adv_monitor(hdev, mon); } static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, struct adv_monitor *m, u8 status, void *data, u16 len, u16 op) { - struct mgmt_rp_add_adv_patterns_monitor rp; struct mgmt_pending_cmd *cmd; int err; - bool pending; hci_dev_lock(hdev); @@ -4658,8 +5409,7 @@ static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, if (pending_find(MGMT_OP_SET_LE, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || - pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) || - pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) { + pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) { status = MGMT_STATUS_BUSY; goto unlock; } @@ -4671,31 +5421,17 @@ static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, } cmd->user_data = m; - pending = hci_add_adv_monitor(hdev, m, &err); + err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd, + mgmt_add_adv_patterns_monitor_complete); if (err) { - if (err == -ENOSPC || err == -ENOMEM) + if (err == -ENOMEM) status = MGMT_STATUS_NO_RESOURCES; - else if (err == -EINVAL) - status = MGMT_STATUS_INVALID_PARAMS; else status = MGMT_STATUS_FAILED; - mgmt_pending_remove(cmd); goto unlock; } - if (!pending) { - mgmt_pending_remove(cmd); - rp.monitor_handle = cpu_to_le16(m->handle); - mgmt_adv_monitor_added(sk, hdev, m->handle); - m->state = ADV_MONITOR_STATE_REGISTERED; - hdev->adv_monitors_cnt++; - - hci_dev_unlock(hdev); - return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS, - &rp, sizeof(rp)); - } - hci_dev_unlock(hdev); return 0; @@ -4836,94 +5572,85 @@ done: MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI); } -int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status) +static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, + void *data, int status) { struct mgmt_rp_remove_adv_monitor rp; + struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_remove_adv_monitor *cp; - struct mgmt_pending_cmd *cmd; - int err = 0; - hci_dev_lock(hdev); + if (status == -ECANCELED) + return; - cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev); - if (!cmd) - goto done; + hci_dev_lock(hdev); cp = cmd->param; + rp.monitor_handle = cp->monitor_handle; - if (!status) + if (!status) { + mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle); hci_update_passive_scan(hdev); + } - err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, - 
mgmt_status(status), &rp, sizeof(rp)); - mgmt_pending_remove(cmd); + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, + mgmt_status(status), &rp, sizeof(rp)); + mgmt_pending_free(cmd); -done: hci_dev_unlock(hdev); - bt_dev_dbg(hdev, "remove monitor %d complete, status %u", + bt_dev_dbg(hdev, "remove monitor %d complete, status %d", rp.monitor_handle, status); +} - return err; +static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_remove_adv_monitor *cp = cmd->param; + u16 handle = __le16_to_cpu(cp->monitor_handle); + + if (!handle) + return hci_remove_all_adv_monitor(hdev); + + return hci_remove_single_adv_monitor(hdev, handle); } static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { - struct mgmt_cp_remove_adv_monitor *cp = data; - struct mgmt_rp_remove_adv_monitor rp; struct mgmt_pending_cmd *cmd; - u16 handle = __le16_to_cpu(cp->monitor_handle); int err, status; - bool pending; - - BT_DBG("request for %s", hdev->name); - rp.monitor_handle = cp->monitor_handle; hci_dev_lock(hdev); if (pending_find(MGMT_OP_SET_LE, hdev) || - pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) { status = MGMT_STATUS_BUSY; goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len); + cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len); if (!cmd) { status = MGMT_STATUS_NO_RESOURCES; goto unlock; } - if (handle) - pending = hci_remove_single_adv_monitor(hdev, handle, &err); - else - pending = hci_remove_all_adv_monitor(hdev, &err); + err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd, + mgmt_remove_adv_monitor_complete); if (err) { - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); - if (err == -ENOENT) - status = MGMT_STATUS_INVALID_INDEX; + if (err == -ENOMEM) + status = MGMT_STATUS_NO_RESOURCES; else status = MGMT_STATUS_FAILED; goto unlock; } - /* monitor can be removed without forwarding request to controller */ - if (!pending) { - mgmt_pending_remove(cmd); - hci_dev_unlock(hdev); - - return mgmt_cmd_complete(sk, hdev->id, - MGMT_OP_REMOVE_ADV_MONITOR, - MGMT_STATUS_SUCCESS, - &rp, sizeof(rp)); - } - hci_dev_unlock(hdev); + return 0; unlock: @@ -4932,7 +5659,8 @@ unlock: status); } -static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err) +static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, + int err) { struct mgmt_rp_read_local_oob_data mgmt_rp; size_t rp_size = sizeof(mgmt_rp); @@ -4952,7 +5680,8 @@ static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int e bt_dev_dbg(hdev, "status %d", status); if (status) { - mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status); + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, + status); goto remove; } @@ -5036,12 +5765,6 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, goto unlock; } - if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) { - err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, - MGMT_STATUS_BUSY); - goto unlock; - } - cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); if (!cmd) err = -ENOMEM; @@ -5207,29 +5930,6 @@ done: return err; } -void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status) -{ - struct mgmt_pending_cmd *cmd; - - bt_dev_dbg(hdev, "status %u", status); - - 
hci_dev_lock(hdev); - - cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev); - if (!cmd) - cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev); - - if (!cmd) - cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev); - - if (cmd) { - cmd->cmd_complete(cmd, mgmt_status(status)); - mgmt_pending_remove(cmd); - } - - hci_dev_unlock(hdev); -} - static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type, uint8_t *mgmt_status) { @@ -5263,7 +5963,10 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err) bt_dev_dbg(hdev, "err %d", err); - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), + if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd)) + return; + + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), cmd->param, 1); mgmt_pending_free(cmd); @@ -5273,6 +5976,9 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err) static int start_discovery_sync(struct hci_dev *hdev, void *data) { + if (!mgmt_pending_listed(hdev, data)) + return -ECANCELED; + return hci_start_discovery_sync(hdev); } @@ -5327,7 +6033,7 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev, else hdev->discovery.limited = false; - cmd = mgmt_pending_new(sk, op, hdev, data, len); + cmd = mgmt_pending_add(sk, op, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -5336,7 +6042,7 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev, err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd, start_discovery_complete); if (err < 0) { - mgmt_pending_free(cmd); + mgmt_pending_remove(cmd); goto failed; } @@ -5430,7 +6136,7 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, goto failed; } - cmd = mgmt_pending_new(sk, MGMT_OP_START_SERVICE_DISCOVERY, + cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY, hdev, data, len); if (!cmd) { err = -ENOMEM; @@ -5463,7 +6169,7 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd, start_discovery_complete); if (err < 0) { - mgmt_pending_free(cmd); + mgmt_pending_remove(cmd); goto failed; } @@ -5474,30 +6180,16 @@ failed: return err; } -void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status) -{ - struct mgmt_pending_cmd *cmd; - - bt_dev_dbg(hdev, "status %u", status); - - hci_dev_lock(hdev); - - cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev); - if (cmd) { - cmd->cmd_complete(cmd, mgmt_status(status)); - mgmt_pending_remove(cmd); - } - - hci_dev_unlock(hdev); -} - static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; + if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd)) + return; + bt_dev_dbg(hdev, "err %d", err); - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), cmd->param, 1); mgmt_pending_free(cmd); @@ -5507,6 +6199,9 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err) static int stop_discovery_sync(struct hci_dev *hdev, void *data) { + if (!mgmt_pending_listed(hdev, data)) + return -ECANCELED; + return hci_stop_discovery_sync(hdev); } @@ -5535,7 +6230,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - cmd = mgmt_pending_new(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len); if (!cmd) { err = 
-ENOMEM; goto unlock; @@ -5544,7 +6239,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd, stop_discovery_complete); if (err < 0) { - mgmt_pending_free(cmd); + mgmt_pending_remove(cmd); goto unlock; } @@ -5716,14 +6411,18 @@ static void enable_advertising_instance(struct hci_dev *hdev, int err) static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) { + struct mgmt_pending_cmd *cmd = data; struct cmd_lookup match = { NULL, hdev }; u8 instance; struct adv_info *adv_instance; u8 status = mgmt_status(err); + if (err == -ECANCELED || !mgmt_pending_valid(hdev, data)) + return; + if (status) { - mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, - cmd_status_rsp, &status); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status); + mgmt_pending_free(cmd); return; } @@ -5732,8 +6431,7 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) else hci_dev_clear_flag(hdev, HCI_ADVERTISING); - mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp, - &match); + settings_rsp(cmd, &match); new_settings(hdev, match.sk); @@ -5765,10 +6463,23 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) static int set_adv_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; - struct mgmt_mode *cp = cmd->param; - u8 val = !!cp->val; + struct mgmt_mode cp; + u8 val; - if (cp->val == 0x02) + mutex_lock(&hdev->mgmt_pending_lock); + + if (!__mgmt_pending_listed(hdev, cmd)) { + mutex_unlock(&hdev->mgmt_pending_lock); + return -ECANCELED; + } + + memcpy(&cp, cmd->param, sizeof(cp)); + + mutex_unlock(&hdev->mgmt_pending_lock); + + val = !!cp.val; + + if (cp.val == 0x02) hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); else hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); @@ -5831,6 +6542,7 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, if (!hdev_is_powered(hdev) || (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) && (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) || + hci_dev_test_flag(hdev, HCI_MESH) || hci_conn_num(hdev, LE_LINK) > 0 || (hci_dev_test_flag(hdev, HCI_LE_SCAN) && hdev->le_scan_type == LE_SCAN_ACTIVE)) { @@ -5937,6 +6649,7 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, MGMT_STATUS_NOT_SUPPORTED); + /* Keep allowed ranges in sync with set_mesh() */ interval = __le16_to_cpu(cp->interval); if (interval < 0x0004 || interval > 0x4000) @@ -6075,7 +6788,7 @@ static void set_bredr_complete(struct hci_dev *hdev, void *data, int err) */ hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); } else { send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev); new_settings(hdev, cmd->sk); @@ -6135,7 +6848,6 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); hci_dev_clear_flag(hdev, HCI_LINK_SECURITY); hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE); - hci_dev_clear_flag(hdev, HCI_HS_ENABLED); } hci_dev_change_flag(hdev, HCI_BREDR_ENABLED); @@ -6213,7 +6925,7 @@ static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err) if (err) { u8 mgmt_err = mgmt_status(err); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, 
cmd->hdev->id, cmd->opcode, mgmt_err); goto done; } @@ -6577,15 +7289,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, bt_dev_dbg(hdev, "key_count %u", key_count); - for (i = 0; i < key_count; i++) { - struct mgmt_ltk_info *key = &cp->keys[i]; - - if (!ltk_is_valid(key)) - return mgmt_cmd_status(sk, hdev->id, - MGMT_OP_LOAD_LONG_TERM_KEYS, - MGMT_STATUS_INVALID_PARAMS); - } - hci_dev_lock(hdev); hci_smp_ltks_clear(hdev); @@ -6602,6 +7305,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, continue; } + if (!ltk_is_valid(key)) { + bt_dev_warn(hdev, "Invalid LTK for %pMR", + &key->addr.bdaddr); + continue; + } + switch (key->type) { case MGMT_LTK_UNAUTHENTICATED: authenticated = 0x00; @@ -6650,7 +7359,7 @@ static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err) bt_dev_dbg(hdev, "err %d", err); - memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr)); + memcpy(&rp.addr, &cp->addr, sizeof(rp.addr)); status = mgmt_status(err); if (status == MGMT_STATUS_SUCCESS) { @@ -6663,14 +7372,9 @@ static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err) rp.max_tx_power = HCI_TX_POWER_INVALID; } - mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status, &rp, sizeof(rp)); - if (conn) { - hci_conn_drop(conn); - hci_conn_put(conn); - } - mgmt_pending_free(cmd); } @@ -6689,15 +7393,10 @@ static int get_conn_info_sync(struct hci_dev *hdev, void *data) else conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); - if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) { - if (cmd->user_data) { - hci_conn_drop(cmd->user_data); - hci_conn_put(cmd->user_data); - cmd->user_data = NULL; - } + if (!conn || conn->state != BT_CONNECTED) return MGMT_STATUS_NOT_CONNECTED; - } + cmd->user_data = conn; handle = cpu_to_le16(conn->handle); /* Refresh RSSI each time */ @@ -6762,9 +7461,8 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, /* To avoid client trying to guess when to poll again for information we * calculate conn info age as random value between min/max set in hdev. */ - conn_info_age = hdev->conn_info_min_age + - prandom_u32_max(hdev->conn_info_max_age - - hdev->conn_info_min_age); + conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age, + hdev->conn_info_max_age - 1); /* Query controller to refresh cached values if they are too old or were * never read. 
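 * Worked example, assuming the usual conn_info_min_age/max_age
 * defaults of 1000 and 3000 ms: get_random_u32_inclusive(1000, 2999)
 * yields an age threshold in [1000, 2999] ms, so a client polling
 * MGMT_OP_GET_CONN_INFO cannot predict when the cached RSSI/TX-power
 * values will next be refreshed.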
@@ -6776,11 +7474,12 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data, len); - if (!cmd) + if (!cmd) { err = -ENOMEM; - else + } else { err = hci_cmd_sync_queue(hdev, get_conn_info_sync, cmd, get_conn_info_complete); + } if (err < 0) { mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, @@ -6792,9 +7491,6 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - hci_conn_hold(conn); - cmd->user_data = hci_conn_get(conn); - conn->conn_info_timestamp = jiffies; } else { /* Cache is valid, just reply with values cached in hci_conn */ @@ -6833,12 +7529,10 @@ static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err) if (conn) { rp.piconet_clock = cpu_to_le32(conn->clock); rp.accuracy = cpu_to_le16(conn->clock_accuracy); - hci_conn_drop(conn); - hci_conn_put(conn); } complete: - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp, sizeof(rp)); mgmt_pending_free(cmd); @@ -6849,30 +7543,21 @@ static int get_clock_info_sync(struct hci_dev *hdev, void *data) struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_get_clock_info *cp = cmd->param; struct hci_cp_read_clock hci_cp; - struct hci_conn *conn = cmd->user_data; - int err; + struct hci_conn *conn; memset(&hci_cp, 0, sizeof(hci_cp)); - err = hci_read_clock_sync(hdev, &hci_cp); + hci_read_clock_sync(hdev, &hci_cp); - if (conn) { - /* Make sure connection still exists */ - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, - &cp->addr.bdaddr); + /* Make sure connection still exists */ + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); + if (!conn || conn->state != BT_CONNECTED) + return MGMT_STATUS_NOT_CONNECTED; - if (conn && conn == cmd->user_data && - conn->state == BT_CONNECTED) { - hci_cp.handle = cpu_to_le16(conn->handle); - hci_cp.which = 0x01; /* Piconet clock */ - err = hci_read_clock_sync(hdev, &hci_cp); - } else if (cmd->user_data) { - hci_conn_drop(cmd->user_data); - hci_conn_put(cmd->user_data); - cmd->user_data = NULL; - } - } + cmd->user_data = conn; + hci_cp.handle = cpu_to_le16(conn->handle); + hci_cp.which = 0x01; /* Piconet clock */ - return err; + return hci_read_clock_sync(hdev, &hci_cp); } static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, @@ -6931,10 +7616,6 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, if (cmd) mgmt_pending_free(cmd); - - } else if (conn) { - hci_conn_hold(conn); - cmd->user_data = hci_conn_get(conn); } @@ -6943,78 +7624,39 @@ unlock: return err; } -static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) +static void device_added(struct sock *sk, struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 type, u8 action) { - struct hci_conn *conn; - - conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr); - if (!conn) - return false; - - if (conn->dst_type != type) - return false; + struct mgmt_ev_device_added ev; - if (conn->state != BT_CONNECTED) - return false; + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = type; + ev.action = action; - return true; + mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk); } -/* This function requires the caller holds hdev->lock */ -static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, - u8 addr_type, u8 auto_connect) +static void add_device_complete(struct hci_dev *hdev, void *data, int err) { - struct hci_conn_params *params; - - params = 
hci_conn_params_add(hdev, addr, addr_type); - if (!params) - return -EIO; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_device *cp = cmd->param; - if (params->auto_connect == auto_connect) - return 0; + if (!err) { + struct hci_conn_params *params; - list_del_init(&params->action); + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); - switch (auto_connect) { - case HCI_AUTO_CONN_DISABLED: - case HCI_AUTO_CONN_LINK_LOSS: - /* If auto connect is being disabled when we're trying to - * connect to device, keep connecting. - */ - if (params->explicit_connect) - list_add(&params->action, &hdev->pend_le_conns); - break; - case HCI_AUTO_CONN_REPORT: - if (params->explicit_connect) - list_add(&params->action, &hdev->pend_le_conns); - else - list_add(&params->action, &hdev->pend_le_reports); - break; - case HCI_AUTO_CONN_DIRECT: - case HCI_AUTO_CONN_ALWAYS: - if (!is_connected(hdev, addr, addr_type)) - list_add(&params->action, &hdev->pend_le_conns); - break; + device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type, + cp->action); + device_flags_changed(NULL, hdev, &cp->addr.bdaddr, + cp->addr.type, hdev->conn_flags, + params ? params->flags : 0); } - params->auto_connect = auto_connect; - - bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u", - addr, addr_type, auto_connect); - - return 0; -} - -static void device_added(struct sock *sk, struct hci_dev *hdev, - bdaddr_t *bdaddr, u8 type, u8 action) -{ - struct mgmt_ev_device_added ev; - - bacpy(&ev.addr.bdaddr, bdaddr); - ev.addr.type = type; - ev.action = action; - - mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk); + mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE, + mgmt_status(err), &cp->addr, sizeof(cp->addr)); + mgmt_pending_free(cmd); } static int add_device_sync(struct hci_dev *hdev, void *data) @@ -7025,6 +7667,7 @@ static int add_device_sync(struct hci_dev *hdev, void *data) static int add_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { + struct mgmt_pending_cmd *cmd; struct mgmt_cp_add_device *cp = data; u8 auto_conn, addr_type; struct hci_conn_params *params; @@ -7063,7 +7706,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, if (err) goto unlock; - hci_req_update_scan(hdev); + hci_update_scan(hdev); goto added; } @@ -7092,28 +7735,35 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, /* If the connection parameters don't exist for this device, * they will be created and configured with defaults.
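
For reference, the pending-list policy encoded by the hci_conn_params_set() switch removed at the top of this hunk (the helper itself survives; the updated add_device() below still calls it) can be modelled standalone. The enum values here are illustrative local copies, not the kernel definitions:

enum { PEND_NONE, PEND_LE_CONNS, PEND_LE_REPORTS };

enum {
	HCI_AUTO_CONN_DISABLED,
	HCI_AUTO_CONN_REPORT,
	HCI_AUTO_CONN_DIRECT,
	HCI_AUTO_CONN_ALWAYS,
	HCI_AUTO_CONN_LINK_LOSS,
	HCI_AUTO_CONN_EXPLICIT,
};

/* which action list should a params entry join for a given mode? */
int pend_list_for(int auto_connect, int explicit_connect, int connected)
{
	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* keep connecting if an explicit connect is in flight */
		return explicit_connect ? PEND_LE_CONNS : PEND_NONE;
	case HCI_AUTO_CONN_REPORT:
		return explicit_connect ? PEND_LE_CONNS : PEND_LE_REPORTS;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* only queue a connect if not already connected */
		return connected ? PEND_NONE : PEND_LE_CONNS;
	default:
		return PEND_NONE;
	}
}
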
*/ - if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type, - auto_conn) < 0) { + params = hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type, + auto_conn); + if (!params) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_FAILED, &cp->addr, sizeof(cp->addr)); goto unlock; - } else { - params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, - addr_type); - if (params) - bitmap_to_arr32(&current_flags, params->flags, - __HCI_CONN_NUM_FLAGS); } - err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL); - if (err < 0) + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len); + if (!cmd) { + err = -ENOMEM; goto unlock; + } + + err = hci_cmd_sync_queue(hdev, add_device_sync, cmd, + add_device_complete); + if (err < 0) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, + MGMT_STATUS_FAILED, &cp->addr, + sizeof(cp->addr)); + mgmt_pending_free(cmd); + } + + goto unlock; added: device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); - bitmap_to_arr32(&supported_flags, hdev->conn_flags, - __HCI_CONN_NUM_FLAGS); + supported_flags = hdev->conn_flags; device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type, supported_flags, current_flags); @@ -7177,7 +7827,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, goto unlock; } - hci_req_update_scan(hdev); + hci_update_scan(hdev); device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); @@ -7218,9 +7868,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, goto unlock; } - list_del(&params->action); - list_del(&params->list); - kfree(params); + hci_conn_params_free(params); device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); } else { @@ -7241,7 +7889,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, kfree(b); } - hci_req_update_scan(hdev); + hci_update_scan(hdev); list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) { if (p->auto_connect == HCI_AUTO_CONN_DISABLED) @@ -7251,9 +7899,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, p->auto_connect = HCI_AUTO_CONN_EXPLICIT; continue; } - list_del(&p->action); - list_del(&p->list); - kfree(p); + hci_conn_params_free(p); } bt_dev_dbg(hdev, "All LE connection parameters were removed"); @@ -7270,6 +7916,18 @@ unlock: return err; } +static int conn_update_sync(struct hci_dev *hdev, void *data) +{ + struct hci_conn_params *params = data; + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type); + if (!conn) + return -ECANCELED; + + return hci_le_conn_update_sync(hdev, conn, params); +} + static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -7303,12 +7961,14 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_lock(hdev); - hci_conn_params_clear_disabled(hdev); + if (param_count > 1) + hci_conn_params_clear_disabled(hdev); for (i = 0; i < param_count; i++) { struct mgmt_conn_param *param = &cp->params[i]; struct hci_conn_params *hci_param; u16 min, max, latency, timeout; + bool update = false; u8 addr_type; bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr, @@ -7336,6 +7996,19 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, continue; } + /* Detect when the loading is for an existing parameter then + * attempt to trigger the connection update procedure.
+ */ + if (!i && param_count == 1) { + hci_param = hci_conn_params_lookup(hdev, + &param->addr.bdaddr, + addr_type); + if (hci_param) + update = true; + else + hci_conn_params_clear_disabled(hdev); + } + hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr, addr_type); if (!hci_param) { @@ -7347,6 +8020,25 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, hci_param->conn_max_interval = max; hci_param->conn_latency = latency; hci_param->supervision_timeout = timeout; + + /* Check if we need to trigger a connection update */ + if (update) { + struct hci_conn *conn; + + /* Lookup for existing connection as central and check + * if parameters match and if they don't then trigger + * a connection update. + */ + conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr, + addr_type); + if (conn && conn->role == HCI_ROLE_MASTER && + (conn->le_conn_min_interval != min || + conn->le_conn_max_interval != max || + conn->le_conn_latency != latency || + conn->le_supv_timeout != timeout)) + hci_cmd_sync_queue(hdev, conn_update_sync, + hci_param, NULL); + } } hci_dev_unlock(hdev); @@ -7372,7 +8064,7 @@ static int set_external_config(struct sock *sk, struct hci_dev *hdev, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, MGMT_STATUS_INVALID_PARAMS); - if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks)) + if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, MGMT_STATUS_NOT_SUPPORTED); @@ -7580,7 +8272,7 @@ done: kfree_skb(skb); kfree(mgmt_rp); - mgmt_pending_remove(cmd); + mgmt_pending_free(cmd); } static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk, @@ -7589,7 +8281,7 @@ static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk, struct mgmt_pending_cmd *cmd; int err; - cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev, + cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev, cp, sizeof(*cp)); if (!cmd) return -ENOMEM; @@ -7785,8 +8477,7 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev) /* In extended adv TX_POWER returned from Set Adv Param * will be always valid.
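
The update path added to load_conn_param() above only queues conn_update_sync() when a single, already-known entry is reloaded, we are central (HCI_ROLE_MASTER), and at least one parameter actually differs. A standalone model of that trigger condition, with illustrative types:

#include <stdbool.h>

struct le_conn_params_model {
	unsigned short min_interval, max_interval, latency, timeout;
};

bool needs_conn_update(bool param_existed, bool is_central,
		       const struct le_conn_params_model *cur,
		       const struct le_conn_params_model *req)
{
	if (!param_existed || !is_central)
		return false;
	return cur->min_interval != req->min_interval ||
	       cur->max_interval != req->max_interval ||
	       cur->latency != req->latency ||
	       cur->timeout != req->timeout;
}

Skipping the update when the intervals already match avoids issuing a redundant LE Connection Update procedure on the air.
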
*/ - if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) || - ext_adv_capable(hdev)) + if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev)) flags |= MGMT_ADV_FLAG_TX_POWER; if (ext_adv_capable(hdev)) { @@ -7794,10 +8485,10 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev) flags |= MGMT_ADV_FLAG_HW_OFFLOAD; flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER; - if (hdev->le_features[1] & HCI_LE_PHY_2M) + if (le_2m_capable(hdev)) flags |= MGMT_ADV_FLAG_SEC_2M; - if (hdev->le_features[1] & HCI_LE_PHY_CODED) + if (le_coded_capable(hdev)) flags |= MGMT_ADV_FLAG_SEC_CODED; } @@ -7832,15 +8523,21 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev, supported_flags = get_supported_adv_flags(hdev); rp->supported_flags = cpu_to_le32(supported_flags); - rp->max_adv_data_len = HCI_MAX_AD_LENGTH; - rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH; + rp->max_adv_data_len = max_adv_len(hdev); + rp->max_scan_rsp_len = max_adv_len(hdev); rp->max_instances = hdev->le_num_of_adv_sets; rp->num_instances = hdev->adv_instance_cnt; instance = rp->instance; list_for_each_entry(adv_instance, &hdev->adv_instances, list) { - *instance = adv_instance->instance; - instance++; + /* Only instances 1-le_num_of_adv_sets are externally visible */ + if (adv_instance->instance <= hdev->adv_instance_cnt) { + *instance = adv_instance->instance; + instance++; + } else { + rp->num_instances--; + rp_len--; + } } hci_dev_unlock(hdev); @@ -7855,7 +8552,7 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev, static u8 calculate_name_len(struct hci_dev *hdev) { - u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3]; + u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */ return eir_append_local_name(hdev, buf, 0); } @@ -7863,7 +8560,7 @@ static u8 calculate_name_len(struct hci_dev *hdev) static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags, bool is_adv_data) { - u8 max_len = HCI_MAX_AD_LENGTH; + u8 max_len = max_adv_len(hdev); if (is_adv_data) { if (adv_flags & (MGMT_ADV_FLAG_DISCOV | @@ -7918,7 +8615,7 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data, return false; /* Make sure that the data is correctly formatted. 
*/ - for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) { + for (i = 0; i < len; i += (cur_len + 1)) { cur_len = data[i]; if (!cur_len) @@ -7969,11 +8666,7 @@ static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags) static bool adv_busy(struct hci_dev *hdev) { - return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) || - pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) || - pending_find(MGMT_OP_SET_LE, hdev) || - pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) || - pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev)); + return pending_find(MGMT_OP_SET_LE, hdev); } static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance, @@ -8019,10 +8712,10 @@ static void add_advertising_complete(struct hci_dev *hdev, void *data, int err) rp.instance = cp->instance; if (err) - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err)); else - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), &rp, sizeof(rp)); add_adv_complete(hdev, cmd->sk, cp->instance, err); @@ -8046,9 +8739,9 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, u32 flags; u8 status; u16 timeout, duration; - unsigned int prev_instance_cnt = hdev->adv_instance_cnt; + unsigned int prev_instance_cnt; u8 schedule_instance = 0; - struct adv_info *next_instance; + struct adv_info *adv, *next_instance; int err; struct mgmt_pending_cmd *cmd; @@ -8097,15 +8790,17 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, goto unlock; } - err = hci_add_adv_instance(hdev, cp->instance, flags, + prev_instance_cnt = hdev->adv_instance_cnt; + + adv = hci_add_adv_instance(hdev, cp->instance, flags, cp->adv_data_len, cp->data, cp->scan_rsp_len, cp->data + cp->adv_data_len, timeout, duration, HCI_ADV_TX_POWER_NO_PREFERENCE, hdev->le_adv_min_interval, - hdev->le_adv_max_interval); - if (err < 0) { + hdev->le_adv_max_interval, 0); + if (IS_ERR(adv)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_FAILED); goto unlock; @@ -8208,16 +8903,15 @@ static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data, hci_remove_adv_instance(hdev, cp->instance); - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err)); } else { - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), &rp, sizeof(rp)); } unlock: - if (cmd) - mgmt_pending_free(cmd); + mgmt_pending_free(cmd); hci_dev_unlock(hdev); } @@ -8236,6 +8930,7 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_add_ext_adv_params *cp = data; struct mgmt_rp_add_ext_adv_params rp; struct mgmt_pending_cmd *cmd = NULL; + struct adv_info *adv; u32 flags, min_interval, max_interval; u16 timeout, duration; u8 status; @@ -8260,7 +8955,7 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, * extra parameters we don't know about will be ignored in this request. 
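
The tlv_data_is_valid() fix at the start of this hunk drops a redundant cur_len initialisation from the AD-structure walk. Each element is length-prefixed as [len][type][len - 1 data bytes], and the walk steps over zero length bytes. A self-contained model of the walk with an explicit bounds check (a sketch, not the kernel routine):

#include <stdbool.h>
#include <stdio.h>

bool walk_ad_elements(const unsigned char *data, unsigned int len)
{
	unsigned int i;
	unsigned char cur_len;

	for (i = 0; i < len; i += cur_len + 1) {
		cur_len = data[i];
		if (!cur_len)
			continue;	/* empty element, step past it */
		if (i + cur_len >= len)
			return false;	/* element overruns the buffer */
		printf("type 0x%02x, %u data byte(s)\n",
		       data[i + 1], cur_len - 1);
	}
	return true;
}

int main(void)
{
	/* Flags element (type 0x01), then shortened name "BT" (type 0x08) */
	const unsigned char ad[] = { 0x02, 0x01, 0x06,
				     0x03, 0x08, 'B', 'T' };

	return walk_ad_elements(ad, sizeof(ad)) ? 0 : 1;
}
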
*/ if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_INVALID_PARAMS); flags = __le32_to_cpu(cp->flags); @@ -8305,11 +9000,11 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, HCI_ADV_TX_POWER_NO_PREFERENCE; /* Create advertising instance with no advertising or response data */ - err = hci_add_adv_instance(hdev, cp->instance, flags, - 0, NULL, 0, NULL, timeout, duration, - tx_power, min_interval, max_interval); + adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL, + timeout, duration, tx_power, min_interval, + max_interval, 0); - if (err < 0) { + if (IS_ERR(adv)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_FAILED); goto unlock; @@ -8358,10 +9053,10 @@ static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err) rp.instance = cp->instance; if (err) - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err)); else - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err), &rp, sizeof(rp)); mgmt_pending_free(cmd); @@ -8520,10 +9215,10 @@ static void remove_advertising_complete(struct hci_dev *hdev, void *data, rp.instance = cp->instance; if (err) - mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err)); else - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); mgmt_pending_free(cmd); @@ -8563,9 +9258,7 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev, goto unlock; } - if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) || - pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) || - pending_find(MGMT_OP_SET_LE, hdev)) { + if (pending_find(MGMT_OP_SET_LE, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, MGMT_STATUS_BUSY); goto unlock; @@ -8601,7 +9294,6 @@ static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_get_adv_size_info *cp = data; struct mgmt_rp_get_adv_size_info rp; u32 flags, supported_flags; - int err; bt_dev_dbg(hdev, "sock %p", sk); @@ -8628,10 +9320,8 @@ static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev, rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); - err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, - MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); - - return err; + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); } static const struct hci_mgmt_handler mgmt_handlers[] = { @@ -8760,32 +9450,30 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { { add_adv_patterns_monitor_rssi, MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE, HCI_MGMT_VAR_LEN }, + { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE, + HCI_MGMT_VAR_LEN }, + { mesh_features, MGMT_MESH_READ_FEATURES_SIZE }, + { mesh_send, MGMT_MESH_SEND_SIZE, + HCI_MGMT_VAR_LEN }, + { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE }, + { mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN }, }; void mgmt_index_added(struct hci_dev *hdev) { struct mgmt_ev_ext_index ev; - if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE)) return; - switch 
(hdev->dev_type) { - case HCI_PRIMARY: - if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { - mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, - NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); - ev.type = 0x01; - } else { - mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, - HCI_MGMT_INDEX_EVENTS); - ev.type = 0x00; - } - break; - case HCI_AMP: - ev.type = 0x02; - break; - default: - return; + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, + HCI_MGMT_UNCONF_INDEX_EVENTS); + ev.type = 0x01; + } else { + mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, + HCI_MGMT_INDEX_EVENTS); + ev.type = 0x00; } ev.bus = hdev->bus; @@ -8797,36 +9485,35 @@ void mgmt_index_added(struct hci_dev *hdev) void mgmt_index_removed(struct hci_dev *hdev) { struct mgmt_ev_ext_index ev; - u8 status = MGMT_STATUS_INVALID_INDEX; + struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX }; - if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE)) return; - switch (hdev->dev_type) { - case HCI_PRIMARY: - mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); + mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match); - if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { - mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, - NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); - ev.type = 0x01; - } else { - mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, - HCI_MGMT_INDEX_EVENTS); - ev.type = 0x00; - } - break; - case HCI_AMP: - ev.type = 0x02; - break; - default: - return; + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, + HCI_MGMT_UNCONF_INDEX_EVENTS); + ev.type = 0x01; + } else { + mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, + HCI_MGMT_INDEX_EVENTS); + ev.type = 0x00; } ev.bus = hdev->bus; mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev), HCI_MGMT_EXT_INDEX_EVENTS); + + /* Cancel any remaining timed work */ + if (!hci_dev_test_flag(hdev, HCI_MGMT)) + return; + cancel_delayed_work_sync(&hdev->discov_off); + cancel_delayed_work_sync(&hdev->service_cache); + cancel_delayed_work_sync(&hdev->rpa_expired); + cancel_delayed_work_sync(&hdev->mesh_send_done); } void mgmt_power_on(struct hci_dev *hdev, int err) @@ -8842,7 +9529,8 @@ void mgmt_power_on(struct hci_dev *hdev, int err) hci_update_passive_scan(hdev); } - mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp, + &match); new_settings(hdev, match.sk); @@ -8855,9 +9543,10 @@ void mgmt_power_on(struct hci_dev *hdev, int err) void __mgmt_power_off(struct hci_dev *hdev) { struct cmd_lookup match = { NULL, hdev }; - u8 status, zero_cod[] = { 0, 0, 0 }; + u8 zero_cod[] = { 0, 0, 0 }; - mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp, + &match); /* If the power off is because of hdev unregistration let * use the appropriate INVALID_INDEX status. Otherwise use @@ -8867,11 +9556,11 @@ void __mgmt_power_off(struct hci_dev *hdev) * status responses. 
*/ if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) - status = MGMT_STATUS_INVALID_INDEX; + match.mgmt_status = MGMT_STATUS_INVALID_INDEX; else - status = MGMT_STATUS_NOT_POWERED; + match.mgmt_status = MGMT_STATUS_NOT_POWERED; - mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); + mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match); if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) { mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, @@ -9059,12 +9748,20 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, u16 eir_len = 0; u32 flags = 0; + if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) + return; + + /* allocate buff for LE or BR/EDR adv */ if (conn->le_adv_data_len > 0) skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED, - conn->le_adv_data_len); + sizeof(*ev) + conn->le_adv_data_len); else skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED, - 2 + name_len + 5); + sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) + + eir_precalc_len(sizeof(conn->dev_class))); + + if (!skb) + return; ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, &conn->dst); @@ -9083,18 +9780,12 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len); eir_len = conn->le_adv_data_len; } else { - if (name_len > 0) { - eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, - name, name_len); - skb_put(skb, eir_len); - } + if (name) + eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len); - if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) { - eir_len = eir_append_data(ev->eir, eir_len, - EIR_CLASS_OF_DEV, - conn->dev_class, 3); - skb_put(skb, 5); - } + if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class))) + eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV, + conn->dev_class, sizeof(conn->dev_class)); } ev->eir_len = cpu_to_le16(eir_len); @@ -9102,18 +9793,6 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, mgmt_event_skb(skb, NULL); } -static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data) -{ - struct sock **sk = data; - - cmd->cmd_complete(cmd, 0); - - *sk = cmd->sk; - sock_hold(*sk); - - mgmt_pending_remove(cmd); -} - static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data) { struct hci_dev *hdev = data; @@ -9122,7 +9801,6 @@ static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data) device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); cmd->cmd_complete(cmd, 0); - mgmt_pending_remove(cmd); } bool mgmt_powering_down(struct hci_dev *hdev) @@ -9130,6 +9808,9 @@ bool mgmt_powering_down(struct hci_dev *hdev) struct mgmt_pending_cmd *cmd; struct mgmt_mode *cp; + if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) + return true; + cmd = pending_find(MGMT_OP_SET_POWERED, hdev); if (!cmd) return false; @@ -9148,22 +9829,14 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, struct mgmt_ev_device_disconnected ev; struct sock *sk = NULL; - /* The connection is still in hci_conn_hash so test for 1 - * instead of 0 to know if this is the last one. 
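
mgmt_device_connected() above now sizes the event skb with eir_precalc_len() and appends fields with eir_skb_put_data(). Each EIR field is [length][type][data] with the length byte covering type plus data, which is why a complete name costs name_len + 2 bytes and the 3-byte class of device costs 5. A standalone model of an append helper in that style (the EIR type values are the standard assigned numbers):

#include <string.h>

#define EIR_NAME_COMPLETE	0x09
#define EIR_CLASS_OF_DEV	0x0d

/* append one [len][type][data] field, return the new total length */
unsigned short eir_append_field(unsigned char *eir, unsigned short eir_len,
				unsigned char type, const void *data,
				unsigned char data_len)
{
	eir[eir_len++] = data_len + 1;	/* length byte covers type + data */
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	return eir_len + data_len;
}
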
- */ - if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) { - cancel_delayed_work(&hdev->power_off); - queue_work(hdev->req_workqueue, &hdev->power_off.work); - } - if (!mgmt_connected) return; - if (link_type != ACL_LINK && link_type != LE_LINK) + if (link_type != ACL_LINK && + link_type != LE_LINK && + link_type != BIS_LINK) return; - mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); - bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.reason = reason; @@ -9176,9 +9849,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, if (sk) sock_put(sk); - - mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, - hdev); } void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, @@ -9188,8 +9858,8 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, struct mgmt_cp_disconnect *cp; struct mgmt_pending_cmd *cmd; - mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, - hdev); + mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true, + unpair_device_rsp, hdev); cmd = pending_find(MGMT_OP_DISCONNECT, hdev); if (!cmd) @@ -9207,21 +9877,18 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, mgmt_pending_remove(cmd); } -void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u8 status) +void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status) { struct mgmt_ev_connect_failed ev; - /* The connection is still in hci_conn_hash so test for 1 - * instead of 0 to know if this is the last one. - */ - if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) { - cancel_delayed_work(&hdev->power_off); - queue_work(hdev->req_workqueue, &hdev->power_off.work); + if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { + mgmt_device_disconnected(hdev, &conn->dst, conn->type, + conn->dst_type, status, true); + return; } - bacpy(&ev.addr.bdaddr, bdaddr); - ev.addr.type = link_to_bdaddr(link_type, addr_type); + bacpy(&ev.addr.bdaddr, &conn->dst); + ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type); ev.status = mgmt_status(status); mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); @@ -9385,7 +10052,7 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) if (status) { u8 mgmt_err = mgmt_status(status); - mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, + mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true, cmd_status_rsp, &mgmt_err); return; } @@ -9395,8 +10062,8 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) else changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY); - mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp, - &match); + mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true, + settings_rsp, &match); if (changed) new_settings(hdev, match.sk); @@ -9420,9 +10087,12 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, { struct cmd_lookup match = { NULL, hdev, mgmt_status(status) }; - mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match); - mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match); - mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match); + mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup, + &match); + mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup, + &match); + mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup, + &match); if (!status) { 
mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, @@ -9453,6 +10123,9 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) /* If this is a HCI command related to powering on the * HCI dev don't send any mgmt signals. */ + if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) + return; + if (pending_find(MGMT_OP_SET_POWERED, hdev)) return; } @@ -9529,21 +10202,6 @@ static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16]) return false; } -static void restart_le_scan(struct hci_dev *hdev) -{ - /* If controller is not scanning we are done. */ - if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) - return; - - if (time_after(jiffies + DISCOV_LE_RESTART_DELAY, - hdev->discovery.scan_start + - hdev->discovery.scan_duration)) - return; - - queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart, - DISCOV_LE_RESTART_DELAY); -} - static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) { @@ -9559,7 +10217,7 @@ static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir, if (hdev->discovery.rssi != HCI_RSSI_INVALID && (rssi == HCI_RSSI_INVALID || (rssi < hdev->discovery.rssi && - !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)))) + !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)))) return false; if (hdev->discovery.uuid_count != 0) { @@ -9577,9 +10235,7 @@ static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir, /* If duplicate filtering does not report RSSI changes, then restart * scanning to ensure updated result with updated RSSI values. */ - if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) { - restart_le_scan(hdev); - + if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) { /* Validate RSSI value against the RSSI threshold once more. */ if (hdev->discovery.rssi != HCI_RSSI_INVALID && rssi < hdev->discovery.rssi) @@ -9589,12 +10245,192 @@ static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir, return true; } +void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle, + bdaddr_t *bdaddr, u8 addr_type) +{ + struct mgmt_ev_adv_monitor_device_lost ev; + + ev.monitor_handle = cpu_to_le16(handle); + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = addr_type; + + mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev), + NULL); +} + +static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev, + struct sk_buff *skb, + struct sock *skip_sk, + u16 handle) +{ + struct sk_buff *advmon_skb; + size_t advmon_skb_len; + __le16 *monitor_handle; + + if (!skb) + return; + + advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) - + sizeof(struct mgmt_ev_device_found)) + skb->len; + advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND, + advmon_skb_len); + if (!advmon_skb) + return; + + /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except + * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and + * store monitor_handle of the matched monitor. + */ + monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle)); + *monitor_handle = cpu_to_le16(handle); + skb_put_data(advmon_skb, skb->data, skb->len); + + mgmt_event_skb(advmon_skb, skip_sk); +} + +static void mgmt_adv_monitor_device_found(struct hci_dev *hdev, + bdaddr_t *bdaddr, bool report_device, + struct sk_buff *skb, + struct sock *skip_sk) +{ + struct monitored_device *dev, *tmp; + bool matched = false; + bool notified = false; + + /* We have received the Advertisement Report because: + * 1. 
the kernel has initiated active discovery + * 2. if not, we have pend_le_reports > 0 in which case we are doing + * passive scanning + * 3. if none of the above is true, we have one or more active + * Advertisement Monitor + * + * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND + * and report ONLY one advertisement per device for the matched Monitor + * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event. + * + * For case 3, since we are not active scanning and all advertisements + * received are due to a matched Advertisement Monitor, report all + * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event. + */ + if (report_device && !hdev->advmon_pend_notify) { + mgmt_event_skb(skb, skip_sk); + return; + } + + hdev->advmon_pend_notify = false; + + list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) { + if (!bacmp(&dev->bdaddr, bdaddr)) { + matched = true; + + if (!dev->notified) { + mgmt_send_adv_monitor_device_found(hdev, skb, + skip_sk, + dev->handle); + notified = true; + dev->notified = true; + } + } + + if (!dev->notified) + hdev->advmon_pend_notify = true; + } + + if (!report_device && + ((matched && !notified) || !msft_monitor_supported(hdev))) { + /* Handle 0 indicates that we are not active scanning and this + * is a subsequent advertisement report for an already matched + * Advertisement Monitor or the controller offloading support + * is not available. + */ + mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0); + } + + if (report_device) + mgmt_event_skb(skb, skip_sk); + else + kfree_skb(skb); +} + +static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, s8 rssi, u32 flags, u8 *eir, + u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, + u64 instant) +{ + struct sk_buff *skb; + struct mgmt_ev_mesh_device_found *ev; + int i, j; + + if (!hdev->mesh_ad_types[0]) + goto accepted; + + /* Scan for requested AD types */ + if (eir_len > 0) { + for (i = 0; i + 1 < eir_len; i += eir[i] + 1) { + for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) { + if (!hdev->mesh_ad_types[j]) + break; + + if (hdev->mesh_ad_types[j] == eir[i + 1]) + goto accepted; + } + } + } + + if (scan_rsp_len > 0) { + for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) { + for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) { + if (!hdev->mesh_ad_types[j]) + break; + + if (hdev->mesh_ad_types[j] == scan_rsp[i + 1]) + goto accepted; + } + } + } + + return; + +accepted: + skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND, + sizeof(*ev) + eir_len + scan_rsp_len); + if (!skb) + return; + + ev = skb_put(skb, sizeof(*ev)); + + bacpy(&ev->addr.bdaddr, bdaddr); + ev->addr.type = link_to_bdaddr(LE_LINK, addr_type); + ev->rssi = rssi; + ev->flags = cpu_to_le32(flags); + ev->instant = cpu_to_le64(instant); + + if (eir_len > 0) + /* Copy EIR or advertising data into event */ + skb_put_data(skb, eir, eir_len); + + if (scan_rsp_len > 0) + /* Append scan response data to event */ + skb_put_data(skb, scan_rsp, scan_rsp_len); + + ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); + + mgmt_event_skb(skb, NULL); +} + void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, - u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) + u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, + u64 instant) { struct sk_buff *skb; struct mgmt_ev_device_found *ev; + bool report_device = hci_discovery_active(hdev); + + if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK) + mesh_device_found(hdev, bdaddr, 
addr_type, rssi, flags, + eir, eir_len, scan_rsp, scan_rsp_len, + instant); /* Don't send events for a non-kernel initiated discovery. With * LE one exception is if we have pend_le_reports > 0 in which @@ -9603,11 +10439,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, if (!hci_discovery_active(hdev)) { if (link_type == ACL_LINK) return; - if (link_type == LE_LINK && - list_empty(&hdev->pend_le_reports) && - !hci_is_adv_monitoring(hdev)) { + if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports)) + report_device = true; + else if (!hci_is_adv_monitoring(hdev)) return; - } } if (hdev->discovery.result_filtering) { @@ -9672,7 +10507,7 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); - mgmt_event_skb(skb, NULL); + mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL); } void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, @@ -9680,28 +10515,23 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, { struct sk_buff *skb; struct mgmt_ev_device_found *ev; - u16 eir_len; - u32 flags; + u16 eir_len = 0; + u32 flags = 0; - if (name_len) - skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 2 + name_len); - else - skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 0); + skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, + sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0)); + if (!skb) + return; ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, bdaddr); ev->addr.type = link_to_bdaddr(link_type, addr_type); ev->rssi = rssi; - if (name) { - eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name, - name_len); - flags = 0; - skb_put(skb, eir_len); - } else { - eir_len = 0; + if (name) + eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len); + else flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED; - } ev->eir_len = cpu_to_le16(eir_len); ev->flags = cpu_to_le32(flags); @@ -9762,3 +10592,22 @@ void mgmt_exit(void) { hci_mgmt_chan_unregister(&chan); } + +void mgmt_cleanup(struct sock *sk) +{ + struct mgmt_mesh_tx *mesh_tx; + struct hci_dev *hdev; + + read_lock(&hci_dev_list_lock); + + list_for_each_entry(hdev, &hci_dev_list, list) { + do { + mesh_tx = mgmt_mesh_next(hdev, sk); + + if (mesh_tx) + mesh_send_complete(hdev, mesh_tx, true); + } while (mesh_tx); + } + + read_unlock(&hci_dev_list_lock); +} diff --git a/net/bluetooth/mgmt_config.c b/net/bluetooth/mgmt_config.c index 6ef701c27da4..c4063d200c0a 100644 --- a/net/bluetooth/mgmt_config.c +++ b/net/bluetooth/mgmt_config.c @@ -13,13 +13,13 @@ #define HDEV_PARAM_U16(_param_name_) \ struct {\ - struct mgmt_tlv entry; \ + struct mgmt_tlv_hdr entry; \ __le16 value; \ } __packed _param_name_ #define HDEV_PARAM_U8(_param_name_) \ struct {\ - struct mgmt_tlv entry; \ + struct mgmt_tlv_hdr entry; \ __u8 value; \ } __packed _param_name_ diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c index edee60bbc7b4..aa7b5585cb26 100644 --- a/net/bluetooth/mgmt_util.c +++ b/net/bluetooth/mgmt_util.c @@ -21,7 +21,7 @@ SOFTWARE IS DISCLAIMED. 
*/ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> @@ -77,11 +77,12 @@ int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag, { struct hci_dev *hdev; struct mgmt_hdr *hdr; - int len = skb->len; + int len; if (!skb) return -EINVAL; + len = skb->len; hdev = bt_cb(skb)->mgmt.hdev; /* Time stamp */ @@ -216,47 +217,47 @@ int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, struct hci_dev *hdev) { - struct mgmt_pending_cmd *cmd; + struct mgmt_pending_cmd *cmd, *tmp; + + mutex_lock(&hdev->mgmt_pending_lock); - list_for_each_entry(cmd, &hdev->mgmt_pending, list) { + list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { if (hci_sock_get_channel(cmd->sk) != channel) continue; - if (cmd->opcode == opcode) - return cmd; - } - - return NULL; -} - -struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, - u16 opcode, - struct hci_dev *hdev, - const void *data) -{ - struct mgmt_pending_cmd *cmd; - list_for_each_entry(cmd, &hdev->mgmt_pending, list) { - if (cmd->user_data != data) - continue; - if (cmd->opcode == opcode) + if (cmd->opcode == opcode) { + mutex_unlock(&hdev->mgmt_pending_lock); return cmd; + } } + mutex_unlock(&hdev->mgmt_pending_lock); + return NULL; } -void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, +void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, bool remove, void (*cb)(struct mgmt_pending_cmd *cmd, void *data), void *data) { struct mgmt_pending_cmd *cmd, *tmp; + mutex_lock(&hdev->mgmt_pending_lock); + list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { if (opcode > 0 && cmd->opcode != opcode) continue; + if (remove) + list_del(&cmd->list); + cb(cmd, data); + + if (remove) + mgmt_pending_free(cmd); } + + mutex_unlock(&hdev->mgmt_pending_lock); } struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, @@ -270,7 +271,7 @@ struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, return NULL; cmd->opcode = opcode; - cmd->index = hdev->id; + cmd->hdev = hdev; cmd->param = kmemdup(data, len, GFP_KERNEL); if (!cmd->param) { @@ -296,7 +297,9 @@ struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, if (!cmd) return NULL; - list_add(&cmd->list, &hdev->mgmt_pending); + mutex_lock(&hdev->mgmt_pending_lock); + list_add_tail(&cmd->list, &hdev->mgmt_pending); + mutex_unlock(&hdev->mgmt_pending_lock); return cmd; } @@ -310,6 +313,129 @@ void mgmt_pending_free(struct mgmt_pending_cmd *cmd) void mgmt_pending_remove(struct mgmt_pending_cmd *cmd) { + mutex_lock(&cmd->hdev->mgmt_pending_lock); list_del(&cmd->list); + mutex_unlock(&cmd->hdev->mgmt_pending_lock); + mgmt_pending_free(cmd); } + +bool __mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd) +{ + struct mgmt_pending_cmd *tmp; + + lockdep_assert_held(&hdev->mgmt_pending_lock); + + if (!cmd) + return false; + + list_for_each_entry(tmp, &hdev->mgmt_pending, list) { + if (cmd == tmp) + return true; + } + + return false; +} + +bool mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd) +{ + bool listed; + + mutex_lock(&hdev->mgmt_pending_lock); + listed = __mgmt_pending_listed(hdev, cmd); + mutex_unlock(&hdev->mgmt_pending_lock); + + return listed; +} + +bool mgmt_pending_valid(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd) +{ + bool listed; + + if (!cmd) + return false; + + 
mutex_lock(&hdev->mgmt_pending_lock); + + listed = __mgmt_pending_listed(hdev, cmd); + if (listed) + list_del(&cmd->list); + + mutex_unlock(&hdev->mgmt_pending_lock); + + return listed; +} + +void mgmt_mesh_foreach(struct hci_dev *hdev, + void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data), + void *data, struct sock *sk) +{ + struct mgmt_mesh_tx *mesh_tx, *tmp; + + list_for_each_entry_safe(mesh_tx, tmp, &hdev->mesh_pending, list) { + if (!sk || mesh_tx->sk == sk) + cb(mesh_tx, data); + } +} + +struct mgmt_mesh_tx *mgmt_mesh_next(struct hci_dev *hdev, struct sock *sk) +{ + struct mgmt_mesh_tx *mesh_tx; + + if (list_empty(&hdev->mesh_pending)) + return NULL; + + list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) { + if (!sk || mesh_tx->sk == sk) + return mesh_tx; + } + + return NULL; +} + +struct mgmt_mesh_tx *mgmt_mesh_find(struct hci_dev *hdev, u8 handle) +{ + struct mgmt_mesh_tx *mesh_tx; + + if (list_empty(&hdev->mesh_pending)) + return NULL; + + list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) { + if (mesh_tx->handle == handle) + return mesh_tx; + } + + return NULL; +} + +struct mgmt_mesh_tx *mgmt_mesh_add(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_mesh_tx *mesh_tx; + + mesh_tx = kzalloc(sizeof(*mesh_tx), GFP_KERNEL); + if (!mesh_tx) + return NULL; + + hdev->mesh_send_ref++; + if (!hdev->mesh_send_ref) + hdev->mesh_send_ref++; + + mesh_tx->handle = hdev->mesh_send_ref; + mesh_tx->index = hdev->id; + memcpy(mesh_tx->param, data, len); + mesh_tx->param_len = len; + mesh_tx->sk = sk; + sock_hold(sk); + + list_add_tail(&mesh_tx->list, &hdev->mesh_pending); + + return mesh_tx; +} + +void mgmt_mesh_remove(struct mgmt_mesh_tx *mesh_tx) +{ + list_del(&mesh_tx->list); + sock_put(mesh_tx->sk); + kfree(mesh_tx); +} diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h index 98e40395a383..bcba8c9d8952 100644 --- a/net/bluetooth/mgmt_util.h +++ b/net/bluetooth/mgmt_util.h @@ -20,10 +20,20 @@ SOFTWARE IS DISCLAIMED. 
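
mgmt_mesh_add() above hands out send handles from mesh_send_ref, a wrapping counter that skips zero so handle 0 stays free to mean "none". The allocation idiom as a standalone sketch (unsigned char models the handle width):

/* bump a wrapping counter, skipping the reserved value 0 */
unsigned char next_mesh_handle(unsigned char *ref)
{
	if (!++(*ref))	/* wrapped 0xff -> 0x00 */
		++(*ref);
	return *ref;
}
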
*/ +struct mgmt_mesh_tx { + struct list_head list; + int index; + size_t param_len; + struct sock *sk; + u8 handle; + u8 instance; + u8 param[sizeof(struct mgmt_cp_mesh_send) + 31]; +}; + struct mgmt_pending_cmd { struct list_head list; u16 opcode; - int index; + struct hci_dev *hdev; void *param; size_t param_len; struct sock *sk; @@ -44,11 +54,7 @@ int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, struct hci_dev *hdev); -struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, - u16 opcode, - struct hci_dev *hdev, - const void *data); -void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, +void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, bool remove, void (*cb)(struct mgmt_pending_cmd *cmd, void *data), void *data); struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, @@ -59,3 +65,14 @@ struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, void *data, u16 len); void mgmt_pending_free(struct mgmt_pending_cmd *cmd); void mgmt_pending_remove(struct mgmt_pending_cmd *cmd); +bool __mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd); +bool mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd); +bool mgmt_pending_valid(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd); +void mgmt_mesh_foreach(struct hci_dev *hdev, + void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data), + void *data, struct sock *sk); +struct mgmt_mesh_tx *mgmt_mesh_find(struct hci_dev *hdev, u8 handle); +struct mgmt_mesh_tx *mgmt_mesh_next(struct hci_dev *hdev, struct sock *sk); +struct mgmt_mesh_tx *mgmt_mesh_add(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len); +void mgmt_mesh_remove(struct mgmt_mesh_tx *mesh_tx); diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c index 6a943634b31a..c560d8467669 100644 --- a/net/bluetooth/msft.c +++ b/net/bluetooth/msft.c @@ -7,7 +7,6 @@ #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> -#include "hci_request.h" #include "mgmt_util.h" #include "msft.h" @@ -80,9 +79,44 @@ struct msft_rp_le_set_advertisement_filter_enable { __u8 sub_opcode; } __packed; +#define MSFT_EV_LE_MONITOR_DEVICE 0x02 +struct msft_ev_le_monitor_device { + __u8 addr_type; + bdaddr_t bdaddr; + __u8 monitor_handle; + __u8 monitor_state; +} __packed; + struct msft_monitor_advertisement_handle_data { __u8 msft_handle; __u16 mgmt_handle; + __s8 rssi_high; + __s8 rssi_low; + __u8 rssi_low_interval; + __u8 rssi_sampling_period; + __u8 cond_type; + struct list_head list; +}; + +enum monitor_addr_filter_state { + AF_STATE_IDLE, + AF_STATE_ADDING, + AF_STATE_ADDED, + AF_STATE_REMOVING, +}; + +#define MSFT_MONITOR_ADVERTISEMENT_TYPE_ADDR 0x04 +struct msft_monitor_addr_filter_data { + __u8 msft_handle; + __u8 pattern_handle; /* address filters pertain to */ + __u16 mgmt_handle; + int state; + __s8 rssi_high; + __s8 rssi_low; + __u8 rssi_low_interval; + __u8 rssi_sampling_period; + __u8 addr_type; + bdaddr_t bdaddr; struct list_head list; }; @@ -91,18 +125,14 @@ struct msft_data { __u8 evt_prefix_len; __u8 *evt_prefix; struct list_head handle_map; - __u16 pending_add_handle; - __u16 pending_remove_handle; + struct list_head address_filters; __u8 resuming; __u8 suspending; __u8 filter_enabled; + /* To synchronize add/remove address filter and monitor device event.*/ + struct mutex filter_lock; }; -static int __msft_add_monitor_pattern(struct hci_dev *hdev, - struct adv_monitor *monitor); -static 
int __msft_remove_monitor(struct hci_dev *hdev, - struct adv_monitor *monitor, u16 handle); - bool msft_monitor_supported(struct hci_dev *hdev) { return !!(msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR); @@ -156,34 +186,6 @@ failed: return false; } -static void reregister_monitor(struct hci_dev *hdev, int handle) -{ - struct adv_monitor *monitor; - struct msft_data *msft = hdev->msft_data; - int err; - - while (1) { - monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); - if (!monitor) { - /* All monitors have been resumed */ - msft->resuming = false; - hci_update_passive_scan(hdev); - return; - } - - msft->pending_add_handle = (u16)handle; - err = __msft_add_monitor_pattern(hdev, monitor); - - /* If success, we return and wait for monitor added callback */ - if (!err) - return; - - /* Otherwise remove the monitor and keep registering */ - hci_free_adv_monitor(hdev, monitor); - handle++; - } -} - /* is_mgmt = true matches the handle exposed to userspace via mgmt. * is_mgmt = false matches the handle used by the msft controller. * This function requires the caller holds hdev->lock @@ -204,34 +206,76 @@ static struct msft_monitor_advertisement_handle_data *msft_find_handle_data return NULL; } -static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev, - u8 status, u16 opcode, - struct sk_buff *skb) +/* This function requires the caller holds msft->filter_lock */ +static struct msft_monitor_addr_filter_data *msft_find_address_data + (struct hci_dev *hdev, u8 addr_type, bdaddr_t *addr, + u8 pattern_handle) +{ + struct msft_monitor_addr_filter_data *entry; + struct msft_data *msft = hdev->msft_data; + + list_for_each_entry(entry, &msft->address_filters, list) { + if (entry->pattern_handle == pattern_handle && + addr_type == entry->addr_type && + !bacmp(addr, &entry->bdaddr)) + return entry; + } + + return NULL; +} + +/* This function requires the caller holds hdev->lock */ +static int msft_monitor_device_del(struct hci_dev *hdev, __u16 mgmt_handle, + bdaddr_t *bdaddr, __u8 addr_type, + bool notify) +{ + struct monitored_device *dev, *tmp; + int count = 0; + + list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) { + /* mgmt_handle == 0 indicates remove all devices, whereas, + * bdaddr == NULL indicates remove all devices matching the + * mgmt_handle. 
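
msft_monitor_device_del() above treats its arguments as wildcards: mgmt_handle == 0 matches every monitor, and a NULL bdaddr matches every device tracked under the given monitor. The match predicate, modelled standalone with illustrative types:

#include <stdbool.h>
#include <string.h>

struct monitored_dev_model {
	unsigned short handle;
	unsigned char bdaddr[6];
	unsigned char addr_type;
};

bool monitored_dev_matches(const struct monitored_dev_model *dev,
			   unsigned short mgmt_handle,
			   const unsigned char *bdaddr,
			   unsigned char addr_type)
{
	if (mgmt_handle && dev->handle != mgmt_handle)
		return false;
	return !bdaddr || (!memcmp(bdaddr, dev->bdaddr, 6) &&
			   addr_type == dev->addr_type);
}
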
+ */ + if ((!mgmt_handle || dev->handle == mgmt_handle) && + (!bdaddr || (!bacmp(bdaddr, &dev->bdaddr) && + addr_type == dev->addr_type))) { + if (notify && dev->notified) { + mgmt_adv_monitor_device_lost(hdev, dev->handle, + &dev->bdaddr, + dev->addr_type); + } + + list_del(&dev->list); + kfree(dev); + count++; + } + } + + return count; +} + +static int msft_le_monitor_advertisement_cb(struct hci_dev *hdev, u16 opcode, + struct adv_monitor *monitor, + struct sk_buff *skb) { struct msft_rp_le_monitor_advertisement *rp; - struct adv_monitor *monitor; struct msft_monitor_advertisement_handle_data *handle_data; struct msft_data *msft = hdev->msft_data; + int status = 0; hci_dev_lock(hdev); - monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle); - if (!monitor) { - bt_dev_err(hdev, "msft add advmon: monitor %u is not found!", - msft->pending_add_handle); + rp = (struct msft_rp_le_monitor_advertisement *)skb->data; + if (skb->len < sizeof(*rp)) { status = HCI_ERROR_UNSPECIFIED; goto unlock; } + status = rp->status; if (status) goto unlock; - rp = (struct msft_rp_le_monitor_advertisement *)skb->data; - if (skb->len < sizeof(*rp)) { - status = HCI_ERROR_UNSPECIFIED; - goto unlock; - } - handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL); if (!handle_data) { status = HCI_ERROR_UNSPECIFIED; @@ -240,35 +284,95 @@ static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev, handle_data->mgmt_handle = monitor->handle; handle_data->msft_handle = rp->handle; + handle_data->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN; INIT_LIST_HEAD(&handle_data->list); list_add(&handle_data->list, &msft->handle_map); monitor->state = ADV_MONITOR_STATE_OFFLOADED; unlock: - if (status && monitor) + if (status) hci_free_adv_monitor(hdev, monitor); hci_dev_unlock(hdev); - if (!msft->resuming) - hci_add_adv_patterns_monitor_complete(hdev, status); + return status; +} + +/* This function requires the caller holds hci_req_sync_lock */ +static void msft_remove_addr_filters_sync(struct hci_dev *hdev, u8 handle) +{ + struct msft_monitor_addr_filter_data *address_filter, *n; + struct msft_cp_le_cancel_monitor_advertisement cp; + struct msft_data *msft = hdev->msft_data; + struct list_head head; + struct sk_buff *skb; + + INIT_LIST_HEAD(&head); + + /* Cancel all corresponding address monitors */ + mutex_lock(&msft->filter_lock); + + list_for_each_entry_safe(address_filter, n, &msft->address_filters, + list) { + if (address_filter->pattern_handle != handle) + continue; + + list_del(&address_filter->list); + + /* Keep the address filter and let + * msft_add_address_filter_sync() remove and free the address + * filter. 
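
The teardown above has to coordinate with address-filter commands still in flight: a filter in AF_STATE_ADDING is only flagged AF_STATE_REMOVING and left for msft_add_address_filter_sync() to free, and one already in AF_STATE_REMOVING is left to msft_cancel_address_filter_sync(), so exactly one path frees each filter. A standalone sketch of that ownership rule (enum names mirror the diff, values illustrative):

enum af_state_model { AF_IDLE, AF_ADDING, AF_ADDED, AF_REMOVING };

/* returns 1 when the caller may cancel and free the filter itself */
int af_claim_teardown(enum af_state_model *state)
{
	switch (*state) {
	case AF_ADDING:		/* the in-flight add will free it */
		*state = AF_REMOVING;
		return 0;
	case AF_REMOVING:	/* another path already owns teardown */
		return 0;
	default:
		return 1;
	}
}
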
+ */ + if (address_filter->state == AF_STATE_ADDING) { + address_filter->state = AF_STATE_REMOVING; + continue; + } + + /* Keep the address filter and let + * msft_cancel_address_filter_sync() remove and free the address + * filter + */ + if (address_filter->state == AF_STATE_REMOVING) + continue; + + list_add_tail(&address_filter->list, &head); + } + + mutex_unlock(&msft->filter_lock); + + list_for_each_entry_safe(address_filter, n, &head, list) { + list_del(&address_filter->list); + + cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT; + cp.handle = address_filter->msft_handle; + + skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + kfree(address_filter); + continue; + } + + kfree_skb(skb); + + bt_dev_dbg(hdev, "MSFT: Canceled device %pMR address filter", + &address_filter->bdaddr); + + kfree(address_filter); + } } -static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, - u8 status, u16 opcode, - struct sk_buff *skb) +static int msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, + u16 opcode, + struct adv_monitor *monitor, + struct sk_buff *skb) { - struct msft_cp_le_cancel_monitor_advertisement *cp; struct msft_rp_le_cancel_monitor_advertisement *rp; - struct adv_monitor *monitor; struct msft_monitor_advertisement_handle_data *handle_data; struct msft_data *msft = hdev->msft_data; - int err; - bool pending; - - if (status) - goto done; + int status = 0; + u8 msft_handle; rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data; if (skb->len < sizeof(*rp)) { @@ -276,57 +380,52 @@ static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, goto done; } + status = rp->status; + if (status) + goto done; + hci_dev_lock(hdev); - cp = hci_sent_cmd_data(hdev, hdev->msft_opcode); - handle_data = msft_find_handle_data(hdev, cp->handle, false); + handle_data = msft_find_handle_data(hdev, monitor->handle, true); if (handle_data) { - monitor = idr_find(&hdev->adv_monitors_idr, - handle_data->mgmt_handle); - - if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED) + if (monitor->state == ADV_MONITOR_STATE_OFFLOADED) monitor->state = ADV_MONITOR_STATE_REGISTERED; /* Do not free the monitor if it is being removed due to * suspend. It will be re-monitored on resume. */ - if (monitor && !msft->suspending) + if (!msft->suspending) { hci_free_adv_monitor(hdev, monitor); + /* Clear any monitored devices by this Adv Monitor */ + msft_monitor_device_del(hdev, handle_data->mgmt_handle, + NULL, 0, false); + } + + msft_handle = handle_data->msft_handle; + list_del(&handle_data->list); kfree(handle_data); - } - /* If remove all monitors is required, we need to continue the process - * here because the earlier it was paused when waiting for the - * response from controller. 
- */ - if (msft->pending_remove_handle == 0) { - pending = hci_remove_all_adv_monitor(hdev, &err); - if (pending) { - hci_dev_unlock(hdev); - return; - } + hci_dev_unlock(hdev); - if (err) - status = HCI_ERROR_UNSPECIFIED; + msft_remove_addr_filters_sync(hdev, msft_handle); + } else { + hci_dev_unlock(hdev); } - hci_dev_unlock(hdev); - done: - if (!msft->suspending) - hci_remove_adv_monitor_complete(hdev, status); + return status; } +/* This function requires the caller holds hci_req_sync_lock */ static int msft_remove_monitor_sync(struct hci_dev *hdev, struct adv_monitor *monitor) { struct msft_cp_le_cancel_monitor_advertisement cp; struct msft_monitor_advertisement_handle_data *handle_data; struct sk_buff *skb; - u8 status; handle_data = msft_find_handle_data(hdev, monitor->handle, true); @@ -342,13 +441,8 @@ static int msft_remove_monitor_sync(struct hci_dev *hdev, if (IS_ERR(skb)) return PTR_ERR(skb); - status = skb->data[0]; - skb_pull(skb, 1); - - msft_le_cancel_monitor_advertisement_cb(hdev, status, hdev->msft_opcode, - skb); - - return status; + return msft_le_cancel_monitor_advertisement_cb(hdev, hdev->msft_opcode, + monitor, skb); } /* This function requires the caller holds hci_req_sync_lock */ @@ -413,13 +507,14 @@ static int msft_add_monitor_sync(struct hci_dev *hdev, { struct msft_cp_le_monitor_advertisement *cp; struct msft_le_monitor_advertisement_pattern_data *pattern_data; + struct msft_monitor_advertisement_handle_data *handle_data; struct msft_le_monitor_advertisement_pattern *pattern; struct adv_pattern *entry; size_t total_size = sizeof(*cp) + sizeof(*pattern_data); ptrdiff_t offset = 0; u8 pattern_count = 0; struct sk_buff *skb; - u8 status; + int err; if (!msft_monitor_pattern_valid(monitor)) return -EINVAL; @@ -456,28 +551,42 @@ static int msft_add_monitor_sync(struct hci_dev *hdev, skb = __hci_cmd_sync(hdev, hdev->msft_opcode, total_size, cp, HCI_CMD_TIMEOUT); - kfree(cp); - if (IS_ERR(skb)) - return PTR_ERR(skb); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + goto out_free; + } - status = skb->data[0]; - skb_pull(skb, 1); + err = msft_le_monitor_advertisement_cb(hdev, hdev->msft_opcode, + monitor, skb); + if (err) + goto out_free; - msft_le_monitor_advertisement_cb(hdev, status, hdev->msft_opcode, skb); + handle_data = msft_find_handle_data(hdev, monitor->handle, true); + if (!handle_data) { + err = -ENODATA; + goto out_free; + } - return status; + handle_data->rssi_high = cp->rssi_high; + handle_data->rssi_low = cp->rssi_low; + handle_data->rssi_low_interval = cp->rssi_low_interval; + handle_data->rssi_sampling_period = cp->rssi_sampling_period; + +out_free: + kfree(cp); + return err; } /* This function requires the caller holds hci_req_sync_lock */ -int msft_resume_sync(struct hci_dev *hdev) +static void reregister_monitor(struct hci_dev *hdev) { - struct msft_data *msft = hdev->msft_data; struct adv_monitor *monitor; + struct msft_data *msft = hdev->msft_data; int handle = 0; - if (!msft || !msft_monitor_supported(hdev)) - return 0; + if (!msft) + return; msft->resuming = true; @@ -491,12 +600,34 @@ int msft_resume_sync(struct hci_dev *hdev) handle++; } - /* All monitors have been resumed */ + /* All monitors have been reregistered */ msft->resuming = false; +} + +/* This function requires the caller holds hci_req_sync_lock */ +int msft_resume_sync(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + + if (!msft || !msft_monitor_supported(hdev)) + return 0; + + hci_dev_lock(hdev); + + /* Clear already tracked devices on resume. 
Once the monitors are + * reregistered, devices in range will be found again after resume. + */ + hdev->advmon_pend_notify = false; + msft_monitor_device_del(hdev, 0, NULL, 0, true); + + hci_dev_unlock(hdev); + + reregister_monitor(hdev); return 0; } +/* This function requires the caller holds hci_req_sync_lock */ void msft_do_open(struct hci_dev *hdev) { struct msft_data *msft = hdev->msft_data; @@ -529,7 +660,7 @@ void msft_do_open(struct hci_dev *hdev) /* Monitors get removed on power off, so we need to explicitly * tell the controller to re-monitor. */ - reregister_monitor(hdev, 0); + reregister_monitor(hdev); } } @@ -537,6 +668,7 @@ void msft_do_close(struct hci_dev *hdev) { struct msft_data *msft = hdev->msft_data; struct msft_monitor_advertisement_handle_data *handle_data, *tmp; + struct msft_monitor_addr_filter_data *address_filter, *n; struct adv_monitor *monitor; if (!msft) @@ -557,6 +689,65 @@ void msft_do_close(struct hci_dev *hdev) list_del(&handle_data->list); kfree(handle_data); } + + mutex_lock(&msft->filter_lock); + list_for_each_entry_safe(address_filter, n, &msft->address_filters, + list) { + list_del(&address_filter->list); + kfree(address_filter); + } + mutex_unlock(&msft->filter_lock); + + hci_dev_lock(hdev); + + /* Clear any devices that are being monitored and notify device lost */ + hdev->advmon_pend_notify = false; + msft_monitor_device_del(hdev, 0, NULL, 0, true); + + hci_dev_unlock(hdev); +} + +static int msft_cancel_address_filter_sync(struct hci_dev *hdev, void *data) +{ + struct msft_monitor_addr_filter_data *address_filter = data; + struct msft_cp_le_cancel_monitor_advertisement cp; + struct msft_data *msft = hdev->msft_data; + struct sk_buff *skb; + int err = 0; + + if (!msft) { + bt_dev_err(hdev, "MSFT: msft data is freed"); + return -EINVAL; + } + + /* The address filter has been removed by hci dev close */ + if (!test_bit(HCI_UP, &hdev->flags)) + return 0; + + mutex_lock(&msft->filter_lock); + list_del(&address_filter->list); + mutex_unlock(&msft->filter_lock); + + cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT; + cp.handle = address_filter->msft_handle; + + skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "MSFT: Failed to cancel address (%pMR) filter", + &address_filter->bdaddr); + err = PTR_ERR(skb); + goto done; + } + kfree_skb(skb); + + bt_dev_dbg(hdev, "MSFT: Canceled device %pMR address filter", + &address_filter->bdaddr); + +done: + kfree(address_filter); + + return err; } void msft_register(struct hci_dev *hdev) @@ -572,10 +763,12 @@ void msft_register(struct hci_dev *hdev) } INIT_LIST_HEAD(&msft->handle_map); + INIT_LIST_HEAD(&msft->address_filters); hdev->msft_data = msft; + mutex_init(&msft->filter_lock); } -void msft_unregister(struct hci_dev *hdev) +void msft_release(struct hci_dev *hdev) { struct msft_data *msft = hdev->msft_data; @@ -587,13 +780,296 @@ void msft_unregister(struct hci_dev *hdev) hdev->msft_data = NULL; kfree(msft->evt_prefix); + mutex_destroy(&msft->filter_lock); kfree(msft); } +/* This function requires the caller holds hdev->lock */ +static void msft_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, + __u8 addr_type, __u16 mgmt_handle) +{ + struct monitored_device *dev; + + dev = kmalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + bt_dev_err(hdev, "MSFT vendor event %u: no memory", + MSFT_EV_LE_MONITOR_DEVICE); + return; + } + + bacpy(&dev->bdaddr, bdaddr); + dev->addr_type = addr_type; + dev->handle = mgmt_handle; + dev->notified = 
false; + + INIT_LIST_HEAD(&dev->list); + list_add(&dev->list, &hdev->monitored_devices); + hdev->advmon_pend_notify = true; +} + +/* This function requires the caller holds hdev->lock */ +static void msft_device_lost(struct hci_dev *hdev, bdaddr_t *bdaddr, + __u8 addr_type, __u16 mgmt_handle) +{ + if (!msft_monitor_device_del(hdev, mgmt_handle, bdaddr, addr_type, + true)) { + bt_dev_err(hdev, "MSFT vendor event %u: dev %pMR not in list", + MSFT_EV_LE_MONITOR_DEVICE, bdaddr); + } +} + +static void *msft_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, + u8 ev, size_t len) +{ + void *data; + + data = skb_pull_data(skb, len); + if (!data) + bt_dev_err(hdev, "Malformed MSFT vendor event: 0x%02x", ev); + + return data; +} + +static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data) +{ + struct msft_monitor_addr_filter_data *address_filter = data; + struct msft_rp_le_monitor_advertisement *rp; + struct msft_cp_le_monitor_advertisement *cp; + struct msft_data *msft = hdev->msft_data; + struct sk_buff *skb = NULL; + bool remove = false; + size_t size; + + if (!msft) { + bt_dev_err(hdev, "MSFT: msft data is freed"); + return -EINVAL; + } + + /* The address filter has been removed by hci dev close */ + if (!test_bit(HCI_UP, &hdev->flags)) + return -ENODEV; + + /* We are safe to use the address filter from now on. + * msft_monitor_device_evt() wouldn't delete this filter because it's + * not been added by now. + * And all other functions that requiring hci_req_sync_lock wouldn't + * touch this filter before this func completes because it's protected + * by hci_req_sync_lock. + */ + + if (address_filter->state == AF_STATE_REMOVING) { + mutex_lock(&msft->filter_lock); + list_del(&address_filter->list); + mutex_unlock(&msft->filter_lock); + kfree(address_filter); + return 0; + } + + size = sizeof(*cp) + + sizeof(address_filter->addr_type) + + sizeof(address_filter->bdaddr); + cp = kzalloc(size, GFP_KERNEL); + if (!cp) { + bt_dev_err(hdev, "MSFT: Alloc cmd param err"); + remove = true; + goto done; + } + + cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT; + cp->rssi_high = address_filter->rssi_high; + cp->rssi_low = address_filter->rssi_low; + cp->rssi_low_interval = address_filter->rssi_low_interval; + cp->rssi_sampling_period = address_filter->rssi_sampling_period; + cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_ADDR; + cp->data[0] = address_filter->addr_type; + memcpy(&cp->data[1], &address_filter->bdaddr, + sizeof(address_filter->bdaddr)); + + skb = __hci_cmd_sync(hdev, hdev->msft_opcode, size, cp, + HCI_CMD_TIMEOUT); + kfree(cp); + + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Failed to enable address %pMR filter", + &address_filter->bdaddr); + skb = NULL; + remove = true; + goto done; + } + + rp = skb_pull_data(skb, sizeof(*rp)); + if (!rp || rp->sub_opcode != MSFT_OP_LE_MONITOR_ADVERTISEMENT || + rp->status) + remove = true; + +done: + mutex_lock(&msft->filter_lock); + + if (remove) { + bt_dev_warn(hdev, "MSFT: Remove address (%pMR) filter", + &address_filter->bdaddr); + list_del(&address_filter->list); + kfree(address_filter); + } else { + address_filter->state = AF_STATE_ADDED; + address_filter->msft_handle = rp->handle; + bt_dev_dbg(hdev, "MSFT: Address %pMR filter enabled", + &address_filter->bdaddr); + } + mutex_unlock(&msft->filter_lock); + + kfree_skb(skb); + + return 0; +} + +/* This function requires the caller holds msft->filter_lock */ +static struct msft_monitor_addr_filter_data *msft_add_address_filter + (struct hci_dev *hdev, u8 addr_type, bdaddr_t *bdaddr, + struct 
msft_monitor_advertisement_handle_data *handle_data) +{ + struct msft_monitor_addr_filter_data *address_filter = NULL; + struct msft_data *msft = hdev->msft_data; + int err; + + address_filter = kzalloc(sizeof(*address_filter), GFP_KERNEL); + if (!address_filter) + return NULL; + + address_filter->state = AF_STATE_ADDING; + address_filter->msft_handle = 0xff; + address_filter->pattern_handle = handle_data->msft_handle; + address_filter->mgmt_handle = handle_data->mgmt_handle; + address_filter->rssi_high = handle_data->rssi_high; + address_filter->rssi_low = handle_data->rssi_low; + address_filter->rssi_low_interval = handle_data->rssi_low_interval; + address_filter->rssi_sampling_period = handle_data->rssi_sampling_period; + address_filter->addr_type = addr_type; + bacpy(&address_filter->bdaddr, bdaddr); + + /* With the above AF_STATE_ADDING, duplicated address filter can be + * avoided when receiving monitor device event (found/lost) frequently + * for the same device. + */ + list_add_tail(&address_filter->list, &msft->address_filters); + + err = hci_cmd_sync_queue(hdev, msft_add_address_filter_sync, + address_filter, NULL); + if (err < 0) { + bt_dev_err(hdev, "MSFT: Add address %pMR filter err", bdaddr); + list_del(&address_filter->list); + kfree(address_filter); + return NULL; + } + + bt_dev_dbg(hdev, "MSFT: Add device %pMR address filter", + &address_filter->bdaddr); + + return address_filter; +} + +/* This function requires the caller holds hdev->lock */ +static void msft_monitor_device_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct msft_monitor_addr_filter_data *n, *address_filter = NULL; + struct msft_ev_le_monitor_device *ev; + struct msft_monitor_advertisement_handle_data *handle_data; + struct msft_data *msft = hdev->msft_data; + u16 mgmt_handle = 0xffff; + u8 addr_type; + + ev = msft_skb_pull(hdev, skb, MSFT_EV_LE_MONITOR_DEVICE, sizeof(*ev)); + if (!ev) + return; + + bt_dev_dbg(hdev, + "MSFT vendor event 0x%02x: handle 0x%04x state %d addr %pMR", + MSFT_EV_LE_MONITOR_DEVICE, ev->monitor_handle, + ev->monitor_state, &ev->bdaddr); + + handle_data = msft_find_handle_data(hdev, ev->monitor_handle, false); + + if (!hci_test_quirk(hdev, HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER)) { + if (!handle_data) + return; + mgmt_handle = handle_data->mgmt_handle; + goto report_state; + } + + if (handle_data) { + /* Don't report any device found/lost event from pattern + * monitors. Pattern monitor always has its address filters for + * tracking devices. + */ + + address_filter = msft_find_address_data(hdev, ev->addr_type, + &ev->bdaddr, + handle_data->msft_handle); + if (address_filter) + return; + + if (ev->monitor_state && handle_data->cond_type == + MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN) + msft_add_address_filter(hdev, ev->addr_type, + &ev->bdaddr, handle_data); + + return; + } + + /* This device event is not from pattern monitor. + * Report it if there is a corresponding address_filter for it. 
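+	 * The list walk below is safe: msft_vendor_evt() takes
+	 * msft->filter_lock before dispatching MSFT_EV_LE_MONITOR_DEVICE
+	 * to this handler.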
+ */ + list_for_each_entry(n, &msft->address_filters, list) { + if (n->state == AF_STATE_ADDED && + n->msft_handle == ev->monitor_handle) { + mgmt_handle = n->mgmt_handle; + address_filter = n; + break; + } + } + + if (!address_filter) { + bt_dev_warn(hdev, "MSFT: Unexpected device event %pMR, %u, %u", + &ev->bdaddr, ev->monitor_handle, ev->monitor_state); + return; + } + +report_state: + switch (ev->addr_type) { + case ADDR_LE_DEV_PUBLIC: + addr_type = BDADDR_LE_PUBLIC; + break; + + case ADDR_LE_DEV_RANDOM: + addr_type = BDADDR_LE_RANDOM; + break; + + default: + bt_dev_err(hdev, + "MSFT vendor event 0x%02x: unknown addr type 0x%02x", + MSFT_EV_LE_MONITOR_DEVICE, ev->addr_type); + return; + } + + if (ev->monitor_state) { + msft_device_found(hdev, &ev->bdaddr, addr_type, mgmt_handle); + } else { + if (address_filter && address_filter->state == AF_STATE_ADDED) { + address_filter->state = AF_STATE_REMOVING; + hci_cmd_sync_queue(hdev, + msft_cancel_address_filter_sync, + address_filter, + NULL); + } + msft_device_lost(hdev, &ev->bdaddr, addr_type, mgmt_handle); + } +} + void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct msft_data *msft = hdev->msft_data; - u8 event; + u8 *evt_prefix; + u8 *evt; if (!msft) return; @@ -602,13 +1078,12 @@ void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) * matches, and otherwise just return. */ if (msft->evt_prefix_len > 0) { - if (skb->len < msft->evt_prefix_len) + evt_prefix = msft_skb_pull(hdev, skb, 0, msft->evt_prefix_len); + if (!evt_prefix) return; - if (memcmp(skb->data, msft->evt_prefix, msft->evt_prefix_len)) + if (memcmp(evt_prefix, msft->evt_prefix, msft->evt_prefix_len)) return; - - skb_pull(skb, msft->evt_prefix_len); } /* Every event starts at least with an event code and the rest of @@ -617,10 +1092,25 @@ void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) if (skb->len < 1) return; - event = *skb->data; - skb_pull(skb, 1); + evt = msft_skb_pull(hdev, skb, 0, sizeof(*evt)); + if (!evt) + return; + + hci_dev_lock(hdev); + + switch (*evt) { + case MSFT_EV_LE_MONITOR_DEVICE: + mutex_lock(&msft->filter_lock); + msft_monitor_device_evt(hdev, skb); + mutex_unlock(&msft->filter_lock); + break; + + default: + bt_dev_dbg(hdev, "MSFT vendor event 0x%02x", *evt); + break; + } - bt_dev_dbg(hdev, "MSFT vendor event %u", event); + hci_dev_unlock(hdev); } __u64 msft_get_features(struct hci_dev *hdev) @@ -631,17 +1121,12 @@ __u64 msft_get_features(struct hci_dev *hdev) } static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, - u8 status, u16 opcode, - struct sk_buff *skb) + void *user_data, + u8 status) { - struct msft_cp_le_set_advertisement_filter_enable *cp; - struct msft_rp_le_set_advertisement_filter_enable *rp; + struct msft_cp_le_set_advertisement_filter_enable *cp = user_data; struct msft_data *msft = hdev->msft_data; - rp = (struct msft_rp_le_set_advertisement_filter_enable *)skb->data; - if (skb->len < sizeof(*rp)) - return; - /* Error 0x0C would be returned if the filter enabled status is * already set to whatever we were trying to set. 
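	 * (0x0C is the HCI "Command Disallowed" status code.)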
* Although the default state should be disabled, some controller set @@ -654,7 +1139,6 @@ static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, hci_dev_lock(hdev); - cp = hci_sent_cmd_data(hdev, hdev->msft_opcode); msft->filter_enabled = cp->enable; if (status == 0x0C) @@ -664,66 +1148,7 @@ static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, hci_dev_unlock(hdev); } -/* This function requires the caller holds hdev->lock */ -static int __msft_add_monitor_pattern(struct hci_dev *hdev, - struct adv_monitor *monitor) -{ - struct msft_cp_le_monitor_advertisement *cp; - struct msft_le_monitor_advertisement_pattern_data *pattern_data; - struct msft_le_monitor_advertisement_pattern *pattern; - struct adv_pattern *entry; - struct hci_request req; - struct msft_data *msft = hdev->msft_data; - size_t total_size = sizeof(*cp) + sizeof(*pattern_data); - ptrdiff_t offset = 0; - u8 pattern_count = 0; - int err = 0; - - if (!msft_monitor_pattern_valid(monitor)) - return -EINVAL; - - list_for_each_entry(entry, &monitor->patterns, list) { - pattern_count++; - total_size += sizeof(*pattern) + entry->length; - } - - cp = kmalloc(total_size, GFP_KERNEL); - if (!cp) - return -ENOMEM; - - cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT; - cp->rssi_high = monitor->rssi.high_threshold; - cp->rssi_low = monitor->rssi.low_threshold; - cp->rssi_low_interval = (u8)monitor->rssi.low_threshold_timeout; - cp->rssi_sampling_period = monitor->rssi.sampling_period; - - cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN; - - pattern_data = (void *)cp->data; - pattern_data->count = pattern_count; - - list_for_each_entry(entry, &monitor->patterns, list) { - pattern = (void *)(pattern_data->data + offset); - /* the length also includes data_type and offset */ - pattern->length = entry->length + 2; - pattern->data_type = entry->ad_type; - pattern->start_byte = entry->offset; - memcpy(pattern->pattern, entry->value, entry->length); - offset += sizeof(*pattern) + entry->length; - } - - hci_req_init(&req, hdev); - hci_req_add(&req, hdev->msft_opcode, total_size, cp); - err = hci_req_run_skb(&req, msft_le_monitor_advertisement_cb); - kfree(cp); - - if (!err) - msft->pending_add_handle = monitor->handle; - - return err; -} - -/* This function requires the caller holds hdev->lock */ +/* This function requires the caller holds hci_req_sync_lock */ int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor) { struct msft_data *msft = hdev->msft_data; @@ -734,41 +1159,11 @@ int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor) if (msft->resuming || msft->suspending) return -EBUSY; - return __msft_add_monitor_pattern(hdev, monitor); -} - -/* This function requires the caller holds hdev->lock */ -static int __msft_remove_monitor(struct hci_dev *hdev, - struct adv_monitor *monitor, u16 handle) -{ - struct msft_cp_le_cancel_monitor_advertisement cp; - struct msft_monitor_advertisement_handle_data *handle_data; - struct hci_request req; - struct msft_data *msft = hdev->msft_data; - int err = 0; - - handle_data = msft_find_handle_data(hdev, monitor->handle, true); - - /* If no matched handle, just remove without telling controller */ - if (!handle_data) - return -ENOENT; - - cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT; - cp.handle = handle_data->msft_handle; - - hci_req_init(&req, hdev); - hci_req_add(&req, hdev->msft_opcode, sizeof(cp), &cp); - err = hci_req_run_skb(&req, msft_le_cancel_monitor_advertisement_cb); - - if (!err) - 
msft->pending_remove_handle = handle; - - return err; + return msft_add_monitor_sync(hdev, monitor); } -/* This function requires the caller holds hdev->lock */ -int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, - u16 handle) +/* This function requires the caller holds hci_req_sync_lock */ +int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { struct msft_data *msft = hdev->msft_data; @@ -778,34 +1173,26 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, if (msft->resuming || msft->suspending) return -EBUSY; - return __msft_remove_monitor(hdev, monitor, handle); -} - -void msft_req_add_set_filter_enable(struct hci_request *req, bool enable) -{ - struct hci_dev *hdev = req->hdev; - struct msft_cp_le_set_advertisement_filter_enable cp; - - cp.sub_opcode = MSFT_OP_LE_SET_ADVERTISEMENT_FILTER_ENABLE; - cp.enable = enable; - - hci_req_add(req, hdev->msft_opcode, sizeof(cp), &cp); + return msft_remove_monitor_sync(hdev, monitor); } int msft_set_filter_enable(struct hci_dev *hdev, bool enable) { - struct hci_request req; + struct msft_cp_le_set_advertisement_filter_enable cp; struct msft_data *msft = hdev->msft_data; int err; if (!msft) return -EOPNOTSUPP; - hci_req_init(&req, hdev); - msft_req_add_set_filter_enable(&req, enable); - err = hci_req_run_skb(&req, msft_le_set_advertisement_filter_enable_cb); + cp.sub_opcode = MSFT_OP_LE_SET_ADVERTISEMENT_FILTER_ENABLE; + cp.enable = enable; + err = __hci_cmd_sync_status(hdev, hdev->msft_opcode, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + + msft_le_set_advertisement_filter_enable_cb(hdev, &cp, err); - return err; + return 0; } bool msft_curve_validity(struct hci_dev *hdev) diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h index afcaf7d3b1cb..fe538e9c91c0 100644 --- a/net/bluetooth/msft.h +++ b/net/bluetooth/msft.h @@ -14,14 +14,13 @@ bool msft_monitor_supported(struct hci_dev *hdev); void msft_register(struct hci_dev *hdev); -void msft_unregister(struct hci_dev *hdev); +void msft_release(struct hci_dev *hdev); void msft_do_open(struct hci_dev *hdev); void msft_do_close(struct hci_dev *hdev); void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb); __u64 msft_get_features(struct hci_dev *hdev); int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor); -int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, - u16 handle); +int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); void msft_req_add_set_filter_enable(struct hci_request *req, bool enable); int msft_set_filter_enable(struct hci_dev *hdev, bool enable); int msft_suspend_sync(struct hci_dev *hdev); @@ -36,7 +35,7 @@ static inline bool msft_monitor_supported(struct hci_dev *hdev) } static inline void msft_register(struct hci_dev *hdev) {} -static inline void msft_unregister(struct hci_dev *hdev) {} +static inline void msft_release(struct hci_dev *hdev) {} static inline void msft_do_open(struct hci_dev *hdev) {} static inline void msft_do_close(struct hci_dev *hdev) {} static inline void msft_vendor_evt(struct hci_dev *hdev, void *data, @@ -49,8 +48,7 @@ static inline int msft_add_monitor_pattern(struct hci_dev *hdev, } static inline int msft_remove_monitor(struct hci_dev *hdev, - struct adv_monitor *monitor, - u16 handle) + struct adv_monitor *monitor) { return -EOPNOTSUPP; } diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 7324764384b6..57b1dca8141f 100644 --- a/net/bluetooth/rfcomm/core.c +++ 
b/net/bluetooth/rfcomm/core.c @@ -28,13 +28,15 @@ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/kthread.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/rfcomm.h> +#include <trace/events/sock.h> + #define VERSION "1.11" static bool disable_cfc; @@ -186,6 +188,8 @@ static void rfcomm_l2state_change(struct sock *sk) static void rfcomm_l2data_ready(struct sock *sk) { + trace_sk_data_ready(sk); + BT_DBG("%p", sk); rfcomm_schedule(); } @@ -231,7 +235,7 @@ static int rfcomm_check_security(struct rfcomm_dlc *d) static void rfcomm_session_timeout(struct timer_list *t) { - struct rfcomm_session *s = from_timer(s, t, timer); + struct rfcomm_session *s = timer_container_of(s, t, timer); BT_DBG("session %p state %ld", s, s->state); @@ -250,13 +254,13 @@ static void rfcomm_session_clear_timer(struct rfcomm_session *s) { BT_DBG("session %p state %ld", s, s->state); - del_timer_sync(&s->timer); + timer_delete_sync(&s->timer); } /* ---- RFCOMM DLCs ---- */ static void rfcomm_dlc_timeout(struct timer_list *t) { - struct rfcomm_dlc *d = from_timer(d, t, timer); + struct rfcomm_dlc *d = timer_container_of(d, t, timer); BT_DBG("dlc %p state %ld", d, d->state); @@ -277,7 +281,7 @@ static void rfcomm_dlc_clear_timer(struct rfcomm_dlc *d) { BT_DBG("dlc %p state %ld", d, d->state); - if (del_timer(&d->timer)) + if (timer_delete(&d->timer)) rfcomm_dlc_put(d); } @@ -590,7 +594,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) ret = rfcomm_dlc_send_frag(d, frag); if (ret < 0) { - kfree_skb(frag); + dev_kfree_skb_irq(frag); goto unlock; } @@ -777,7 +781,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, addr.l2_psm = 0; addr.l2_cid = 0; addr.l2_bdaddr_type = BDADDR_BREDR; - *err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); + *err = kernel_bind(sock, (struct sockaddr_unsized *)&addr, sizeof(addr)); if (*err < 0) goto failed; @@ -804,7 +808,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, addr.l2_psm = cpu_to_le16(L2CAP_PSM_RFCOMM); addr.l2_cid = 0; addr.l2_bdaddr_type = BDADDR_BREDR; - *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK); + *err = kernel_connect(sock, (struct sockaddr_unsized *)&addr, sizeof(addr), O_NONBLOCK); if (*err == 0 || *err == -EINPROGRESS) return s; @@ -1937,7 +1941,7 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s) /* Get data directly from socket receive queue without copying it. */ while ((skb = skb_dequeue(&sk->sk_receive_queue))) { skb_orphan(skb); - if (!skb_linearize(skb)) { + if (!skb_linearize(skb) && sk->sk_state != BT_CLOSED) { s = rfcomm_recv_frame(s, skb); if (!s) break; @@ -1958,7 +1962,8 @@ static void rfcomm_accept_connection(struct rfcomm_session *s) int err; /* Fast check for a new connection. - * Avoids unnesesary socket allocations. */ + * Avoids unnecessary socket allocations. 
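+	 * A connection that races past this unlocked check is picked up
+	 * on a later pass of the RFCOMM processing thread.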
+ */ if (list_empty(&bt_sk(sock->sk)->accept_q)) return; @@ -2063,7 +2068,7 @@ static int rfcomm_add_listener(bdaddr_t *ba) addr.l2_psm = cpu_to_le16(L2CAP_PSM_RFCOMM); addr.l2_cid = 0; addr.l2_bdaddr_type = BDADDR_BREDR; - err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); + err = kernel_bind(sock, (struct sockaddr_unsized *)&addr, sizeof(addr)); if (err < 0) { BT_ERR("Bind failed %d", err); goto failed; diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 4bf4ea6cbb5e..be6639cd6f59 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -268,21 +268,19 @@ static struct proto rfcomm_proto = { .obj_size = sizeof(struct rfcomm_pinfo) }; -static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern) +static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, + int proto, gfp_t prio, int kern) { struct rfcomm_dlc *d; struct sock *sk; - sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, kern); - if (!sk) + d = rfcomm_dlc_alloc(prio); + if (!d) return NULL; - sock_init_data(sock, sk); - INIT_LIST_HEAD(&bt_sk(sk)->accept_q); - - d = rfcomm_dlc_alloc(prio); - if (!d) { - sk_free(sk); + sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern); + if (!sk) { + rfcomm_dlc_free(d); return NULL; } @@ -298,11 +296,6 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; - sock_reset_flag(sk, SOCK_ZAPPED); - - sk->sk_protocol = proto; - sk->sk_state = BT_OPEN; - bt_sock_link(&rfcomm_sk_list, sk); BT_DBG("sk %p", sk); @@ -331,7 +324,7 @@ static int rfcomm_sock_create(struct net *net, struct socket *sock, return 0; } -static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) +static int rfcomm_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len) { struct sockaddr_rc sa; struct sock *sk = sock->sk; @@ -378,7 +371,8 @@ done: return err; } -static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) +static int rfcomm_sock_connect(struct socket *sock, struct sockaddr_unsized *addr, + int alen, int flags) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; @@ -391,6 +385,7 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a addr->sa_family != AF_BLUETOOTH) return -EINVAL; + sock_hold(sk); lock_sock(sk); if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { @@ -410,14 +405,18 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a d->sec_level = rfcomm_pi(sk)->sec_level; d->role_switch = rfcomm_pi(sk)->role_switch; + /* Drop sock lock to avoid potential deadlock with the RFCOMM lock */ + release_sock(sk); err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr, sa->rc_channel); - if (!err) + lock_sock(sk); + if (!err && !sock_flag(sk, SOCK_ZAPPED)) err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: release_sock(sk); + sock_put(sk); return err; } @@ -470,8 +469,8 @@ done: return err; } -static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags, - bool kern) +static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, + struct proto_accept_arg *arg) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *nsk; @@ -485,7 +484,7 @@ static int 
rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f goto done; } - timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); @@ -631,10 +630,9 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, switch (optname) { case RFCOMM_LM: - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) break; - } if (opt & RFCOMM_LM_FIPS) { err = -EINVAL; @@ -666,7 +664,6 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, struct sock *sk = sock->sk; struct bt_security sec; int err = 0; - size_t len; u32 opt; BT_DBG("sk %p", sk); @@ -688,11 +685,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, sec.level = BT_SECURITY_LOW; - len = min_t(unsigned int, sizeof(sec), optlen); - if (copy_from_sockptr(&sec, optval, len)) { - err = -EFAULT; + err = copy_safe_from_sockptr(&sec, sizeof(sec), optval, optlen); + if (err) break; - } if (sec.level > BT_SECURITY_HIGH) { err = -EINVAL; @@ -708,10 +703,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) break; - } if (opt) set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); @@ -735,7 +729,8 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u struct sock *l2cap_sk; struct l2cap_conn *conn; struct rfcomm_conninfo cinfo; - int len, err = 0; + int err = 0; + size_t len; u32 opt; BT_DBG("sk %p", sk); @@ -789,7 +784,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u cinfo.hci_handle = conn->hcon->handle; memcpy(cinfo.dev_class, conn->hcon->dev_class, 3); - len = min_t(unsigned int, len, sizeof(cinfo)); + len = min(len, sizeof(cinfo)); if (copy_to_user(optval, (char *) &cinfo, len)) err = -EFAULT; @@ -808,7 +803,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c { struct sock *sk = sock->sk; struct bt_security sec; - int len, err = 0; + int err = 0; + size_t len; BT_DBG("sk %p", sk); @@ -833,7 +829,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c sec.level = rfcomm_pi(sk)->sec_level; sec.key_size = 0; - len = min_t(unsigned int, len, sizeof(sec)); + len = min(len, sizeof(sec)); if (copy_to_user(optval, (char *) &sec, len)) err = -EFAULT; @@ -871,9 +867,7 @@ static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned lon if (err == -ENOIOCTLCMD) { #ifdef CONFIG_BT_RFCOMM_TTY - lock_sock(sk); err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg); - release_sock(sk); #else err = -EOPNOTSUPP; #endif @@ -902,7 +896,10 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how) lock_sock(sk); if (!sk->sk_shutdown) { sk->sk_shutdown = SHUTDOWN_MASK; + + release_sock(sk); __rfcomm_sock_close(sk); + lock_sock(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index ebd78fdbd6e8..b783526ab588 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -35,7 +35,6 @@ #include <net/bluetooth/hci_core.h> #include <net/bluetooth/rfcomm.h> -#define RFCOMM_TTY_MAGIC 0x6d02 /* magic number for rfcomm struct */ #define RFCOMM_TTY_PORTS RFCOMM_MAX_DEV /* whole 
lotta rfcomm devices */ #define RFCOMM_TTY_MAJOR 216 /* device node major id of the usb/bluetooth.c driver */ #define RFCOMM_TTY_MINOR 0 @@ -120,7 +119,7 @@ static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty) } /* we block the open until the dlc->state becomes BT_CONNECTED */ -static int rfcomm_dev_carrier_raised(struct tty_port *port) +static bool rfcomm_dev_carrier_raised(struct tty_port *port) { struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); @@ -202,14 +201,14 @@ static ssize_t address_show(struct device *tty_dev, struct device_attribute *attr, char *buf) { struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); - return sprintf(buf, "%pMR\n", &dev->dst); + return sysfs_emit(buf, "%pMR\n", &dev->dst); } static ssize_t channel_show(struct device *tty_dev, struct device_attribute *attr, char *buf) { struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); - return sprintf(buf, "%d\n", dev->channel); + return sysfs_emit(buf, "%d\n", dev->channel); } static DEVICE_ATTR_RO(address); @@ -439,7 +438,6 @@ static int __rfcomm_release_dev(void __user *arg) { struct rfcomm_dev_req req; struct rfcomm_dev *dev; - struct tty_struct *tty; if (copy_from_user(&req, arg, sizeof(req))) return -EFAULT; @@ -465,11 +463,7 @@ static int __rfcomm_release_dev(void __user *arg) rfcomm_dlc_close(dev->dlc, 0); /* Shut down TTY synchronously before freeing rfcomm_dev */ - tty = tty_port_tty_get(&dev->port); - if (tty) { - tty_vhangup(tty); - tty_kref_put(tty); - } + tty_port_tty_vhangup(&dev->port); if (!test_bit(RFCOMM_TTY_OWNED, &dev->status)) tty_port_put(&dev->port); @@ -505,7 +499,7 @@ static int rfcomm_get_dev_list(void __user *arg) struct rfcomm_dev *dev; struct rfcomm_dev_list_req *dl; struct rfcomm_dev_info *di; - int n = 0, size, err; + int n = 0, err; u16 dev_num; BT_DBG(""); @@ -516,12 +510,11 @@ static int rfcomm_get_dev_list(void __user *arg) if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di)) return -EINVAL; - size = sizeof(*dl) + dev_num * sizeof(*di); - - dl = kzalloc(size, GFP_KERNEL); + dl = kzalloc(struct_size(dl, dev_info, dev_num), GFP_KERNEL); if (!dl) return -ENOMEM; + dl->dev_num = dev_num; di = dl->dev_info; mutex_lock(&rfcomm_dev_lock); @@ -529,12 +522,12 @@ static int rfcomm_get_dev_list(void __user *arg) list_for_each_entry(dev, &rfcomm_dev_list, list) { if (!tty_port_get(&dev->port)) continue; - (di + n)->id = dev->id; - (di + n)->flags = dev->flags; - (di + n)->state = dev->dlc->state; - (di + n)->channel = dev->channel; - bacpy(&(di + n)->src, &dev->src); - bacpy(&(di + n)->dst, &dev->dst); + di[n].id = dev->id; + di[n].flags = dev->flags; + di[n].state = dev->dlc->state; + di[n].channel = dev->channel; + bacpy(&di[n].src, &dev->src); + bacpy(&di[n].dst, &dev->dst); tty_port_put(&dev->port); if (++n >= dev_num) break; @@ -543,9 +536,7 @@ static int rfcomm_get_dev_list(void __user *arg) mutex_unlock(&rfcomm_dev_lock); dl->dev_num = n; - size = sizeof(*dl) + n * sizeof(*di); - - err = copy_to_user(arg, dl, size); + err = copy_to_user(arg, dl, struct_size(dl, dev_info, n)); kfree(dl); return err ? -EFAULT : 0; @@ -652,8 +643,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) tty_port_tty_hangup(&dev->port, true); dev->modem_status = - ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) | - ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) | + ((v24_sig & RFCOMM_V24_RTC) ? TIOCM_DSR : 0) | + ((v24_sig & RFCOMM_V24_RTR) ? TIOCM_CTS : 0) | ((v24_sig & RFCOMM_V24_IC) ? 
TIOCM_RI : 0) | ((v24_sig & RFCOMM_V24_DV) ? TIOCM_CD : 0); } @@ -772,7 +763,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) { - struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, dev->port.count); @@ -780,17 +771,18 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) tty_port_close(&dev->port, tty, filp); } -static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +static ssize_t rfcomm_tty_write(struct tty_struct *tty, const u8 *buf, + size_t count) { - struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dev *dev = tty->driver_data; struct rfcomm_dlc *dlc = dev->dlc; struct sk_buff *skb; - int sent = 0, size; + size_t sent = 0, size; - BT_DBG("tty %p count %d", tty, count); + BT_DBG("tty %p count %zu", tty, count); while (count) { - size = min_t(uint, count, dlc->mtu); + size = min_t(size_t, count, dlc->mtu); skb = rfcomm_wmalloc(dev, size + RFCOMM_SKB_RESERVE, GFP_ATOMIC); if (!skb) @@ -811,7 +803,7 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in static unsigned int rfcomm_tty_write_room(struct tty_struct *tty) { - struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dev *dev = tty->driver_data; int room = 0; if (dev && dev->dlc) @@ -855,7 +847,8 @@ static int rfcomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned l return -ENOIOCTLCMD; } -static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old) +static void rfcomm_tty_set_termios(struct tty_struct *tty, + const struct ktermios *old) { struct ktermios *new = &tty->termios; int old_baud_rate = tty_termios_baud_rate(old); @@ -864,7 +857,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old) u8 baud, data_bits, stop_bits, parity, x_on, x_off; u16 changes = 0; - struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p termios %p", tty, old); @@ -982,7 +975,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old) baud = RFCOMM_RPN_BR_230400; break; default: - /* 9600 is standard accordinag to the RFCOMM specification */ + /* 9600 is standard according to the RFCOMM specification */ baud = RFCOMM_RPN_BR_9600; break; @@ -996,7 +989,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old) static void rfcomm_tty_throttle(struct tty_struct *tty) { - struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); @@ -1005,7 +998,7 @@ static void rfcomm_tty_throttle(struct tty_struct *tty) static void rfcomm_tty_unthrottle(struct tty_struct *tty) { - struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); @@ -1014,7 +1007,7 @@ static void rfcomm_tty_unthrottle(struct tty_struct *tty) static unsigned int rfcomm_tty_chars_in_buffer(struct tty_struct *tty) { - struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); @@ -1029,7 +1022,7 @@ static unsigned int rfcomm_tty_chars_in_buffer(struct tty_struct *tty) static void 
rfcomm_tty_flush_buffer(struct tty_struct *tty) { - struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); @@ -1040,7 +1033,7 @@ static void rfcomm_tty_flush_buffer(struct tty_struct *tty) tty_wakeup(tty); } -static void rfcomm_tty_send_xchar(struct tty_struct *tty, char ch) +static void rfcomm_tty_send_xchar(struct tty_struct *tty, u8 ch) { BT_DBG("tty %p ch %c", tty, ch); } @@ -1052,7 +1045,7 @@ static void rfcomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) static void rfcomm_tty_hangup(struct tty_struct *tty) { - struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); @@ -1061,16 +1054,20 @@ static void rfcomm_tty_hangup(struct tty_struct *tty) static int rfcomm_tty_tiocmget(struct tty_struct *tty) { - struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dev *dev = tty->driver_data; + struct rfcomm_dlc *dlc = dev->dlc; + u8 v24_sig; BT_DBG("tty %p dev %p", tty, dev); - return dev->modem_status; + rfcomm_dlc_get_modem_status(dlc, &v24_sig); + + return (v24_sig & (TIOCM_DTR | TIOCM_RTS)) | dev->modem_status; } static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { - struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dev *dev = tty->driver_data; struct rfcomm_dlc *dlc = dev->dlc; u8 v24_sig; @@ -1078,23 +1075,15 @@ static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigne rfcomm_dlc_get_modem_status(dlc, &v24_sig); - if (set & TIOCM_DSR || set & TIOCM_DTR) + if (set & TIOCM_DTR) v24_sig |= RFCOMM_V24_RTC; - if (set & TIOCM_RTS || set & TIOCM_CTS) + if (set & TIOCM_RTS) v24_sig |= RFCOMM_V24_RTR; - if (set & TIOCM_RI) - v24_sig |= RFCOMM_V24_IC; - if (set & TIOCM_CD) - v24_sig |= RFCOMM_V24_DV; - if (clear & TIOCM_DSR || clear & TIOCM_DTR) + if (clear & TIOCM_DTR) v24_sig &= ~RFCOMM_V24_RTC; - if (clear & TIOCM_RTS || clear & TIOCM_CTS) + if (clear & TIOCM_RTS) v24_sig &= ~RFCOMM_V24_RTR; - if (clear & TIOCM_RI) - v24_sig &= ~RFCOMM_V24_IC; - if (clear & TIOCM_CD) - v24_sig &= ~RFCOMM_V24_DV; rfcomm_dlc_set_modem_status(dlc, v24_sig); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 8eabf41b2993..87ba90336e80 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -51,6 +51,7 @@ struct sco_conn { struct delayed_work timeout_work; unsigned int mtu; + struct kref ref; }; #define sco_conn_lock(c) spin_lock(&c->lock) @@ -68,7 +69,6 @@ struct sco_pinfo { bdaddr_t dst; __u32 flags; __u16 setting; - __u8 cmsg_mask; struct bt_codec codec; struct sco_conn *conn; }; @@ -77,17 +77,86 @@ struct sco_pinfo { #define SCO_CONN_TIMEOUT (HZ * 40) #define SCO_DISCONN_TIMEOUT (HZ * 2) +static void sco_conn_free(struct kref *ref) +{ + struct sco_conn *conn = container_of(ref, struct sco_conn, ref); + + BT_DBG("conn %p", conn); + + if (conn->sk) + sco_pi(conn->sk)->conn = NULL; + + if (conn->hcon) { + conn->hcon->sco_data = NULL; + hci_conn_drop(conn->hcon); + } + + /* Ensure no more work items will run since hci_conn has been dropped */ + disable_delayed_work_sync(&conn->timeout_work); + + kfree(conn); +} + +static void sco_conn_put(struct sco_conn *conn) +{ + if (!conn) + return; + + BT_DBG("conn %p refcnt %d", conn, kref_read(&conn->ref)); + + kref_put(&conn->ref, sco_conn_free); +} + +static struct sco_conn *sco_conn_hold(struct sco_conn *conn) +{ + BT_DBG("conn %p refcnt %u", conn, 
kref_read(&conn->ref)); + + kref_get(&conn->ref); + return conn; +} + +static struct sco_conn *sco_conn_hold_unless_zero(struct sco_conn *conn) +{ + if (!conn) + return NULL; + + BT_DBG("conn %p refcnt %u", conn, kref_read(&conn->ref)); + + if (!kref_get_unless_zero(&conn->ref)) + return NULL; + + return conn; +} + +static struct sock *sco_sock_hold(struct sco_conn *conn) +{ + if (!conn || !bt_sock_linked(&sco_sk_list, conn->sk)) + return NULL; + + sock_hold(conn->sk); + + return conn->sk; +} + static void sco_sock_timeout(struct work_struct *work) { struct sco_conn *conn = container_of(work, struct sco_conn, timeout_work.work); struct sock *sk; + conn = sco_conn_hold_unless_zero(conn); + if (!conn) + return; + sco_conn_lock(conn); - sk = conn->sk; - if (sk) - sock_hold(sk); + if (!conn->hcon) { + sco_conn_unlock(conn); + sco_conn_put(conn); + return; + } + sk = sco_sock_hold(conn); sco_conn_unlock(conn); + sco_conn_put(conn); if (!sk) return; @@ -123,24 +192,32 @@ static void sco_sock_clear_timer(struct sock *sk) /* ---- SCO connections ---- */ static struct sco_conn *sco_conn_add(struct hci_conn *hcon) { - struct hci_dev *hdev = hcon->hdev; struct sco_conn *conn = hcon->sco_data; - if (conn) + conn = sco_conn_hold_unless_zero(conn); + if (conn) { + if (!conn->hcon) { + sco_conn_lock(conn); + conn->hcon = hcon; + sco_conn_unlock(conn); + } return conn; + } conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL); if (!conn) return NULL; + kref_init(&conn->ref); spin_lock_init(&conn->lock); INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout); hcon->sco_data = conn; conn->hcon = hcon; + conn->mtu = hcon->mtu; - if (hdev->sco_mtu > 0) - conn->mtu = hdev->sco_mtu; + if (hcon->mtu > 0) + conn->mtu = hcon->mtu; else conn->mtu = 60; @@ -156,17 +233,15 @@ static void sco_chan_del(struct sock *sk, int err) struct sco_conn *conn; conn = sco_pi(sk)->conn; + sco_pi(sk)->conn = NULL; BT_DBG("sk %p, conn %p, err %d", sk, conn, err); if (conn) { sco_conn_lock(conn); conn->sk = NULL; - sco_pi(sk)->conn = NULL; sco_conn_unlock(conn); - - if (conn->hcon) - hci_conn_drop(conn->hcon); + sco_conn_put(conn); } sk->sk_state = BT_CLOSED; @@ -181,31 +256,28 @@ static void sco_conn_del(struct hci_conn *hcon, int err) struct sco_conn *conn = hcon->sco_data; struct sock *sk; + conn = sco_conn_hold_unless_zero(conn); if (!conn) return; BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); - /* Kill socket */ sco_conn_lock(conn); - sk = conn->sk; - if (sk) - sock_hold(sk); + sk = sco_sock_hold(conn); sco_conn_unlock(conn); + sco_conn_put(conn); - if (sk) { - lock_sock(sk); - sco_sock_clear_timer(sk); - sco_chan_del(sk, err); - release_sock(sk); - sock_put(sk); + if (!sk) { + sco_conn_put(conn); + return; } - /* Ensure no more work items will run before freeing conn. 
*/ - cancel_delayed_work_sync(&conn->timeout_work); - - hcon->sco_data = NULL; - kfree(conn); + /* Kill socket */ + lock_sock(sk); + sco_sock_clear_timer(sk); + sco_chan_del(sk, err); + release_sock(sk); + sock_put(sk); } static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, @@ -235,53 +307,79 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk, return err; } -static int sco_connect(struct hci_dev *hdev, struct sock *sk) +static int sco_connect(struct sock *sk) { struct sco_conn *conn; struct hci_conn *hcon; + struct hci_dev *hdev; int err, type; BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); + hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); + if (!hdev) + return -EHOSTUNREACH; + + hci_dev_lock(hdev); + if (lmp_esco_capable(hdev) && !disable_esco) type = ESCO_LINK; else type = SCO_LINK; - if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT && - (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) - return -EOPNOTSUPP; + switch (sco_pi(sk)->setting & SCO_AIRMODE_MASK) { + case SCO_AIRMODE_TRANSP: + if (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev)) { + err = -EOPNOTSUPP; + goto unlock; + } + break; + } hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst, - sco_pi(sk)->setting, &sco_pi(sk)->codec); - if (IS_ERR(hcon)) - return PTR_ERR(hcon); + sco_pi(sk)->setting, &sco_pi(sk)->codec, + READ_ONCE(sk->sk_sndtimeo)); + if (IS_ERR(hcon)) { + err = PTR_ERR(hcon); + goto unlock; + } conn = sco_conn_add(hcon); if (!conn) { hci_conn_drop(hcon); - return -ENOMEM; + err = -ENOMEM; + goto unlock; } - /* Update source addr of the socket */ - bacpy(&sco_pi(sk)->src, &hcon->src); + lock_sock(sk); err = sco_chan_add(conn, sk, NULL); - if (err) - return err; + if (err) { + release_sock(sk); + goto unlock; + } + + /* Update source addr of the socket */ + bacpy(&sco_pi(sk)->src, &hcon->src); if (hcon->state == BT_CONNECTED) { sco_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; } else { sk->sk_state = BT_CONNECT; - sco_sock_set_timer(sk, sk->sk_sndtimeo); + sco_sock_set_timer(sk, READ_ONCE(sk->sk_sndtimeo)); } + release_sock(sk); + +unlock: + hci_dev_unlock(hdev); + hci_dev_put(hdev); return err; } -static int sco_send_frame(struct sock *sk, struct sk_buff *skb) +static int sco_send_frame(struct sock *sk, struct sk_buff *skb, + const struct sockcm_cookie *sockc) { struct sco_conn *conn = sco_pi(sk)->conn; int len = skb->len; @@ -292,6 +390,7 @@ static int sco_send_frame(struct sock *sk, struct sk_buff *skb) BT_DBG("sk %p len %d", sk, len); + hci_setup_tx_timestamp(skb, 1, sockc); hci_send_sco(conn->hcon, skb); return len; @@ -367,6 +466,8 @@ static void sco_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); + sco_conn_put(sco_pi(sk)->conn); + skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } @@ -397,6 +498,13 @@ static void sco_sock_kill(struct sock *sk) BT_DBG("sk %p state %d", sk, sk->sk_state); + /* Sock is dead, so set conn->sk to NULL to avoid possible UAF */ + if (sco_pi(sk)->conn) { + sco_conn_lock(sco_pi(sk)->conn); + sco_pi(sk)->conn->sk = NULL; + sco_conn_unlock(sco_pi(sk)->conn); + } + /* Kill poor orphan */ bt_sock_unlink(&sco_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); @@ -414,17 +522,6 @@ static void __sco_sock_close(struct sock *sk) case BT_CONNECTED: case BT_CONFIG: - if (sco_pi(sk)->conn->hcon) { - sk->sk_state = BT_DISCONN; - sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); - sco_conn_lock(sco_pi(sk)->conn); - hci_conn_drop(sco_pi(sk)->conn->hcon); - sco_pi(sk)->conn->hcon = NULL; - 
sco_conn_unlock(sco_pi(sk)->conn); - } else - sco_chan_del(sk, ECONNRESET); - break; - case BT_CONNECT2: case BT_CONNECT: case BT_DISCONN: @@ -447,15 +544,6 @@ static void sco_sock_close(struct sock *sk) release_sock(sk); } -static void sco_skb_put_cmsg(struct sk_buff *skb, struct msghdr *msg, - struct sock *sk) -{ - if (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS) - put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS, - sizeof(bt_cb(skb)->sco.pkt_status), - &bt_cb(skb)->sco.pkt_status); -} - static void sco_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); @@ -464,8 +552,6 @@ static void sco_sock_init(struct sock *sk, struct sock *parent) sk->sk_type = parent->sk_type; bt_sk(sk)->flags = bt_sk(parent)->flags; security_sk_clone(parent, sk); - } else { - bt_sk(sk)->skb_put_cmsg = sco_skb_put_cmsg; } } @@ -480,21 +566,13 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, { struct sock *sk; - sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, kern); + sk = bt_sock_alloc(net, sock, &sco_proto, proto, prio, kern); if (!sk) return NULL; - sock_init_data(sock, sk); - INIT_LIST_HEAD(&bt_sk(sk)->accept_q); - sk->sk_destruct = sco_sock_destruct; sk->sk_sndtimeo = SCO_CONN_TIMEOUT; - sock_reset_flag(sk, SOCK_ZAPPED); - - sk->sk_protocol = proto; - sk->sk_state = BT_OPEN; - sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT; sco_pi(sk)->codec.id = BT_CODEC_CVSD; sco_pi(sk)->codec.cid = 0xffff; @@ -527,7 +605,7 @@ static int sco_sock_create(struct net *net, struct socket *sock, int protocol, return 0; } -static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, +static int sco_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; @@ -561,11 +639,10 @@ done: return err; } -static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) +static int sco_sock_connect(struct socket *sock, struct sockaddr_unsized *addr, int alen, int flags) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; - struct hci_dev *hdev; int err; BT_DBG("sk %p", sk); @@ -578,28 +655,22 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen return -EBADFD; if (sk->sk_type != SOCK_SEQPACKET) - return -EINVAL; - - hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR); - if (!hdev) - return -EHOSTUNREACH; - hci_dev_lock(hdev); + err = -EINVAL; lock_sock(sk); - /* Set destination address and psm */ bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr); + release_sock(sk); - err = sco_connect(hdev, sk); - hci_dev_unlock(hdev); - hci_dev_put(hdev); + err = sco_connect(sk); if (err) - goto done; + return err; + + lock_sock(sk); err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); -done: release_sock(sk); return err; } @@ -645,7 +716,7 @@ done: } static int sco_sock_accept(struct socket *sock, struct socket *newsock, - int flags, bool kern) + struct proto_accept_arg *arg) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *ch; @@ -654,7 +725,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, lock_sock(sk); - timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); @@ -722,6 +793,7 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, { struct sock *sk = sock->sk; struct sk_buff *skb; + struct sockcm_cookie sockc; int err; BT_DBG("sock %p, sk %p", 
sock, sk); @@ -733,6 +805,14 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; + hci_sockcm_init(&sockc, sk); + + if (msg->msg_controllen) { + err = sock_cmsg_send(sk, msg, &sockc); + if (err) + return err; + } + skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -740,7 +820,7 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, lock_sock(sk); if (sk->sk_state == BT_CONNECTED) - err = sco_send_frame(sk, skb); + err = sco_send_frame(sk, skb, &sockc); else err = -ENOTCONN; @@ -806,6 +886,10 @@ static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg, struct sock *sk = sock->sk; struct sco_pinfo *pi = sco_pi(sk); + if (unlikely(flags & MSG_ERRQUEUE)) + return sock_recv_errqueue(sk, msg, len, SOL_BLUETOOTH, + BT_SCM_ERROR); + lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && @@ -826,7 +910,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; - int len, err = 0; + int err = 0; struct bt_voice voice; u32 opt; struct bt_codecs *codecs; @@ -845,10 +929,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) break; - } if (opt) set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); @@ -865,18 +948,10 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, voice.setting = sco_pi(sk)->setting; - len = min_t(unsigned int, sizeof(voice), optlen); - if (copy_from_sockptr(&voice, optval, len)) { - err = -EFAULT; - break; - } - - /* Explicitly check for these values */ - if (voice.setting != BT_VOICE_TRANSPARENT && - voice.setting != BT_VOICE_CVSD_16BIT) { - err = -EINVAL; + err = copy_safe_from_sockptr(&voice, sizeof(voice), optval, + optlen); + if (err) break; - } sco_pi(sk)->setting = voice.setting; hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, @@ -885,22 +960,26 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, err = -EBADFD; break; } - if (enhanced_sco_capable(hdev) && - voice.setting == BT_VOICE_TRANSPARENT) - sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT; + + switch (sco_pi(sk)->setting & SCO_AIRMODE_MASK) { + case SCO_AIRMODE_TRANSP: + if (enhanced_sync_conn_capable(hdev)) + sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT; + break; + } + hci_dev_put(hdev); break; case BT_PKT_STATUS: - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) break; - } if (opt) - sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS; + set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags); else - sco_pi(sk)->cmsg_mask &= SCO_CMSG_PKT_STATUS; + clear_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags); break; case BT_CODEC: @@ -936,9 +1015,10 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(buffer, optval, optlen)) { + err = copy_struct_from_sockptr(buffer, sizeof(buffer), optval, + optlen); + if (err) { hci_dev_put(hdev); - err = -EFAULT; break; } @@ -969,7 +1049,8 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, struct sock *sk = sock->sk; struct sco_options opts; struct sco_conninfo cinfo; - int len, err = 0; + int err = 0; + size_t len; BT_DBG("sk %p", sk); @@ -991,7 +1072,7 @@ static int 
sco_sock_getsockopt_old(struct socket *sock, int optname, BT_DBG("mtu %u", opts.mtu); - len = min_t(unsigned int, len, sizeof(opts)); + len = min(len, sizeof(opts)); if (copy_to_user(optval, (char *)&opts, len)) err = -EFAULT; @@ -1009,7 +1090,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); - len = min_t(unsigned int, len, sizeof(cinfo)); + len = min(len, sizeof(cinfo)); if (copy_to_user(optval, (char *)&cinfo, len)) err = -EFAULT; @@ -1031,7 +1112,6 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, int len, err = 0; struct bt_voice voice; u32 phys; - int pkt_status; int buf_len; struct codec_list *c; u8 num_codecs, i, __user *ptr; @@ -1085,9 +1165,8 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, break; case BT_PKT_STATUS: - pkt_status = (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS); - - if (put_user(pkt_status, (int __user *)optval)) + if (put_user(test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags), + (int __user *)optval)) err = -EFAULT; break; @@ -1124,6 +1203,8 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, break; } + release_sock(sk); + /* find total buffer size required to copy codec + caps */ hci_dev_lock(hdev); list_for_each_entry(c, &hdev->local_codecs, list) { @@ -1141,15 +1222,13 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, buf_len += sizeof(struct bt_codecs); if (buf_len > len) { hci_dev_put(hdev); - err = -ENOBUFS; - break; + return -ENOBUFS; } ptr = optval; if (put_user(num_codecs, ptr)) { hci_dev_put(hdev); - err = -EFAULT; - break; + return -EFAULT; } ptr += sizeof(num_codecs); @@ -1189,12 +1268,14 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, ptr += len; } - if (!err && put_user(buf_len, optlen)) - err = -EFAULT; - hci_dev_unlock(hdev); hci_dev_put(hdev); + lock_sock(sk); + + if (!err && put_user(buf_len, optlen)) + err = -EFAULT; + break; default: @@ -1248,7 +1329,7 @@ static int sco_sock_release(struct socket *sock) sco_sock_close(sk); - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) && !(current->flags & PF_EXITING)) { lock_sock(sk); err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); @@ -1302,6 +1383,7 @@ static void sco_conn_ready(struct sco_conn *conn) bacpy(&sco_pi(sk)->src, &conn->hcon->src); bacpy(&sco_pi(sk)->dst, &conn->hcon->dst); + sco_conn_hold(conn); hci_conn_hold(conn->hcon); __sco_chan_add(conn, sk, parent); @@ -1358,8 +1440,10 @@ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status) struct sco_conn *conn; conn = sco_conn_add(hcon); - if (conn) + if (conn) { sco_conn_ready(conn); + sco_conn_put(conn); + } } else sco_conn_del(hcon, bt_to_errno(status)); } @@ -1374,22 +1458,39 @@ static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) sco_conn_del(hcon, bt_to_errno(reason)); } -void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) +int sco_recv_scodata(struct hci_dev *hdev, u16 handle, struct sk_buff *skb) { - struct sco_conn *conn = hcon->sco_data; + struct hci_conn *hcon; + struct sco_conn *conn; - if (!conn) - goto drop; + hci_dev_lock(hdev); + + hcon = hci_conn_hash_lookup_handle(hdev, handle); + if (!hcon) { + hci_dev_unlock(hdev); + kfree_skb(skb); + return -ENOENT; + } + + conn = sco_conn_hold_unless_zero(hcon->sco_data); + hcon = NULL; + + 
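+	/* hcon must not be used once the hdev lock is dropped, so the
+	 * pointer is cleared above; only the refcounted sco_conn is
+	 * needed from here on.
+	 */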
@@ -1374,22 +1458,39 @@ static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
 	sco_conn_del(hcon, bt_to_errno(reason));
 }
 
-void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+int sco_recv_scodata(struct hci_dev *hdev, u16 handle, struct sk_buff *skb)
 {
-	struct sco_conn *conn = hcon->sco_data;
+	struct hci_conn *hcon;
+	struct sco_conn *conn;
 
-	if (!conn)
-		goto drop;
+	hci_dev_lock(hdev);
+
+	hcon = hci_conn_hash_lookup_handle(hdev, handle);
+	if (!hcon) {
+		hci_dev_unlock(hdev);
+		kfree_skb(skb);
+		return -ENOENT;
+	}
+
+	conn = sco_conn_hold_unless_zero(hcon->sco_data);
+	hcon = NULL;
+
+	hci_dev_unlock(hdev);
+
+	if (!conn) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
 
 	BT_DBG("conn %p len %u", conn, skb->len);
 
-	if (skb->len) {
+	if (skb->len)
 		sco_recv_frame(conn, skb);
-		return;
-	}
+	else
+		kfree_skb(skb);
 
-drop:
-	kfree_skb(skb);
+	sco_conn_put(conn);
+	return 0;
 }
 
 static struct hci_cb sco_cb = {
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 11f853d0500f..3a1ce04a7a53 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -22,11 +22,10 @@
 
 #include <linux/debugfs.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <crypto/aes.h>
-#include <crypto/algapi.h>
 #include <crypto/hash.h>
 #include <crypto/kpp.h>
+#include <crypto/utils.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -56,7 +55,9 @@
 /* Keys which are not distributed with Secure Connections */
 #define SMP_SC_NO_DIST (SMP_DIST_ENC_KEY | SMP_DIST_LINK_KEY)
 
-#define SMP_TIMEOUT	msecs_to_jiffies(30000)
+#define SMP_TIMEOUT	secs_to_jiffies(30)
+
+#define ID_ADDR_TIMEOUT	msecs_to_jiffies(200)
 
 #define AUTH_REQ_MASK(dev)	(hci_dev_test_flag(dev, HCI_SC_ENABLED) ? \
 				 0x3f : 0x07)
@@ -605,9 +606,9 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
 
 	memset(&msg, 0, sizeof(msg));
 
-	iov_iter_kvec(&msg.msg_iter, WRITE, iv, 2, 1 + len);
+	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iv, 2, 1 + len);
 
-	l2cap_chan_send(chan, &msg, 1 + len);
+	l2cap_chan_send(chan, &msg, 1 + len, NULL);
 
 	if (!chan->data)
 		return;
@@ -913,7 +914,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
 	 * Confirms and the responder Enters the passkey.
 	 */
 	if (smp->method == OVERLAP) {
-		if (hcon->role == HCI_ROLE_MASTER)
+		if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
 			smp->method = CFM_PASSKEY;
 		else
 			smp->method = REQ_PASSKEY;
@@ -963,7 +964,7 @@ static u8 smp_confirm(struct smp_chan *smp)
 
 	smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
 
-	if (conn->hcon->out)
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
 		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
 	else
 		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
@@ -979,7 +980,8 @@ static u8 smp_random(struct smp_chan *smp)
 	int ret;
 
 	bt_dev_dbg(conn->hcon->hdev, "conn %p %s", conn,
-		   conn->hcon->out ? "initiator" : "responder");
+		   test_bit(SMP_FLAG_INITIATOR, &smp->flags) ? "initiator" :
+		   "responder");
 
 	ret = smp_c1(smp->tk, smp->rrnd, smp->preq, smp->prsp,
 		     hcon->init_addr_type, &hcon->init_addr,
@@ -993,7 +995,7 @@ static u8 smp_random(struct smp_chan *smp)
 		return SMP_CONFIRM_FAILED;
 	}
 
-	if (hcon->out) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		u8 stk[16];
 		__le64 rand = 0;
 		__le16 ediv = 0;
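sco_conn_hold_unless_zero() in the rewritten sco_recv_scodata() above follows the standard "take a reference only if the object is still live" pattern (compare the kernel's refcount_inc_not_zero()). A self-contained userspace analogue with C11 atomics; the names are illustrative, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct obj {
	atomic_uint refs;	/* 0 means teardown has already begun */
};

static bool obj_hold_unless_zero(struct obj *o)
{
	unsigned int old;

	if (!o)
		return false;

	old = atomic_load(&o->refs);
	while (old != 0) {
		/* On failure the CAS reloads 'old', so just retry. */
		if (atomic_compare_exchange_weak(&o->refs, &old, old + 1))
			return true;	/* reference safely taken */
	}
	return false;	/* lost the race with the final put */
}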
+			 */
+			queue_delayed_work(hdev->workqueue,
+					   &conn->id_addr_timer,
+					   ID_ADDR_TIMEOUT);
 		}
 	}
 
@@ -1243,14 +1250,15 @@ static void smp_distribute_keys(struct smp_chan *smp)
 	rsp = (void *) &smp->prsp[1];
 
 	/* The responder sends its keys first */
-	if (hcon->out && (smp->remote_key_dist & KEY_DIST_MASK)) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags) &&
+	    (smp->remote_key_dist & KEY_DIST_MASK)) {
 		smp_allow_key_dist(smp);
 		return;
 	}
 
 	req = (void *) &smp->preq[1];
 
-	if (hcon->out) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		keydist = &rsp->init_key_dist;
 		*keydist &= req->init_key_dist;
 	} else {
@@ -1371,7 +1379,7 @@ static void smp_timeout(struct work_struct *work)
 
 	bt_dev_dbg(conn->hcon->hdev, "conn %p", conn);
 
-	hci_disconnect(conn->hcon, HCI_ERROR_REMOTE_USER_TERM);
+	hci_disconnect(conn->hcon, HCI_ERROR_AUTH_FAILURE);
 }
 
 static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
@@ -1419,7 +1427,7 @@ static int sc_mackey_and_ltk(struct smp_chan *smp, u8 mackey[16], u8 ltk[16])
 	struct hci_conn *hcon = smp->conn->hcon;
 	u8 *na, *nb, a[7], b[7];
 
-	if (hcon->out) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		na = smp->prnd;
 		nb = smp->rrnd;
 	} else {
@@ -1447,7 +1455,7 @@ static void sc_dhkey_check(struct smp_chan *smp)
 	a[6] = hcon->init_addr_type;
 	b[6] = hcon->resp_addr_type;
 
-	if (hcon->out) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		local_addr = a;
 		remote_addr = b;
 		memcpy(io_cap, &smp->preq[1], 3);
@@ -1526,7 +1534,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
 		/* The round is only complete when the initiator
 		 * receives pairing random.
 		 */
-		if (!hcon->out) {
+		if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 			smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
 				     sizeof(smp->prnd), smp->prnd);
 			if (smp->passkey_round == 20)
@@ -1554,7 +1562,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
 
 		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
 
-		if (hcon->out) {
+		if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 			smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
 				     sizeof(smp->prnd), smp->prnd);
 			return 0;
@@ -1565,7 +1573,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
 	case SMP_CMD_PUBLIC_KEY:
 	default:
 		/* Initiating device starts the round */
-		if (!hcon->out)
+		if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
 			return 0;
 
 		bt_dev_dbg(hdev, "Starting passkey round %u",
@@ -1610,7 +1618,7 @@ static int sc_user_reply(struct smp_chan *smp, u16 mgmt_op, __le32 passkey)
 	}
 
 	/* Initiator sends DHKey check first */
-	if (hcon->out) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		sc_dhkey_check(smp);
 		SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
 	} else if (test_and_clear_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags)) {
@@ -1733,7 +1741,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
 	struct smp_cmd_pairing rsp, *req = (void *) skb->data;
 	struct l2cap_chan *chan = conn->smp;
 	struct hci_dev *hdev = conn->hcon->hdev;
-	struct smp_chan *smp;
+	struct smp_chan *smp = chan->data;
 	u8 key_size, auth, sec_level;
 	int ret;
 
@@ -1742,16 +1750,14 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
 	if (skb->len < sizeof(*req))
 		return SMP_INVALID_PARAMS;
 
-	if (conn->hcon->role != HCI_ROLE_SLAVE)
+	if (smp && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
 		return SMP_CMD_NOTSUPP;
 
-	if (!chan->data)
+	if (!smp) {
 		smp = smp_chan_create(conn);
-	else
-		smp = chan->data;
-
-	if (!smp)
-		return SMP_UNSPECIFIED;
+		if (!smp)
+			return SMP_UNSPECIFIED;
+	}
 
 	/* We didn't start the pairing, so match remote */
 	auth = req->auth_req & AUTH_REQ_MASK(hdev);
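The steady replacement of hcon->out and HCI role checks with test_bit(SMP_FLAG_INITIATOR, &smp->flags) throughout this file keys the pairing state machine to whoever actually sent the Pairing Request; the HCI connection role can differ from that (role switch, cross-transport pairing). A toy model of the bitop usage; the bit position and names here are illustrative, the kernel uses set_bit()/test_bit() on smp->flags:

#include <stdatomic.h>
#include <stdbool.h>

#define FLAG_INITIATOR	3UL	/* illustrative bit position */

struct smp_ctx {
	atomic_ulong flags;
};

static void ctx_set_initiator(struct smp_ctx *s)
{
	atomic_fetch_or(&s->flags, 1UL << FLAG_INITIATOR);
}

static bool ctx_is_initiator(struct smp_ctx *s)
{
	return atomic_load(&s->flags) & (1UL << FLAG_INITIATOR);
}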
@@ -1933,7 +1939,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
 	if (skb->len < sizeof(*rsp))
 		return SMP_INVALID_PARAMS;
 
-	if (conn->hcon->role != HCI_ROLE_MASTER)
+	if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
 		return SMP_CMD_NOTSUPP;
 
 	skb_pull(skb, sizeof(*rsp));
@@ -2028,7 +2034,7 @@ static u8 sc_check_confirm(struct smp_chan *smp)
 	if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
 		return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM);
 
-	if (conn->hcon->out) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
 			     smp->prnd);
 		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
@@ -2050,7 +2056,7 @@ static int fixup_sc_false_positive(struct smp_chan *smp)
 	u8 auth;
 
 	/* The issue is only observed when we're in responder role */
-	if (hcon->out)
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
 		return SMP_UNSPECIFIED;
 
 	if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
@@ -2086,7 +2092,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
 	struct hci_dev *hdev = hcon->hdev;
 
 	bt_dev_dbg(hdev, "conn %p %s", conn,
-		   hcon->out ? "initiator" : "responder");
+		   test_bit(SMP_FLAG_INITIATOR, &smp->flags) ? "initiator" :
+		   "responder");
 
 	if (skb->len < sizeof(smp->pcnf))
 		return SMP_INVALID_PARAMS;
@@ -2108,7 +2115,7 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
 			return ret;
 	}
 
-	if (conn->hcon->out) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
 			     smp->prnd);
 		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
@@ -2129,7 +2136,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
 	struct smp_chan *smp = chan->data;
 	struct hci_conn *hcon = conn->hcon;
 	u8 *pkax, *pkbx, *na, *nb, confirm_hint;
-	u32 passkey;
+	u32 passkey = 0;
 	int err;
 
 	bt_dev_dbg(hcon->hdev, "conn %p", conn);
@@ -2143,7 +2150,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
 	if (!test_bit(SMP_FLAG_SC, &smp->flags))
 		return smp_random(smp);
 
-	if (hcon->out) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		pkax = smp->local_pk;
 		pkbx = smp->remote_pk;
 		na = smp->prnd;
@@ -2156,7 +2163,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
 	}
 
 	if (smp->method == REQ_OOB) {
-		if (!hcon->out)
+		if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
 			smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
 				     sizeof(smp->prnd), smp->prnd);
 		SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
@@ -2167,7 +2174,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
 	if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
 		return sc_passkey_round(smp, SMP_CMD_PAIRING_RANDOM);
 
-	if (hcon->out) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		u8 cfm[16];
 
 		err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk,
@@ -2181,24 +2188,6 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
 		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
 			     smp->prnd);
 		SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
-
-		/* Only Just-Works pairing requires extra checks */
-		if (smp->method != JUST_WORKS)
-			goto mackey_and_ltk;
-
-		/* If there already exists long term key in local host, leave
-		 * the decision to user space since the remote device could
-		 * be legitimate or malicious.
-		 */
-		if (hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
-				 hcon->role)) {
-			/* Set passkey to 0. The value can be any number since
-			 * it'll be ignored anyway.
-			 */
-			passkey = 0;
-			confirm_hint = 1;
-			goto confirm;
-		}
 	}
 
 mackey_and_ltk:
@@ -2208,7 +2197,7 @@ mackey_and_ltk:
 		return SMP_UNSPECIFIED;
 
 	if (smp->method == REQ_OOB) {
-		if (hcon->out) {
+		if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 			sc_dhkey_check(smp);
 			SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
 		}
@@ -2219,11 +2208,12 @@ mackey_and_ltk:
 	if (err)
 		return SMP_UNSPECIFIED;
 
-	confirm_hint = 0;
-
-confirm:
-	if (smp->method == JUST_WORKS)
-		confirm_hint = 1;
+	/* Always require user confirmation for Just-Works pairing to prevent
+	 * impersonation attacks, or in case of a legitimate device that is
+	 * re-pairing use the confirmation as acknowledgment to proceed with
+	 * the creation of new keys.
+	 */
+	confirm_hint = smp->method == JUST_WORKS ? 1 : 0;
 
 	err = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type,
 					hcon->dst_type, passkey, confirm_hint);
@@ -2282,10 +2272,27 @@ bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
 	return false;
 }
 
+static void smp_send_pairing_req(struct smp_chan *smp, __u8 auth)
+{
+	struct smp_cmd_pairing cp;
+
+	if (smp->conn->hcon->type == ACL_LINK)
+		build_bredr_pairing_cmd(smp, &cp, NULL);
+	else
+		build_pairing_cmd(smp->conn, &cp, NULL, auth);
+
+	smp->preq[0] = SMP_CMD_PAIRING_REQ;
+	memcpy(&smp->preq[1], &cp, sizeof(cp));
+
+	smp_send_cmd(smp->conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+	SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
+
+	set_bit(SMP_FLAG_INITIATOR, &smp->flags);
+}
+
 static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 {
 	struct smp_cmd_security_req *rp = (void *) skb->data;
-	struct smp_cmd_pairing cp;
 	struct hci_conn *hcon = conn->hcon;
 	struct hci_dev *hdev = hcon->hdev;
 	struct smp_chan *smp;
@@ -2334,16 +2341,20 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 
 	skb_pull(skb, sizeof(*rp));
 
-	memset(&cp, 0, sizeof(cp));
-	build_pairing_cmd(conn, &cp, NULL, auth);
+	smp_send_pairing_req(smp, auth);
 
-	smp->preq[0] = SMP_CMD_PAIRING_REQ;
-	memcpy(&smp->preq[1], &cp, sizeof(cp));
+	return 0;
+}
 
-	smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
-	SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
+static void smp_send_security_req(struct smp_chan *smp, __u8 auth)
+{
+	struct smp_cmd_security_req cp;
 
-	return 0;
+	cp.auth_req = auth;
+	smp_send_cmd(smp->conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
+	SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ);
+
+	clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
 }
 
 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
@@ -2414,23 +2425,11 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 		authreq |= SMP_AUTH_MITM;
 	}
 
-	if (hcon->role == HCI_ROLE_MASTER) {
-		struct smp_cmd_pairing cp;
-
-		build_pairing_cmd(conn, &cp, NULL, authreq);
-		smp->preq[0] = SMP_CMD_PAIRING_REQ;
-		memcpy(&smp->preq[1], &cp, sizeof(cp));
-
-		smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
-		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
-	} else {
-		struct smp_cmd_security_req cp;
-		cp.auth_req = authreq;
-		smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
-		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ);
-	}
+	if (hcon->role == HCI_ROLE_MASTER)
+		smp_send_pairing_req(smp, authreq);
+	else
+		smp_send_security_req(smp, authreq);
 
-	set_bit(SMP_FLAG_INITIATOR, &smp->flags);
 	ret = 0;
 
 unlock:
@@ -2681,8 +2680,6 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
 
 static u8 sc_select_method(struct smp_chan *smp)
 {
-	struct l2cap_conn *conn = smp->conn;
-	struct hci_conn *hcon = conn->hcon;
 	struct smp_cmd_pairing *local, *remote;
 	u8 local_mitm, remote_mitm, local_io, remote_io, method;
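The new smp_send_pairing_req() and smp_send_security_req() helpers pin SMP_FLAG_INITIATOR at the moment the opening PDU is sent, which is the invariant that makes every flag test above sound. A condensed model of that invariant (types and names illustrative, not the kernel code):

#include <stdbool.h>

struct pairing {
	bool initiator;
};

/* Whoever emits the Pairing Request becomes the initiator... */
static void send_pairing_req(struct pairing *p)
{
	p->initiator = true;
	/* ... build and transmit SMP_CMD_PAIRING_REQ here ... */
}

/* ... while emitting a Security Request explicitly cedes that role
 * and waits for the peer's Pairing Request. */
static void send_security_req(struct pairing *p)
{
	p->initiator = false;
	/* ... transmit SMP_CMD_SECURITY_REQ here ... */
}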
@@ -2695,7 +2692,7 @@ static u8 sc_select_method(struct smp_chan *smp)
 	 * the "struct smp_cmd_pairing" from them we need to skip the
 	 * first byte which contains the opcode.
 	 */
-	if (hcon->out) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		local = (void *) &smp->preq[1];
 		remote = (void *) &smp->prsp[1];
 	} else {
@@ -2764,7 +2761,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
 	/* Non-initiating device sends its public key after receiving
 	 * the key from the initiating device.
 	 */
-	if (!hcon->out) {
+	if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		err = sc_send_public_key(smp);
 		if (err)
 			return err;
@@ -2826,7 +2823,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
 	}
 
 	if (smp->method == REQ_OOB) {
-		if (hcon->out)
+		if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
 			smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
 				     sizeof(smp->prnd), smp->prnd);
 
@@ -2835,7 +2832,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
 		return 0;
 	}
 
-	if (hcon->out)
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
 		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
 
 	if (smp->method == REQ_PASSKEY) {
@@ -2850,7 +2847,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
 	/* The Initiating device waits for the non-initiating device to
 	 * send the confirm value.
 	 */
-	if (conn->hcon->out)
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
 		return 0;
 
 	err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd,
@@ -2884,7 +2881,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
 	a[6] = hcon->init_addr_type;
 	b[6] = hcon->resp_addr_type;
 
-	if (hcon->out) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		local_addr = a;
 		remote_addr = b;
 		memcpy(io_cap, &smp->prsp[1], 3);
@@ -2909,7 +2906,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
 	if (crypto_memneq(check->e, e, 16))
 		return SMP_DHKEY_CHECK_FAILED;
 
-	if (!hcon->out) {
+	if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) {
 			set_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags);
 			return 0;
@@ -2921,7 +2918,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
 
 	sc_add_ltk(smp);
 
-	if (hcon->out) {
+	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
 		hci_le_start_enc(hcon, 0, 0, smp->tk, smp->enc_key_size);
 		hcon->enc_key_size = smp->enc_key_size;
 	}
@@ -2963,8 +2960,25 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
 	if (code > SMP_CMD_MAX)
 		goto drop;
 
-	if (smp && !test_and_clear_bit(code, &smp->allow_cmd))
+	if (smp && !test_and_clear_bit(code, &smp->allow_cmd)) {
+		/* If there is a context and the command is not allowed,
+		 * consider it a failure so the session is cleaned up properly.
+		 */
+		switch (code) {
+		case SMP_CMD_IDENT_INFO:
+		case SMP_CMD_IDENT_ADDR_INFO:
+		case SMP_CMD_SIGN_INFO:
+			/* 3.6.1. Key distribution and generation
+			 *
+			 * A device may reject a distributed key by sending the
+			 * Pairing Failed command with the reason set to
+			 * "Key Rejected".
+			 */
+			smp_failure(conn, SMP_KEY_REJECTED);
+			break;
+		}
 		goto drop;
+	}
 
 	/* If we don't have a context the only allowed commands are
 	 * pairing request and security request.
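The rejection path added above works because smp->allow_cmd is a one-shot permission bitmap: each expected opcode is armed with SMP_ALLOW_CMD() and consumed by test_and_clear_bit() on receipt, so replayed or out-of-order PDUs fall through to the failure handling. A userspace analogue of that bitmap (illustrative; plain bitops instead of the kernel's atomic ones):

#include <stdbool.h>

struct session {
	unsigned long allow_cmd;	/* one bit per expected opcode */
};

static void allow_cmd(struct session *s, unsigned int opcode)
{
	s->allow_cmd |= 1UL << opcode;
}

/* Returns true exactly once per grant, mirroring test_and_clear_bit(). */
static bool consume_cmd(struct session *s, unsigned int opcode)
{
	unsigned long bit = 1UL << opcode;
	bool allowed = s->allow_cmd & bit;

	s->allow_cmd &= ~bit;
	return allowed;
}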
@@ -3070,7 +3084,6 @@ static void bredr_pairing(struct l2cap_chan *chan)
 	struct l2cap_conn *conn = chan->conn;
 	struct hci_conn *hcon = conn->hcon;
 	struct hci_dev *hdev = hcon->hdev;
-	struct smp_cmd_pairing req;
 	struct smp_chan *smp;
 
 	bt_dev_dbg(hdev, "chan %p", chan);
@@ -3122,14 +3135,7 @@ static void bredr_pairing(struct l2cap_chan *chan)
 
 	bt_dev_dbg(hdev, "starting SMP over BR/EDR");
 
-	/* Prepare and send the BR/EDR SMP Pairing Request */
-	build_bredr_pairing_cmd(smp, &req, NULL);
-
-	smp->preq[0] = SMP_CMD_PAIRING_REQ;
-	memcpy(&smp->preq[1], &req, sizeof(req));
-
-	smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(req), &req);
-	SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
+	smp_send_pairing_req(smp, 0x00);
 }
 
 static void smp_resume_cb(struct l2cap_chan *chan)
@@ -3166,7 +3172,7 @@ static void smp_ready_cb(struct l2cap_chan *chan)
 	/* No need to call l2cap_chan_hold() here since we already own
 	 * the reference taken in smp_new_conn_cb(). This is just the
 	 * first time that we tie it to a specific pointer. The code in
-	 * l2cap_core.c ensures that there's no risk this function wont
+	 * l2cap_core.c ensures that there's no risk this function won't
 	 * get called if smp_new_conn_cb was previously called.
 	 */
 	conn->smp = chan;
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 87a59ec2c9f0..c5da53dfab04 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -138,6 +138,7 @@ struct smp_cmd_keypress_notify {
 #define SMP_NUMERIC_COMP_FAILED		0x0c
 #define SMP_BREDR_PAIRING_IN_PROGRESS	0x0d
 #define SMP_CROSS_TRANSP_NOT_ALLOWED	0x0e
+#define SMP_KEY_REJECTED		0x0f
 
 #define SMP_MIN_ENC_KEY_SIZE		7
 #define SMP_MAX_ENC_KEY_SIZE		16
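SMP_KEY_REJECTED (0x0f) extends the Pairing Failed reason table that smp_failure() puts on the wire. For reference, a small logging helper covering the tail of that table; the strings follow the Bluetooth Core Specification reason names and are given here purely for illustration:

#include <stdio.h>

static const char *smp_reason_str(unsigned char reason)
{
	switch (reason) {
	case 0x0c: return "Numeric Comparison Failed";
	case 0x0d: return "BR/EDR Pairing In Progress";
	case 0x0e: return "Cross-transport Key Derivation/Generation Not Allowed";
	case 0x0f: return "Key Rejected";
	default:   return "Unknown reason";
	}
}

int main(void)
{
	printf("0x0f -> %s\n", smp_reason_str(0x0f));
	return 0;
}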
