author    Johannes Berg <johannes.berg@intel.com>    2022-10-10 11:03:31 +0200
committer Johannes Berg <johannes.berg@intel.com>    2022-10-10 11:03:43 +0200
commit    dfd2d876b3fda1790bc0239ba4c6967e25d16e91 (patch)
tree      45c2ec4b25afdf7b521dec642f6b75112bb401a3 /net
parent    a790cc3a4fad75048295571a350b95b87e022a5a (diff)
parent    10d5ea5a436da8d60cdb5845f454d595accdbce0 (diff)
Merge remote-tracking branch 'wireless/main' into wireless-next
Pull in wireless/main content since some new code would otherwise conflict with it.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_conn.c | 162
-rw-r--r--  net/bluetooth/hci_core.c | 68
-rw-r--r--  net/bluetooth/hci_debugfs.c | 2
-rw-r--r--  net/bluetooth/hci_event.c | 175
-rw-r--r--  net/bluetooth/hci_request.c | 1534
-rw-r--r--  net/bluetooth/hci_request.h | 53
-rw-r--r--  net/bluetooth/hci_sock.c | 4
-rw-r--r--  net/bluetooth/hci_sync.c | 491
-rw-r--r--  net/bluetooth/hci_sysfs.c | 3
-rw-r--r--  net/bluetooth/l2cap_core.c | 17
-rw-r--r--  net/bluetooth/mgmt.c | 610
-rw-r--r--  net/bluetooth/mgmt_util.c | 74
-rw-r--r--  net/bluetooth/mgmt_util.h | 18
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 3
-rw-r--r--  net/bpf/test_run.c | 37
-rw-r--r--  net/core/devlink.c | 80
-rw-r--r--  net/core/filter.c | 124
-rw-r--r--  net/core/gro.c | 18
-rw-r--r--  net/core/lwtunnel.c | 1
-rw-r--r--  net/core/skmsg.c | 12
-rw-r--r--  net/core/stream.c | 3
-rw-r--r--  net/dsa/dsa2.c | 184
-rw-r--r--  net/dsa/dsa_priv.h | 1
-rw-r--r--  net/dsa/port.c | 22
-rw-r--r--  net/dsa/slave.c | 6
-rw-r--r--  net/ethtool/Makefile | 3
-rw-r--r--  net/ethtool/common.h | 1
-rw-r--r--  net/ethtool/netlink.c | 17
-rw-r--r--  net/ethtool/netlink.h | 4
-rw-r--r--  net/ethtool/pse-pd.c | 185
-rw-r--r--  net/ieee802154/socket.c | 3
-rw-r--r--  net/ipv4/ah4.c | 23
-rw-r--r--  net/ipv4/bpf_tcp_ca.c | 2
-rw-r--r--  net/ipv4/esp4.c | 55
-rw-r--r--  net/ipv4/esp4_offload.c | 5
-rw-r--r--  net/ipv4/inet_hashtables.c | 4
-rw-r--r--  net/ipv4/ip_tunnel_core.c | 67
-rw-r--r--  net/ipv4/ipcomp.c | 10
-rw-r--r--  net/ipv4/ipip.c | 62
-rw-r--r--  net/ipv4/ping.c | 15
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_minisocks.c | 1
-rw-r--r--  net/ipv4/tcp_offload.c | 17
-rw-r--r--  net/ipv4/tcp_output.c | 19
-rw-r--r--  net/ipv4/xfrm4_tunnel.c | 10
-rw-r--r--  net/ipv6/ah6.c | 23
-rw-r--r--  net/ipv6/esp6.c | 55
-rw-r--r--  net/ipv6/esp6_offload.c | 5
-rw-r--r--  net/ipv6/ip6_tunnel.c | 37
-rw-r--r--  net/ipv6/ipcomp6.c | 10
-rw-r--r--  net/ipv6/mip6.c | 14
-rw-r--r--  net/ipv6/ping.c | 16
-rw-r--r--  net/ipv6/sit.c | 65
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 10
-rw-r--r--  net/mac80211/iface.c | 8
-rw-r--r--  net/mac80211/mlme.c | 7
-rw-r--r--  net/mac80211/rx.c | 9
-rw-r--r--  net/mac80211/tx.c | 10
-rw-r--r--  net/mac80211/util.c | 2
-rw-r--r--  net/mptcp/protocol.c | 124
-rw-r--r--  net/netfilter/Makefile | 6
-rw-r--r--  net/netfilter/nf_conntrack_bpf.c | 74
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 1
-rw-r--r--  net/netfilter/nf_nat_bpf.c | 79
-rw-r--r--  net/netfilter/nf_nat_core.c | 4
-rw-r--r--  net/netlink/genetlink.c | 32
-rw-r--r--  net/rds/tcp.c | 2
-rw-r--r--  net/sched/cls_basic.c | 7
-rw-r--r--  net/sched/cls_bpf.c | 7
-rw-r--r--  net/sched/cls_flower.c | 7
-rw-r--r--  net/sched/cls_fw.c | 7
-rw-r--r--  net/sched/cls_matchall.c | 7
-rw-r--r--  net/sched/cls_route.c | 7
-rw-r--r--  net/sched/cls_rsvp.h | 7
-rw-r--r--  net/sched/cls_tcindex.c | 7
-rw-r--r--  net/sched/cls_u32.c | 7
-rw-r--r--  net/sched/sch_api.c | 2
-rw-r--r--  net/sctp/auth.c | 18
-rw-r--r--  net/sunrpc/svc.c | 34
-rw-r--r--  net/sunrpc/xdr.c | 24
-rw-r--r--  net/unix/af_unix.c | 13
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 2
-rw-r--r--  net/wireless/nl80211.c | 4
-rw-r--r--  net/wireless/util.c | 40
-rw-r--r--  net/wireless/wext-core.c | 17
-rw-r--r--  net/xdp/xsk.c | 26
-rw-r--r--  net/xdp/xsk_buff_pool.c | 5
-rw-r--r--  net/xdp/xsk_queue.h | 22
-rw-r--r--  net/xfrm/xfrm_device.c | 20
-rw-r--r--  net/xfrm/xfrm_input.c | 25
-rw-r--r--  net/xfrm/xfrm_interface.c | 206
-rw-r--r--  net/xfrm/xfrm_ipcomp.c | 11
-rw-r--r--  net/xfrm/xfrm_policy.c | 25
-rw-r--r--  net/xfrm/xfrm_replay.c | 10
-rw-r--r--  net/xfrm/xfrm_state.c | 30
-rw-r--r--  net/xfrm/xfrm_user.c | 370
96 files changed, 3327 insertions(+), 2440 deletions(-)
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 9777e7b109ee..7a59c4487050 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -44,6 +44,11 @@ struct sco_param {
u8 retrans_effort;
};
+struct conn_handle_t {
+ struct hci_conn *conn;
+ __u16 handle;
+};
+
static const struct sco_param esco_param_cvsd[] = {
{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
@@ -316,17 +321,60 @@ static bool find_next_esco_param(struct hci_conn *conn,
return conn->attempt <= size;
}
-static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)
+static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
{
- struct hci_dev *hdev = conn->hdev;
+ int err;
+ __u8 vnd_len, *vnd_data = NULL;
+ struct hci_op_configure_data_path *cmd = NULL;
+
+ err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
+ &vnd_data);
+ if (err < 0)
+ goto error;
+
+ cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
+ if (err < 0)
+ goto error;
+
+ cmd->vnd_len = vnd_len;
+ memcpy(cmd->vnd_data, vnd_data, vnd_len);
+
+ cmd->direction = 0x00;
+ __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
+ sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
+
+ cmd->direction = 0x01;
+ err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
+ sizeof(*cmd) + vnd_len, cmd,
+ HCI_CMD_TIMEOUT);
+error:
+
+ kfree(cmd);
+ kfree(vnd_data);
+ return err;
+}
+
+static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
+{
+ struct conn_handle_t *conn_handle = data;
+ struct hci_conn *conn = conn_handle->conn;
+ __u16 handle = conn_handle->handle;
struct hci_cp_enhanced_setup_sync_conn cp;
const struct sco_param *param;
+ kfree(conn_handle);
+
bt_dev_dbg(hdev, "hcon %p", conn);
/* for offload use case, codec needs to be configured before opening SCO */
if (conn->codec.data_path)
- hci_req_configure_datapath(hdev, &conn->codec);
+ configure_datapath_sync(hdev, &conn->codec);
conn->state = BT_CONNECT;
conn->out = true;
@@ -344,7 +392,7 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)
case BT_CODEC_MSBC:
if (!find_next_esco_param(conn, esco_param_msbc,
ARRAY_SIZE(esco_param_msbc)))
- return false;
+ return -EINVAL;
param = &esco_param_msbc[conn->attempt - 1];
cp.tx_coding_format.id = 0x05;
@@ -396,11 +444,11 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)
if (lmp_esco_capable(conn->link)) {
if (!find_next_esco_param(conn, esco_param_cvsd,
ARRAY_SIZE(esco_param_cvsd)))
- return false;
+ return -EINVAL;
param = &esco_param_cvsd[conn->attempt - 1];
} else {
if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
- return false;
+ return -EINVAL;
param = &sco_param_cvsd[conn->attempt - 1];
}
cp.tx_coding_format.id = 2;
@@ -423,7 +471,7 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)
cp.out_transport_unit_size = 16;
break;
default:
- return false;
+ return -EINVAL;
}
cp.retrans_effort = param->retrans_effort;
@@ -431,9 +479,9 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)
cp.max_latency = __cpu_to_le16(param->max_latency);
if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
- return false;
+ return -EIO;
- return true;
+ return 0;
}
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
@@ -490,8 +538,24 @@ static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
- if (enhanced_sync_conn_capable(conn->hdev))
- return hci_enhanced_setup_sync_conn(conn, handle);
+ int result;
+ struct conn_handle_t *conn_handle;
+
+ if (enhanced_sync_conn_capable(conn->hdev)) {
+ conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
+
+ if (!conn_handle)
+ return false;
+
+ conn_handle->conn = conn;
+ conn_handle->handle = handle;
+ result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
+ conn_handle, NULL);
+ if (result < 0)
+ kfree(conn_handle);
+
+ return result == 0;
+ }
return hci_setup_sync_conn(conn, handle);
}
@@ -2696,3 +2760,79 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
return phys;
}
+
+int hci_abort_conn(struct hci_conn *conn, u8 reason)
+{
+ int r = 0;
+
+ switch (conn->state) {
+ case BT_CONNECTED:
+ case BT_CONFIG:
+ if (conn->type == AMP_LINK) {
+ struct hci_cp_disconn_phy_link cp;
+
+ cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
+ cp.reason = reason;
+ r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
+ sizeof(cp), &cp);
+ } else {
+ struct hci_cp_disconnect dc;
+
+ dc.handle = cpu_to_le16(conn->handle);
+ dc.reason = reason;
+ r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
+ sizeof(dc), &dc);
+ }
+
+ conn->state = BT_DISCONN;
+
+ break;
+ case BT_CONNECT:
+ if (conn->type == LE_LINK) {
+ if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+ break;
+ r = hci_send_cmd(conn->hdev,
+ HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
+ } else if (conn->type == ACL_LINK) {
+ if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
+ break;
+ r = hci_send_cmd(conn->hdev,
+ HCI_OP_CREATE_CONN_CANCEL,
+ 6, &conn->dst);
+ }
+ break;
+ case BT_CONNECT2:
+ if (conn->type == ACL_LINK) {
+ struct hci_cp_reject_conn_req rej;
+
+ bacpy(&rej.bdaddr, &conn->dst);
+ rej.reason = reason;
+
+ r = hci_send_cmd(conn->hdev,
+ HCI_OP_REJECT_CONN_REQ,
+ sizeof(rej), &rej);
+ } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
+ struct hci_cp_reject_sync_conn_req rej;
+
+ bacpy(&rej.bdaddr, &conn->dst);
+
+ /* SCO rejection has its own limited set of
+ * allowed error values (0x0D-0x0F) which isn't
+ * compatible with most values passed to this
+ * function. To be safe hard-code one of the
+ * values that's suitable for SCO.
+ */
+ rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
+
+ r = hci_send_cmd(conn->hdev,
+ HCI_OP_REJECT_SYNC_CONN_REQ,
+ sizeof(rej), &rej);
+ }
+ break;
+ default:
+ conn->state = BT_CLOSED;
+ break;
+ }
+
+ return r;
+}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b3a5a3cc9372..0540555b3704 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -597,6 +597,15 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
/* Cancel these to avoid queueing non-chained pending work */
hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
+ /* Wait for
+ *
+ * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
+ * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
+ *
+ * inside RCU section to see the flag or complete scheduling.
+ */
+ synchronize_rcu();
+ /* Explicitly cancel works in case scheduled after setting the flag. */
cancel_delayed_work(&hdev->cmd_timer);
cancel_delayed_work(&hdev->ncmd_timer);
@@ -714,7 +723,7 @@ static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
- hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
+ hci_update_adv_data(hdev, hdev->cur_adv_instance);
mgmt_new_settings(hdev);
}
@@ -1706,7 +1715,8 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
u32 flags, u16 adv_data_len, u8 *adv_data,
u16 scan_rsp_len, u8 *scan_rsp_data,
u16 timeout, u16 duration, s8 tx_power,
- u32 min_interval, u32 max_interval)
+ u32 min_interval, u32 max_interval,
+ u8 mesh_handle)
{
struct adv_info *adv;
@@ -1717,7 +1727,7 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
} else {
if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
- instance < 1 || instance > hdev->le_num_of_adv_sets)
+ instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
return ERR_PTR(-EOVERFLOW);
adv = kzalloc(sizeof(*adv), GFP_KERNEL);
@@ -1734,6 +1744,11 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
adv->min_interval = min_interval;
adv->max_interval = max_interval;
adv->tx_power = tx_power;
+ /* Defining a mesh_handle changes the timing units to ms,
+ * rather than seconds, and ties the instance to the requested
+ * mesh_tx queue.
+ */
+ adv->mesh = mesh_handle;
hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
scan_rsp_len, scan_rsp_data);
@@ -1762,7 +1777,7 @@ struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
- min_interval, max_interval);
+ min_interval, max_interval, 0);
if (IS_ERR(adv))
return adv;
@@ -2391,6 +2406,10 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
container_of(nb, struct hci_dev, suspend_notifier);
int ret = 0;
+ /* Userspace has full control of this device. Do nothing. */
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
+ return NOTIFY_DONE;
+
if (action == PM_SUSPEND_PREPARE)
ret = hci_suspend_dev(hdev);
else if (action == PM_POST_SUSPEND)
@@ -2486,6 +2505,7 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
mutex_init(&hdev->lock);
mutex_init(&hdev->req_lock);
+ INIT_LIST_HEAD(&hdev->mesh_pending);
INIT_LIST_HEAD(&hdev->mgmt_pending);
INIT_LIST_HEAD(&hdev->reject_list);
INIT_LIST_HEAD(&hdev->accept_list);
@@ -3469,15 +3489,27 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
-static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
+static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
- if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
- /* ACL tx timeout must be longer than maximum
- * link supervision timeout (40.9 seconds) */
- if (!cnt && time_after(jiffies, hdev->acl_last_tx +
- HCI_ACL_TX_TIMEOUT))
- hci_link_tx_to(hdev, ACL_LINK);
+ unsigned long last_tx;
+
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+ return;
+
+ switch (type) {
+ case LE_LINK:
+ last_tx = hdev->le_last_tx;
+ break;
+ default:
+ last_tx = hdev->acl_last_tx;
+ break;
}
+
+ /* tx timeout must be longer than maximum link supervision timeout
+ * (40.9 seconds)
+ */
+ if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
+ hci_link_tx_to(hdev, type);
}
/* Schedule SCO */
@@ -3535,7 +3567,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)
struct sk_buff *skb;
int quote;
- __check_timeout(hdev, cnt);
+ __check_timeout(hdev, cnt, ACL_LINK);
while (hdev->acl_cnt &&
(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
@@ -3578,8 +3610,6 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)
int quote;
u8 type;
- __check_timeout(hdev, cnt);
-
BT_DBG("%s", hdev->name);
if (hdev->dev_type == HCI_AMP)
@@ -3587,6 +3617,8 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)
else
type = ACL_LINK;
+ __check_timeout(hdev, cnt, type);
+
while (hdev->block_cnt > 0 &&
(chan = hci_chan_sent(hdev, type, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
@@ -3660,7 +3692,7 @@ static void hci_sched_le(struct hci_dev *hdev)
cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
- __check_timeout(hdev, cnt);
+ __check_timeout(hdev, cnt, LE_LINK);
tmp = cnt;
while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
@@ -4056,12 +4088,14 @@ static void hci_cmd_work(struct work_struct *work)
if (res < 0)
__hci_cmd_sync_cancel(hdev, -res);
+ rcu_read_lock();
if (test_bit(HCI_RESET, &hdev->flags) ||
hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
cancel_delayed_work(&hdev->cmd_timer);
else
- schedule_delayed_work(&hdev->cmd_timer,
- HCI_CMD_TIMEOUT);
+ queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
+ HCI_CMD_TIMEOUT);
+ rcu_read_unlock();
} else {
skb_queue_head(&hdev->cmd_q, skb);
queue_work(hdev->workqueue, &hdev->cmd_work);
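The cmd_timer/ncmd_timer hunks above combine into a single RCU drain idiom; condensed here for readability (simplified, not verbatim from the patch):

/* Re-arm side (command completion paths): only re-queue the timer
 * inside an RCU read section, and only while the drain flag is clear.
 */
rcu_read_lock();
if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
			   HCI_CMD_TIMEOUT);
rcu_read_unlock();

/* Drain side (hci_dev_do_reset()): set the flag, wait out every
 * in-flight read section, then cancel any timer that was queued
 * before the flag became visible.
 */
hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
synchronize_rcu();
cancel_delayed_work(&hdev->cmd_timer);
cancel_delayed_work(&hdev->ncmd_timer);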
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 902b40a90b91..3f401ec5bb0c 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -1245,7 +1245,7 @@ void hci_debugfs_create_conn(struct hci_conn *conn)
struct hci_dev *hdev = conn->hdev;
char name[6];
- if (IS_ERR_OR_NULL(hdev->debugfs))
+ if (IS_ERR_OR_NULL(hdev->debugfs) || conn->debugfs)
return;
snprintf(name, sizeof(name), "%u", conn->handle);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 6643c9c20fa4..faca701bce2a 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -712,6 +712,47 @@ static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
return rp->status;
}
+static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_enc_key_size *rp = data;
+ struct hci_conn *conn;
+ u16 handle;
+ u8 status = rp->status;
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
+
+ handle = le16_to_cpu(rp->handle);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!conn) {
+ status = 0xFF;
+ goto done;
+ }
+
+ /* While unexpected, the read_enc_key_size command may fail. The most
+ * secure approach is to then assume the key size is 0 to force a
+ * disconnection.
+ */
+ if (status) {
+ bt_dev_err(hdev, "failed to read key size for handle %u",
+ handle);
+ conn->enc_key_size = 0;
+ } else {
+ conn->enc_key_size = rp->key_size;
+ status = 0;
+ }
+
+ hci_encrypt_cfm(conn, 0);
+
+done:
+ hci_dev_unlock(hdev);
+
+ return status;
+}
+
static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -1715,6 +1756,8 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
hci_dev_set_flag(hdev, HCI_LE_SCAN);
if (hdev->le_scan_type == LE_SCAN_ACTIVE)
clear_pending_adv_report(hdev);
+ if (hci_dev_test_flag(hdev, HCI_MESH))
+ hci_discovery_set_state(hdev, DISCOVERY_FINDING);
break;
case LE_SCAN_DISABLE:
@@ -1729,7 +1772,7 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
d->last_adv_addr_type, NULL,
d->last_adv_rssi, d->last_adv_flags,
d->last_adv_data,
- d->last_adv_data_len, NULL, 0);
+ d->last_adv_data_len, NULL, 0, 0);
}
/* Cancel this timer so that we don't try to disable scanning
@@ -1745,6 +1788,9 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
*/
if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
+ hdev->discovery.state == DISCOVERY_FINDING)
+ queue_work(hdev->workqueue, &hdev->reenable_adv_work);
break;
@@ -2152,7 +2198,7 @@ static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
adv_instance->tx_power = rp->tx_power;
}
/* Update adv data as tx power is known now */
- hci_req_update_adv_data(hdev, cp->handle);
+ hci_update_adv_data(hdev, cp->handle);
hci_dev_unlock(hdev);
@@ -3071,7 +3117,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, HCI_RSSI_INVALID,
- flags, NULL, 0, NULL, 0);
+ flags, NULL, 0, NULL, 0, 0);
}
hci_dev_unlock(hdev);
@@ -3534,47 +3580,6 @@ unlock:
hci_dev_unlock(hdev);
}
-static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
-{
- const struct hci_rp_read_enc_key_size *rp;
- struct hci_conn *conn;
- u16 handle;
-
- BT_DBG("%s status 0x%02x", hdev->name, status);
-
- if (!skb || skb->len < sizeof(*rp)) {
- bt_dev_err(hdev, "invalid read key size response");
- return;
- }
-
- rp = (void *)skb->data;
- handle = le16_to_cpu(rp->handle);
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- goto unlock;
-
- /* While unexpected, the read_enc_key_size command may fail. The most
- * secure approach is to then assume the key size is 0 to force a
- * disconnection.
- */
- if (rp->status) {
- bt_dev_err(hdev, "failed to read key size for handle %u",
- handle);
- conn->enc_key_size = 0;
- } else {
- conn->enc_key_size = rp->key_size;
- }
-
- hci_encrypt_cfm(conn, 0);
-
-unlock:
- hci_dev_unlock(hdev);
-}
-
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -3639,7 +3644,6 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
/* Try reading the encryption key size for encrypted ACL links */
if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
struct hci_cp_read_enc_key_size cp;
- struct hci_request req;
/* Only send HCI_Read_Encryption_Key_Size if the
* controller really supports it. If it doesn't, assume
@@ -3650,12 +3654,9 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
goto notify;
}
- hci_req_init(&req, hdev);
-
cp.handle = cpu_to_le16(conn->handle);
- hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
-
- if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
+ if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
+ sizeof(cp), &cp)) {
bt_dev_err(hdev, "sending read key size failed");
conn->enc_key_size = HCI_LINK_KEY_SIZE;
goto notify;
@@ -3766,16 +3767,18 @@ static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
{
cancel_delayed_work(&hdev->cmd_timer);
+ rcu_read_lock();
if (!test_bit(HCI_RESET, &hdev->flags)) {
if (ncmd) {
cancel_delayed_work(&hdev->ncmd_timer);
atomic_set(&hdev->cmd_cnt, 1);
} else {
if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
- schedule_delayed_work(&hdev->ncmd_timer,
- HCI_NCMD_TIMEOUT);
+ queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
+ HCI_NCMD_TIMEOUT);
}
}
+ rcu_read_unlock();
}
static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
@@ -4037,6 +4040,8 @@ static const struct hci_cc {
sizeof(struct hci_rp_read_local_amp_info)),
HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
sizeof(struct hci_rp_read_clock)),
+ HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
+ sizeof(struct hci_rp_read_enc_key_size)),
HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
sizeof(struct hci_rp_read_inq_rsp_tx_power)),
HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
@@ -4829,7 +4834,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi,
- flags, NULL, 0, NULL, 0);
+ flags, NULL, 0, NULL, 0, 0);
}
} else if (skb->len == array_size(ev->num,
sizeof(struct inquiry_info_rssi))) {
@@ -4860,7 +4865,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi,
- flags, NULL, 0, NULL, 0);
+ flags, NULL, 0, NULL, 0, 0);
}
} else {
bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
@@ -5116,7 +5121,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi,
- flags, info->data, eir_len, NULL, 0);
+ flags, info->data, eir_len, NULL, 0, 0);
}
hci_dev_unlock(hdev);
@@ -6172,7 +6177,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
u8 bdaddr_type, bdaddr_t *direct_addr,
u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
- bool ext_adv)
+ bool ext_adv, bool ctl_time, u64 instant)
{
struct discovery_state *d = &hdev->discovery;
struct smp_irk *irk;
@@ -6220,7 +6225,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
* important to see if the address is matching the local
* controller address.
*/
- if (direct_addr) {
+ if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
&bdaddr_resolved);
@@ -6268,6 +6273,18 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
conn->le_adv_data_len = len;
}
+ if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
+ flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
+ else
+ flags = 0;
+
+ /* All scan results should be sent up for Mesh systems */
+ if (hci_dev_test_flag(hdev, HCI_MESH)) {
+ mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
+ rssi, flags, data, len, NULL, 0, instant);
+ return;
+ }
+
/* Passive scanning shouldn't trigger any device found events,
* except for devices marked as CONN_REPORT for which we do send
* device found events, or advertisement monitoring requested.
@@ -6281,12 +6298,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
idr_is_empty(&hdev->adv_monitors_idr))
return;
- if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
- flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
- else
- flags = 0;
mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
- rssi, flags, data, len, NULL, 0);
+ rssi, flags, data, len, NULL, 0, 0);
return;
}
@@ -6305,11 +6318,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
* and just sends a scan response event, then it is marked as
* not connectable as well.
*/
- if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
- type == LE_ADV_SCAN_RSP)
+ if (type == LE_ADV_SCAN_RSP)
flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
- else
- flags = 0;
/* If there's nothing pending either store the data from this
* event or send an immediate device found event if the data
@@ -6326,7 +6336,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
}
mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
- rssi, flags, data, len, NULL, 0);
+ rssi, flags, data, len, NULL, 0, 0);
return;
}
@@ -6345,7 +6355,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
d->last_adv_addr_type, NULL,
d->last_adv_rssi, d->last_adv_flags,
d->last_adv_data,
- d->last_adv_data_len, NULL, 0);
+ d->last_adv_data_len, NULL, 0, 0);
/* If the new report will trigger a SCAN_REQ store it for
* later merging.
@@ -6362,7 +6372,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
*/
clear_pending_adv_report(hdev);
mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
- rssi, flags, data, len, NULL, 0);
+ rssi, flags, data, len, NULL, 0, 0);
return;
}
@@ -6372,7 +6382,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
*/
mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
- d->last_adv_data, d->last_adv_data_len, data, len);
+ d->last_adv_data, d->last_adv_data_len, data, len, 0);
clear_pending_adv_report(hdev);
}
@@ -6380,6 +6390,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_le_advertising_report *ev = data;
+ u64 instant = jiffies;
if (!ev->num)
return;
@@ -6404,7 +6415,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
rssi = info->data[info->length];
process_adv_report(hdev, info->type, &info->bdaddr,
info->bdaddr_type, NULL, 0, rssi,
- info->data, info->length, false);
+ info->data, info->length, false,
+ false, instant);
} else {
bt_dev_err(hdev, "Dropping invalid advertising data");
}
@@ -6461,6 +6473,7 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_le_ext_adv_report *ev = data;
+ u64 instant = jiffies;
if (!ev->num)
return;
@@ -6487,7 +6500,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
info->bdaddr_type, NULL, 0,
info->rssi, info->data, info->length,
- !(evt_type & LE_EXT_ADV_LEGACY_PDU));
+ !(evt_type & LE_EXT_ADV_LEGACY_PDU),
+ false, instant);
}
}
@@ -6710,6 +6724,7 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_le_direct_adv_report *ev = data;
+ u64 instant = jiffies;
int i;
if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
@@ -6727,7 +6742,7 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
process_adv_report(hdev, info->type, &info->bdaddr,
info->bdaddr_type, &info->direct_addr,
info->direct_addr_type, info->rssi, NULL, 0,
- false);
+ false, false, instant);
}
hci_dev_unlock(hdev);
@@ -6776,6 +6791,13 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
goto unlock;
}
+ if (conn->type != ISO_LINK) {
+ bt_dev_err(hdev,
+ "Invalid connection link type handle 0x%4.4x",
+ handle);
+ goto unlock;
+ }
+
if (conn->role == HCI_ROLE_SLAVE) {
__le32 interval;
@@ -6896,6 +6918,13 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
if (!conn)
goto unlock;
+ if (conn->type != ISO_LINK) {
+ bt_dev_err(hdev,
+ "Invalid connection link type handle 0x%2.2x",
+ ev->handle);
+ goto unlock;
+ }
+
if (ev->num_bis)
conn->handle = __le16_to_cpu(ev->bis_handle[0]);
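The HCI_CC() entry added above hooks hci_cc_read_enc_key_size() into hci_event.c's table-driven command-complete dispatch. A rough sketch of how such a table is matched is below; the struct is simplified to a single length field and the lookup loop is illustrative, not the exact kernel code:

struct hci_cc {
	u16 op;		/* opcode of the completed HCI command */
	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16 min_len;	/* minimum expected response length */
};

#define HCI_CC(_op, _func, _len) \
	{ .op = (_op), .func = (_func), .min_len = (_len) }

static const struct hci_cc hci_cc_table[] = {
	/* ... */
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	/* ... */
};

static u8 dispatch_cc(struct hci_dev *hdev, u16 opcode, struct sk_buff *skb)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
		if (hci_cc_table[i].op != opcode)
			continue;
		if (skb->len < hci_cc_table[i].min_len)
			return HCI_ERROR_UNSPECIFIED; /* truncated event */
		return hci_cc_table[i].func(hdev, (void *)skb->data, skb);
	}

	return HCI_ERROR_UNSPECIFIED; /* no handler registered */
}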
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e64d558e5d69..5a0296a4352e 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -269,42 +269,10 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
const void *param)
{
+ bt_dev_err(req->hdev, "HCI_REQ-0x%4.4x", opcode);
hci_req_add_ev(req, opcode, plen, param, 0);
}
-void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_write_page_scan_activity acp;
- u8 type;
-
- if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
- return;
-
- if (hdev->hci_ver < BLUETOOTH_VER_1_2)
- return;
-
- if (enable) {
- type = PAGE_SCAN_TYPE_INTERLACED;
-
- /* 160 msec page scan interval */
- acp.interval = cpu_to_le16(0x0100);
- } else {
- type = hdev->def_page_scan_type;
- acp.interval = cpu_to_le16(hdev->def_page_scan_int);
- }
-
- acp.window = cpu_to_le16(hdev->def_page_scan_window);
-
- if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
- __cpu_to_le16(hdev->page_scan_window) != acp.window)
- hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
- sizeof(acp), &acp);
-
- if (hdev->page_scan_type != type)
- hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
-}
-
static void start_interleave_scan(struct hci_dev *hdev)
{
hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
@@ -357,45 +325,6 @@ static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
return false;
}
-void __hci_req_update_name(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_write_local_name cp;
-
- memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
-
- hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
-}
-
-void __hci_req_update_eir(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_write_eir cp;
-
- if (!hdev_is_powered(hdev))
- return;
-
- if (!lmp_ext_inq_capable(hdev))
- return;
-
- if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
- return;
-
- if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
- return;
-
- memset(&cp, 0, sizeof(cp));
-
- eir_create(hdev, cp.data);
-
- if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
- return;
-
- memcpy(hdev->eir, cp.data, sizeof(cp.data));
-
- hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
-}
-
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
struct hci_dev *hdev = req->hdev;
@@ -721,6 +650,96 @@ static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
return false;
}
+static void set_random_addr(struct hci_request *req, bdaddr_t *rpa);
+static int hci_update_random_address(struct hci_request *req,
+ bool require_privacy, bool use_rpa,
+ u8 *own_addr_type)
+{
+ struct hci_dev *hdev = req->hdev;
+ int err;
+
+ /* If privacy is enabled use a resolvable private address. If
+ * current RPA has expired or there is something else than
+ * the current RPA in use, then generate a new one.
+ */
+ if (use_rpa) {
+ /* If Controller supports LL Privacy use own address type is
+ * 0x03
+ */
+ if (use_ll_privacy(hdev))
+ *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
+ else
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+
+ if (rpa_valid(hdev))
+ return 0;
+
+ err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
+ if (err < 0) {
+ bt_dev_err(hdev, "failed to generate new RPA");
+ return err;
+ }
+
+ set_random_addr(req, &hdev->rpa);
+
+ return 0;
+ }
+
+ /* In case of required privacy without resolvable private address,
+ * use a non-resolvable private address. This is useful for active
+ * scanning and non-connectable advertising.
+ */
+ if (require_privacy) {
+ bdaddr_t nrpa;
+
+ while (true) {
+ /* The non-resolvable private address is generated
+ * from random six bytes with the two most significant
+ * bits cleared.
+ */
+ get_random_bytes(&nrpa, 6);
+ nrpa.b[5] &= 0x3f;
+
+ /* The non-resolvable private address shall not be
+ * equal to the public address.
+ */
+ if (bacmp(&hdev->bdaddr, &nrpa))
+ break;
+ }
+
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+ set_random_addr(req, &nrpa);
+ return 0;
+ }
+
+ /* If forcing static address is in use or there is no public
+ * address use the static address as random address (but skip
+ * the HCI command if the current random address is already the
+ * static one.
+ *
+ * In case BR/EDR has been disabled on a dual-mode controller
+ * and a static address has been configured, then use that
+ * address instead of the public BR/EDR address.
+ */
+ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+ (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
+ bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+ if (bacmp(&hdev->static_addr, &hdev->random_addr))
+ hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+ &hdev->static_addr);
+ return 0;
+ }
+
+ /* Neither privacy nor static address is being used so use a
+ * public address.
+ */
+ *own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+ return 0;
+}
+
/* Ensure to call hci_req_add_le_scan_disable() first to disable the
* controller based address resolution to be able to reconfigure
* resolving list.
@@ -810,366 +829,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
addr_resolv);
}
-static void cancel_adv_timeout(struct hci_dev *hdev)
-{
- if (hdev->adv_instance_timeout) {
- hdev->adv_instance_timeout = 0;
- cancel_delayed_work(&hdev->adv_instance_expire);
- }
-}
-
-static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
-{
- return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
-}
-
-void __hci_req_disable_advertising(struct hci_request *req)
-{
- if (ext_adv_capable(req->hdev)) {
- __hci_req_disable_ext_adv_instance(req, 0x00);
- } else {
- u8 enable = 0x00;
-
- hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
- }
-}
-
-static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
-{
- /* If privacy is not enabled don't use RPA */
- if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
- return false;
-
- /* If basic privacy mode is enabled use RPA */
- if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
- return true;
-
- /* If limited privacy mode is enabled don't use RPA if we're
- * both discoverable and bondable.
- */
- if ((flags & MGMT_ADV_FLAG_DISCOV) &&
- hci_dev_test_flag(hdev, HCI_BONDABLE))
- return false;
-
- /* We're neither bondable nor discoverable in the limited
- * privacy mode, therefore use RPA.
- */
- return true;
-}
-
-static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
-{
- /* If there is no connection we are OK to advertise. */
- if (hci_conn_num(hdev, LE_LINK) == 0)
- return true;
-
- /* Check le_states if there is any connection in peripheral role. */
- if (hdev->conn_hash.le_num_peripheral > 0) {
- /* Peripheral connection state and non connectable mode bit 20.
- */
- if (!connectable && !(hdev->le_states[2] & 0x10))
- return false;
-
- /* Peripheral connection state and connectable mode bit 38
- * and scannable bit 21.
- */
- if (connectable && (!(hdev->le_states[4] & 0x40) ||
- !(hdev->le_states[2] & 0x20)))
- return false;
- }
-
- /* Check le_states if there is any connection in central role. */
- if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
- /* Central connection state and non connectable mode bit 18. */
- if (!connectable && !(hdev->le_states[2] & 0x02))
- return false;
-
- /* Central connection state and connectable mode bit 35 and
- * scannable 19.
- */
- if (connectable && (!(hdev->le_states[4] & 0x08) ||
- !(hdev->le_states[2] & 0x08)))
- return false;
- }
-
- return true;
-}
-
-void __hci_req_enable_advertising(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct adv_info *adv;
- struct hci_cp_le_set_adv_param cp;
- u8 own_addr_type, enable = 0x01;
- bool connectable;
- u16 adv_min_interval, adv_max_interval;
- u32 flags;
-
- flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
- adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
-
- /* If the "connectable" instance flag was not set, then choose between
- * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
- */
- connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
- mgmt_get_connectable(hdev);
-
- if (!is_advertising_allowed(hdev, connectable))
- return;
-
- if (hci_dev_test_flag(hdev, HCI_LE_ADV))
- __hci_req_disable_advertising(req);
-
- /* Clear the HCI_LE_ADV bit temporarily so that the
- * hci_update_random_address knows that it's safe to go ahead
- * and write a new random address. The flag will be set back on
- * as soon as the SET_ADV_ENABLE HCI command completes.
- */
- hci_dev_clear_flag(hdev, HCI_LE_ADV);
-
- /* Set require_privacy to true only when non-connectable
- * advertising is used. In that case it is fine to use a
- * non-resolvable private address.
- */
- if (hci_update_random_address(req, !connectable,
- adv_use_rpa(hdev, flags),
- &own_addr_type) < 0)
- return;
-
- memset(&cp, 0, sizeof(cp));
-
- if (adv) {
- adv_min_interval = adv->min_interval;
- adv_max_interval = adv->max_interval;
- } else {
- adv_min_interval = hdev->le_adv_min_interval;
- adv_max_interval = hdev->le_adv_max_interval;
- }
-
- if (connectable) {
- cp.type = LE_ADV_IND;
- } else {
- if (adv_cur_instance_is_scannable(hdev))
- cp.type = LE_ADV_SCAN_IND;
- else
- cp.type = LE_ADV_NONCONN_IND;
-
- if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
- hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
- adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
- adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
- }
- }
-
- cp.min_interval = cpu_to_le16(adv_min_interval);
- cp.max_interval = cpu_to_le16(adv_max_interval);
- cp.own_address_type = own_addr_type;
- cp.channel_map = hdev->le_adv_channel_map;
-
- hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
-
- hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
-}
-
-void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
-{
- struct hci_dev *hdev = req->hdev;
- u8 len;
-
- if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
- return;
-
- if (ext_adv_capable(hdev)) {
- struct {
- struct hci_cp_le_set_ext_scan_rsp_data cp;
- u8 data[HCI_MAX_EXT_AD_LENGTH];
- } pdu;
-
- memset(&pdu, 0, sizeof(pdu));
-
- len = eir_create_scan_rsp(hdev, instance, pdu.data);
-
- if (hdev->scan_rsp_data_len == len &&
- !memcmp(pdu.data, hdev->scan_rsp_data, len))
- return;
-
- memcpy(hdev->scan_rsp_data, pdu.data, len);
- hdev->scan_rsp_data_len = len;
-
- pdu.cp.handle = instance;
- pdu.cp.length = len;
- pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
- pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
-
- hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
- sizeof(pdu.cp) + len, &pdu.cp);
- } else {
- struct hci_cp_le_set_scan_rsp_data cp;
-
- memset(&cp, 0, sizeof(cp));
-
- len = eir_create_scan_rsp(hdev, instance, cp.data);
-
- if (hdev->scan_rsp_data_len == len &&
- !memcmp(cp.data, hdev->scan_rsp_data, len))
- return;
-
- memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
- hdev->scan_rsp_data_len = len;
-
- cp.length = len;
-
- hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
- }
-}
-
-void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
-{
- struct hci_dev *hdev = req->hdev;
- u8 len;
-
- if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
- return;
-
- if (ext_adv_capable(hdev)) {
- struct {
- struct hci_cp_le_set_ext_adv_data cp;
- u8 data[HCI_MAX_EXT_AD_LENGTH];
- } pdu;
-
- memset(&pdu, 0, sizeof(pdu));
-
- len = eir_create_adv_data(hdev, instance, pdu.data);
-
- /* There's nothing to do if the data hasn't changed */
- if (hdev->adv_data_len == len &&
- memcmp(pdu.data, hdev->adv_data, len) == 0)
- return;
-
- memcpy(hdev->adv_data, pdu.data, len);
- hdev->adv_data_len = len;
-
- pdu.cp.length = len;
- pdu.cp.handle = instance;
- pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
- pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
-
- hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
- sizeof(pdu.cp) + len, &pdu.cp);
- } else {
- struct hci_cp_le_set_adv_data cp;
-
- memset(&cp, 0, sizeof(cp));
-
- len = eir_create_adv_data(hdev, instance, cp.data);
-
- /* There's nothing to do if the data hasn't changed */
- if (hdev->adv_data_len == len &&
- memcmp(cp.data, hdev->adv_data, len) == 0)
- return;
-
- memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
- hdev->adv_data_len = len;
-
- cp.length = len;
-
- hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
- }
-}
-
-int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
-{
- struct hci_request req;
-
- hci_req_init(&req, hdev);
- __hci_req_update_adv_data(&req, instance);
-
- return hci_req_run(&req, NULL);
-}
-
-static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- BT_DBG("%s status %u", hdev->name, status);
-}
-
-void hci_req_disable_address_resolution(struct hci_dev *hdev)
-{
- struct hci_request req;
- __u8 enable = 0x00;
-
- if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
- return;
-
- hci_req_init(&req, hdev);
-
- hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
-
- hci_req_run(&req, enable_addr_resolution_complete);
-}
-
-static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
-{
- bt_dev_dbg(hdev, "status %u", status);
-}
-
-void hci_req_reenable_advertising(struct hci_dev *hdev)
-{
- struct hci_request req;
-
- if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
- list_empty(&hdev->adv_instances))
- return;
-
- hci_req_init(&req, hdev);
-
- if (hdev->cur_adv_instance) {
- __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
- true);
- } else {
- if (ext_adv_capable(hdev)) {
- __hci_req_start_ext_adv(&req, 0x00);
- } else {
- __hci_req_update_adv_data(&req, 0x00);
- __hci_req_update_scan_rsp_data(&req, 0x00);
- __hci_req_enable_advertising(&req);
- }
- }
-
- hci_req_run(&req, adv_enable_complete);
-}
-
-static void adv_timeout_expire(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- adv_instance_expire.work);
-
- struct hci_request req;
- u8 instance;
-
- bt_dev_dbg(hdev, "");
-
- hci_dev_lock(hdev);
-
- hdev->adv_instance_timeout = 0;
-
- instance = hdev->cur_adv_instance;
- if (instance == 0x00)
- goto unlock;
-
- hci_req_init(&req, hdev);
-
- hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
-
- if (list_empty(&hdev->adv_instances))
- __hci_req_disable_advertising(&req);
-
- hci_req_run(&req, NULL);
-
-unlock:
- hci_dev_unlock(hdev);
-}
-
static int hci_req_add_le_interleaved_scan(struct hci_request *req,
unsigned long opt)
{
@@ -1226,84 +885,6 @@ static void interleave_scan_work(struct work_struct *work)
&hdev->interleave_scan, timeout);
}
-int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
- bool use_rpa, struct adv_info *adv_instance,
- u8 *own_addr_type, bdaddr_t *rand_addr)
-{
- int err;
-
- bacpy(rand_addr, BDADDR_ANY);
-
- /* If privacy is enabled use a resolvable private address. If
- * current RPA has expired then generate a new one.
- */
- if (use_rpa) {
- /* If Controller supports LL Privacy use own address type is
- * 0x03
- */
- if (use_ll_privacy(hdev))
- *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
- else
- *own_addr_type = ADDR_LE_DEV_RANDOM;
-
- if (adv_instance) {
- if (adv_rpa_valid(adv_instance))
- return 0;
- } else {
- if (rpa_valid(hdev))
- return 0;
- }
-
- err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
- if (err < 0) {
- bt_dev_err(hdev, "failed to generate new RPA");
- return err;
- }
-
- bacpy(rand_addr, &hdev->rpa);
-
- return 0;
- }
-
- /* In case of required privacy without resolvable private address,
- * use an non-resolvable private address. This is useful for
- * non-connectable advertising.
- */
- if (require_privacy) {
- bdaddr_t nrpa;
-
- while (true) {
- /* The non-resolvable private address is generated
- * from random six bytes with the two most significant
- * bits cleared.
- */
- get_random_bytes(&nrpa, 6);
- nrpa.b[5] &= 0x3f;
-
- /* The non-resolvable private address shall not be
- * equal to the public address.
- */
- if (bacmp(&hdev->bdaddr, &nrpa))
- break;
- }
-
- *own_addr_type = ADDR_LE_DEV_RANDOM;
- bacpy(rand_addr, &nrpa);
-
- return 0;
- }
-
- /* No privacy so use a public address. */
- *own_addr_type = ADDR_LE_DEV_PUBLIC;
-
- return 0;
-}
-
-void __hci_req_clear_ext_adv_sets(struct hci_request *req)
-{
- hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
-}
-
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
struct hci_dev *hdev = req->hdev;
@@ -1328,933 +909,8 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
-int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
-{
- struct hci_cp_le_set_ext_adv_params cp;
- struct hci_dev *hdev = req->hdev;
- bool connectable;
- u32 flags;
- bdaddr_t random_addr;
- u8 own_addr_type;
- int err;
- struct adv_info *adv;
- bool secondary_adv, require_privacy;
-
- if (instance > 0) {
- adv = hci_find_adv_instance(hdev, instance);
- if (!adv)
- return -EINVAL;
- } else {
- adv = NULL;
- }
-
- flags = hci_adv_instance_flags(hdev, instance);
-
- /* If the "connectable" instance flag was not set, then choose between
- * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
- */
- connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
- mgmt_get_connectable(hdev);
-
- if (!is_advertising_allowed(hdev, connectable))
- return -EPERM;
-
- /* Set require_privacy to true only when non-connectable
- * advertising is used. In that case it is fine to use a
- * non-resolvable private address.
- */
- require_privacy = !connectable;
-
- /* Don't require privacy for periodic adv? */
- if (adv && adv->periodic)
- require_privacy = false;
-
- err = hci_get_random_address(hdev, require_privacy,
- adv_use_rpa(hdev, flags), adv,
- &own_addr_type, &random_addr);
- if (err < 0)
- return err;
-
- memset(&cp, 0, sizeof(cp));
-
- if (adv) {
- hci_cpu_to_le24(adv->min_interval, cp.min_interval);
- hci_cpu_to_le24(adv->max_interval, cp.max_interval);
- cp.tx_power = adv->tx_power;
- } else {
- hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
- hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
- cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
- }
-
- secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
-
- if (connectable) {
- if (secondary_adv)
- cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
- else
- cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
- } else if (hci_adv_instance_is_scannable(hdev, instance) ||
- (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
- if (secondary_adv)
- cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
- else
- cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
- } else {
- /* Secondary and periodic cannot use legacy PDUs */
- if (secondary_adv || (adv && adv->periodic))
- cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
- else
- cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
- }
-
- cp.own_addr_type = own_addr_type;
- cp.channel_map = hdev->le_adv_channel_map;
- cp.handle = instance;
-
- if (flags & MGMT_ADV_FLAG_SEC_2M) {
- cp.primary_phy = HCI_ADV_PHY_1M;
- cp.secondary_phy = HCI_ADV_PHY_2M;
- } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
- cp.primary_phy = HCI_ADV_PHY_CODED;
- cp.secondary_phy = HCI_ADV_PHY_CODED;
- } else {
- /* In all other cases use 1M */
- cp.primary_phy = HCI_ADV_PHY_1M;
- cp.secondary_phy = HCI_ADV_PHY_1M;
- }
-
- hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
-
- if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
- own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
- bacmp(&random_addr, BDADDR_ANY)) {
- struct hci_cp_le_set_adv_set_rand_addr cp;
-
- /* Check if random address need to be updated */
- if (adv) {
- if (!bacmp(&random_addr, &adv->random_addr))
- return 0;
- } else {
- if (!bacmp(&random_addr, &hdev->random_addr))
- return 0;
- /* Instance 0x00 doesn't have an adv_info, instead it
- * uses hdev->random_addr to track its address so
- * whenever it needs to be updated this also set the
- * random address since hdev->random_addr is shared with
- * scan state machine.
- */
- set_random_addr(req, &random_addr);
- }
-
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = instance;
- bacpy(&cp.bdaddr, &random_addr);
-
- hci_req_add(req,
- HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
- sizeof(cp), &cp);
- }
-
- return 0;
-}
-
-int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_le_set_ext_adv_enable *cp;
- struct hci_cp_ext_adv_set *adv_set;
- u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
- struct adv_info *adv_instance;
-
- if (instance > 0) {
- adv_instance = hci_find_adv_instance(hdev, instance);
- if (!adv_instance)
- return -EINVAL;
- } else {
- adv_instance = NULL;
- }
-
- cp = (void *) data;
- adv_set = (void *) cp->data;
-
- memset(cp, 0, sizeof(*cp));
-
- cp->enable = 0x01;
- cp->num_of_sets = 0x01;
-
- memset(adv_set, 0, sizeof(*adv_set));
-
- adv_set->handle = instance;
-
- /* Set duration per instance since controller is responsible for
- * scheduling it.
- */
- if (adv_instance && adv_instance->duration) {
- u16 duration = adv_instance->timeout * MSEC_PER_SEC;
-
- /* Time = N * 10 ms */
- adv_set->duration = cpu_to_le16(duration / 10);
- }
-
- hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
- sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
- data);
-
- return 0;
-}
-
-int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_le_set_ext_adv_enable *cp;
- struct hci_cp_ext_adv_set *adv_set;
- u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
- u8 req_size;
-
- /* If request specifies an instance that doesn't exist, fail */
- if (instance > 0 && !hci_find_adv_instance(hdev, instance))
- return -EINVAL;
-
- memset(data, 0, sizeof(data));
-
- cp = (void *)data;
- adv_set = (void *)cp->data;
-
- /* Instance 0x00 indicates all advertising instances will be disabled */
- cp->num_of_sets = !!instance;
- cp->enable = 0x00;
-
- adv_set->handle = instance;
-
- req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
- hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
-
- return 0;
-}
-
-int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
-{
- struct hci_dev *hdev = req->hdev;
-
- /* If request specifies an instance that doesn't exist, fail */
- if (instance > 0 && !hci_find_adv_instance(hdev, instance))
- return -EINVAL;
-
- hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
-
- return 0;
-}
-
-int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
-{
- struct hci_dev *hdev = req->hdev;
- struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
- int err;
-
- /* If instance isn't pending, the chip knows about it, and it's safe to
- * disable
- */
- if (adv_instance && !adv_instance->pending)
- __hci_req_disable_ext_adv_instance(req, instance);
-
- err = __hci_req_setup_ext_adv_instance(req, instance);
- if (err < 0)
- return err;
-
- __hci_req_update_scan_rsp_data(req, instance);
- __hci_req_enable_ext_advertising(req, instance);
-
- return 0;
-}
-
-int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
- bool force)
-{
- struct hci_dev *hdev = req->hdev;
- struct adv_info *adv_instance = NULL;
- u16 timeout;
-
- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
- list_empty(&hdev->adv_instances))
- return -EPERM;
-
- if (hdev->adv_instance_timeout)
- return -EBUSY;
-
- adv_instance = hci_find_adv_instance(hdev, instance);
- if (!adv_instance)
- return -ENOENT;
-
- /* A zero timeout means unlimited advertising. As long as there is
- * only one instance, duration should be ignored. We still set a timeout
- * in case further instances are being added later on.
- *
- * If the remaining lifetime of the instance is more than the duration
- * then the timeout corresponds to the duration, otherwise it will be
- * reduced to the remaining instance lifetime.
- */
- if (adv_instance->timeout == 0 ||
- adv_instance->duration <= adv_instance->remaining_time)
- timeout = adv_instance->duration;
- else
- timeout = adv_instance->remaining_time;
-
- /* The remaining time is being reduced unless the instance is being
- * advertised without time limit.
- */
- if (adv_instance->timeout)
- adv_instance->remaining_time =
- adv_instance->remaining_time - timeout;
-
- /* Only use work for scheduling instances with legacy advertising */
- if (!ext_adv_capable(hdev)) {
- hdev->adv_instance_timeout = timeout;
- queue_delayed_work(hdev->req_workqueue,
- &hdev->adv_instance_expire,
- msecs_to_jiffies(timeout * 1000));
- }
-
- /* If we're just re-scheduling the same instance again then do not
- * execute any HCI commands. This happens when a single instance is
- * being advertised.
- */
- if (!force && hdev->cur_adv_instance == instance &&
- hci_dev_test_flag(hdev, HCI_LE_ADV))
- return 0;
-
- hdev->cur_adv_instance = instance;
- if (ext_adv_capable(hdev)) {
- __hci_req_start_ext_adv(req, instance);
- } else {
- __hci_req_update_adv_data(req, instance);
- __hci_req_update_scan_rsp_data(req, instance);
- __hci_req_enable_advertising(req);
- }
-
- return 0;
-}
-
-/* For a single instance:
- * - force == true: The instance will be removed even when its remaining
- * lifetime is not zero.
- * - force == false: the instance will be deactivated but kept stored unless
- * the remaining lifetime is zero.
- *
- * For instance == 0x00:
- * - force == true: All instances will be removed regardless of their timeout
- * setting.
- * - force == false: Only instances that have a timeout will be removed.
- */
-void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
- struct hci_request *req, u8 instance,
- bool force)
-{
- struct adv_info *adv_instance, *n, *next_instance = NULL;
- int err;
- u8 rem_inst;
-
- /* Cancel any timeout concerning the removed instance(s). */
- if (!instance || hdev->cur_adv_instance == instance)
- cancel_adv_timeout(hdev);
-
- /* Get the next instance to advertise BEFORE we remove
- * the current one. This can be the same instance again
- * if there is only one instance.
- */
- if (instance && hdev->cur_adv_instance == instance)
- next_instance = hci_get_next_instance(hdev, instance);
-
- if (instance == 0x00) {
- list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
- list) {
- if (!(force || adv_instance->timeout))
- continue;
-
- rem_inst = adv_instance->instance;
- err = hci_remove_adv_instance(hdev, rem_inst);
- if (!err)
- mgmt_advertising_removed(sk, hdev, rem_inst);
- }
- } else {
- adv_instance = hci_find_adv_instance(hdev, instance);
-
- if (force || (adv_instance && adv_instance->timeout &&
- !adv_instance->remaining_time)) {
- /* Don't advertise a removed instance. */
- if (next_instance &&
- next_instance->instance == instance)
- next_instance = NULL;
-
- err = hci_remove_adv_instance(hdev, instance);
- if (!err)
- mgmt_advertising_removed(sk, hdev, instance);
- }
- }
-
- if (!req || !hdev_is_powered(hdev) ||
- hci_dev_test_flag(hdev, HCI_ADVERTISING))
- return;
-
- if (next_instance && !ext_adv_capable(hdev))
- __hci_req_schedule_adv_instance(req, next_instance->instance,
- false);
-}
-
-int hci_update_random_address(struct hci_request *req, bool require_privacy,
- bool use_rpa, u8 *own_addr_type)
-{
- struct hci_dev *hdev = req->hdev;
- int err;
-
- /* If privacy is enabled use a resolvable private address. If
- * current RPA has expired or there is something else than
- * the current RPA in use, then generate a new one.
- */
- if (use_rpa) {
- /* If Controller supports LL Privacy use own address type is
- * 0x03
- */
- if (use_ll_privacy(hdev))
- *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
- else
- *own_addr_type = ADDR_LE_DEV_RANDOM;
-
- if (rpa_valid(hdev))
- return 0;
-
- err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
- if (err < 0) {
- bt_dev_err(hdev, "failed to generate new RPA");
- return err;
- }
-
- set_random_addr(req, &hdev->rpa);
-
- return 0;
- }
-
- /* In case of required privacy without resolvable private address,
- * use an non-resolvable private address. This is useful for active
- * scanning and non-connectable advertising.
- */
- if (require_privacy) {
- bdaddr_t nrpa;
-
- while (true) {
- /* The non-resolvable private address is generated
-			 * from six random bytes with the two most significant
- * bits cleared.
- */
- get_random_bytes(&nrpa, 6);
- nrpa.b[5] &= 0x3f;
-
- /* The non-resolvable private address shall not be
- * equal to the public address.
- */
- if (bacmp(&hdev->bdaddr, &nrpa))
- break;
- }
-
- *own_addr_type = ADDR_LE_DEV_RANDOM;
- set_random_addr(req, &nrpa);
- return 0;
- }
-
-	/* If forcing static address is in use or there is no public
-	 * address, use the static address as the random address (but
-	 * skip the HCI command if the current random address is
-	 * already the static one).
- *
- * In case BR/EDR has been disabled on a dual-mode controller
- * and a static address has been configured, then use that
- * address instead of the public BR/EDR address.
- */
- if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
- !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
- (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
- bacmp(&hdev->static_addr, BDADDR_ANY))) {
- *own_addr_type = ADDR_LE_DEV_RANDOM;
- if (bacmp(&hdev->static_addr, &hdev->random_addr))
- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
- &hdev->static_addr);
- return 0;
- }
-
- /* Neither privacy nor static address is being used so use a
- * public address.
- */
- *own_addr_type = ADDR_LE_DEV_PUBLIC;
-
- return 0;
-}
-
-static bool disconnected_accept_list_entries(struct hci_dev *hdev)
-{
- struct bdaddr_list *b;
-
- list_for_each_entry(b, &hdev->accept_list, list) {
- struct hci_conn *conn;
-
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
- if (!conn)
- return true;
-
- if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
- return true;
- }
-
- return false;
-}
-
-void __hci_req_update_scan(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- u8 scan;
-
- if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
- return;
-
- if (!hdev_is_powered(hdev))
- return;
-
- if (mgmt_powering_down(hdev))
- return;
-
- if (hdev->scanning_paused)
- return;
-
- if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
- disconnected_accept_list_entries(hdev))
- scan = SCAN_PAGE;
- else
- scan = SCAN_DISABLED;
-
- if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
- scan |= SCAN_INQUIRY;
-
- if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
- test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
- return;
-
- hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
-}
-
-static u8 get_service_classes(struct hci_dev *hdev)
-{
- struct bt_uuid *uuid;
- u8 val = 0;
-
- list_for_each_entry(uuid, &hdev->uuids, list)
- val |= uuid->svc_hint;
-
- return val;
-}
-
-void __hci_req_update_class(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- u8 cod[3];
-
- bt_dev_dbg(hdev, "");
-
- if (!hdev_is_powered(hdev))
- return;
-
- if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
- return;
-
- if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
- return;
-
- cod[0] = hdev->minor_class;
- cod[1] = hdev->major_class;
- cod[2] = get_service_classes(hdev);
-
- if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
- cod[1] |= 0x20;
-
- if (memcmp(cod, hdev->dev_class, 3) == 0)
- return;
-
- hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
-}
-
-void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
- u8 reason)
-{
- switch (conn->state) {
- case BT_CONNECTED:
- case BT_CONFIG:
- if (conn->type == AMP_LINK) {
- struct hci_cp_disconn_phy_link cp;
-
- cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
- cp.reason = reason;
- hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
- &cp);
- } else {
- struct hci_cp_disconnect dc;
-
- dc.handle = cpu_to_le16(conn->handle);
- dc.reason = reason;
- hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
- }
-
- conn->state = BT_DISCONN;
-
- break;
- case BT_CONNECT:
- if (conn->type == LE_LINK) {
- if (test_bit(HCI_CONN_SCANNING, &conn->flags))
- break;
- hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
- 0, NULL);
- } else if (conn->type == ACL_LINK) {
- if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
- break;
- hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
- 6, &conn->dst);
- }
- break;
- case BT_CONNECT2:
- if (conn->type == ACL_LINK) {
- struct hci_cp_reject_conn_req rej;
-
- bacpy(&rej.bdaddr, &conn->dst);
- rej.reason = reason;
-
- hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
- sizeof(rej), &rej);
- } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
- struct hci_cp_reject_sync_conn_req rej;
-
- bacpy(&rej.bdaddr, &conn->dst);
-
- /* SCO rejection has its own limited set of
- * allowed error values (0x0D-0x0F) which isn't
- * compatible with most values passed to this
-			 * function. To be safe, hard-code one of the
- * values that's suitable for SCO.
- */
- rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
-
- hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
- sizeof(rej), &rej);
- }
- break;
- default:
- conn->state = BT_CLOSED;
- break;
- }
-}
-
-static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
-{
- if (status)
- bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
-}
-
-int hci_abort_conn(struct hci_conn *conn, u8 reason)
-{
- struct hci_request req;
- int err;
-
- hci_req_init(&req, conn->hdev);
-
- __hci_abort_conn(&req, conn, reason);
-
- err = hci_req_run(&req, abort_conn_complete);
- if (err && err != -ENODATA) {
- bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
- return err;
- }
-
- return 0;
-}
-
-static int le_scan_disable(struct hci_request *req, unsigned long opt)
-{
- hci_req_add_le_scan_disable(req, false);
- return 0;
-}
-
-static int bredr_inquiry(struct hci_request *req, unsigned long opt)
-{
- u8 length = opt;
- const u8 giac[3] = { 0x33, 0x8b, 0x9e };
- const u8 liac[3] = { 0x00, 0x8b, 0x9e };
- struct hci_cp_inquiry cp;
-
- if (test_bit(HCI_INQUIRY, &req->hdev->flags))
- return 0;
-
- bt_dev_dbg(req->hdev, "");
-
- hci_dev_lock(req->hdev);
- hci_inquiry_cache_flush(req->hdev);
- hci_dev_unlock(req->hdev);
-
- memset(&cp, 0, sizeof(cp));
-
- if (req->hdev->discovery.limited)
- memcpy(&cp.lap, liac, sizeof(cp.lap));
- else
- memcpy(&cp.lap, giac, sizeof(cp.lap));
-
- cp.length = length;
-
- hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
-
- return 0;
-}
-
-static void le_scan_disable_work(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- le_scan_disable.work);
- u8 status;
-
- bt_dev_dbg(hdev, "");
-
- if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
- return;
-
- cancel_delayed_work(&hdev->le_scan_restart);
-
- hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
- if (status) {
- bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
- status);
- return;
- }
-
- hdev->discovery.scan_start = 0;
-
- /* If we were running LE only scan, change discovery state. If
- * we were running both LE and BR/EDR inquiry simultaneously,
- * and BR/EDR inquiry is already finished, stop discovery,
- * otherwise BR/EDR inquiry will stop discovery when finished.
-	 * If we are resolving a remote device name, do not change
-	 * the discovery state.
- */
-
- if (hdev->discovery.type == DISCOV_TYPE_LE)
- goto discov_stopped;
-
- if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
- return;
-
- if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
- if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
- hdev->discovery.state != DISCOVERY_RESOLVING)
- goto discov_stopped;
-
- return;
- }
-
- hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
- HCI_CMD_TIMEOUT, &status);
- if (status) {
- bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
- goto discov_stopped;
- }
-
- return;
-
-discov_stopped:
- hci_dev_lock(hdev);
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- hci_dev_unlock(hdev);
-}
-
-static int le_scan_restart(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
-
-	/* If the controller is not scanning, we are done. */
- if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
- return 0;
-
- if (hdev->scanning_paused) {
- bt_dev_dbg(hdev, "Scanning is paused for suspend");
- return 0;
- }
-
- hci_req_add_le_scan_disable(req, false);
-
- if (use_ext_scan(hdev)) {
- struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
-
- memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
- ext_enable_cp.enable = LE_SCAN_ENABLE;
- ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
-
- hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
- sizeof(ext_enable_cp), &ext_enable_cp);
- } else {
- struct hci_cp_le_set_scan_enable cp;
-
- memset(&cp, 0, sizeof(cp));
- cp.enable = LE_SCAN_ENABLE;
- cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
- }
-
- return 0;
-}
-
-static void le_scan_restart_work(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- le_scan_restart.work);
- unsigned long timeout, duration, scan_start, now;
- u8 status;
-
- bt_dev_dbg(hdev, "");
-
- hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
- if (status) {
- bt_dev_err(hdev, "failed to restart LE scan: status %d",
- status);
- return;
- }
-
- hci_dev_lock(hdev);
-
- if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
- !hdev->discovery.scan_start)
- goto unlock;
-
-	/* When the scan was started, hdev->le_scan_disable was queued to
-	 * run 'duration' after scan_start. During scan restart this job
-	 * has been canceled, so we need to queue it again with the proper
-	 * timeout to make sure the scan does not run indefinitely.
-	 */
- duration = hdev->discovery.scan_duration;
- scan_start = hdev->discovery.scan_start;
- now = jiffies;
- if (now - scan_start <= duration) {
- int elapsed;
-
- if (now >= scan_start)
- elapsed = now - scan_start;
- else
- elapsed = ULONG_MAX - scan_start + now;
-
- timeout = duration - elapsed;
- } else {
- timeout = 0;
- }
-
- queue_delayed_work(hdev->req_workqueue,
- &hdev->le_scan_disable, timeout);
-
-unlock:
- hci_dev_unlock(hdev);
-}
-
-bool hci_req_stop_discovery(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct discovery_state *d = &hdev->discovery;
- struct hci_cp_remote_name_req_cancel cp;
- struct inquiry_entry *e;
- bool ret = false;
-
- bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
-
- if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
- if (test_bit(HCI_INQUIRY, &hdev->flags))
- hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
-
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
- cancel_delayed_work(&hdev->le_scan_disable);
- cancel_delayed_work(&hdev->le_scan_restart);
- hci_req_add_le_scan_disable(req, false);
- }
-
- ret = true;
- } else {
- /* Passive scanning */
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
- hci_req_add_le_scan_disable(req, false);
- ret = true;
- }
- }
-
- /* No further actions needed for LE-only discovery */
- if (d->type == DISCOV_TYPE_LE)
- return ret;
-
- if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
- e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
- NAME_PENDING);
- if (!e)
- return ret;
-
- bacpy(&cp.bdaddr, &e->data.bdaddr);
- hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
- &cp);
- ret = true;
- }
-
- return ret;
-}
-
-static void config_data_path_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- bt_dev_dbg(hdev, "status %u", status);
-}
-
-int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
-{
- struct hci_request req;
- int err;
- __u8 vnd_len, *vnd_data = NULL;
- struct hci_op_configure_data_path *cmd = NULL;
-
- hci_req_init(&req, hdev);
-
- err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
- &vnd_data);
- if (err < 0)
- goto error;
-
- cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
- if (!cmd) {
- err = -ENOMEM;
- goto error;
- }
-
- err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
- if (err < 0)
- goto error;
-
- cmd->vnd_len = vnd_len;
- memcpy(cmd->vnd_data, vnd_data, vnd_len);
-
- cmd->direction = 0x00;
- hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
-
- cmd->direction = 0x01;
- hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
-
- err = hci_req_run(&req, config_data_path_complete);
-error:
-
- kfree(cmd);
- kfree(vnd_data);
- return err;
-}
-
void hci_request_setup(struct hci_dev *hdev)
{
- INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
- INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
- INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}
@@ -2262,13 +918,5 @@ void hci_request_cancel_all(struct hci_dev *hdev)
{
__hci_cmd_sync_cancel(hdev, ENODEV);
- cancel_delayed_work_sync(&hdev->le_scan_disable);
- cancel_delayed_work_sync(&hdev->le_scan_restart);
-
- if (hdev->adv_instance_timeout) {
- cancel_delayed_work_sync(&hdev->adv_instance_expire);
- hdev->adv_instance_timeout = 0;
- }
-
cancel_interleave_scan(hdev);
}
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index 39d001fa3acf..b9c5a9823837 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -68,63 +68,10 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param);
-void __hci_req_write_fast_connectable(struct hci_request *req, bool enable);
-void __hci_req_update_name(struct hci_request *req);
-void __hci_req_update_eir(struct hci_request *req);
-
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn);
void hci_req_add_le_passive_scan(struct hci_request *req);
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
-void hci_req_disable_address_resolution(struct hci_dev *hdev);
-void hci_req_reenable_advertising(struct hci_dev *hdev);
-void __hci_req_enable_advertising(struct hci_request *req);
-void __hci_req_disable_advertising(struct hci_request *req);
-void __hci_req_update_adv_data(struct hci_request *req, u8 instance);
-int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance);
-int hci_req_start_per_adv(struct hci_dev *hdev, u8 instance, u32 flags,
- u16 min_interval, u16 max_interval,
- u16 sync_interval);
-void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance);
-
-int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
- bool force);
-void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
- struct hci_request *req, u8 instance,
- bool force);
-
-int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance);
-int __hci_req_setup_per_adv_instance(struct hci_request *req, u8 instance,
- u16 min_interval, u16 max_interval);
-int __hci_req_start_ext_adv(struct hci_request *req, u8 instance);
-int __hci_req_start_per_adv(struct hci_request *req, u8 instance, u32 flags,
- u16 min_interval, u16 max_interval,
- u16 sync_interval);
-int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance);
-int __hci_req_enable_per_advertising(struct hci_request *req, u8 instance);
-int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance);
-int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance);
-void __hci_req_clear_ext_adv_sets(struct hci_request *req);
-int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
- bool use_rpa, struct adv_info *adv_instance,
- u8 *own_addr_type, bdaddr_t *rand_addr);
-
-void __hci_req_update_class(struct hci_request *req);
-
-/* Returns true if HCI commands were queued */
-bool hci_req_stop_discovery(struct hci_request *req);
-
-int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec);
-
-void __hci_req_update_scan(struct hci_request *req);
-
-int hci_update_random_address(struct hci_request *req, bool require_privacy,
- bool use_rpa, u8 *own_addr_type);
-
-int hci_abort_conn(struct hci_conn *conn, u8 reason);
-void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
- u8 reason);
-
void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 0d015d4a8e41..06581223238c 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -887,7 +887,6 @@ static int hci_sock_release(struct socket *sock)
*/
hci_dev_do_close(hdev);
hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
- hci_register_suspend_notifier(hdev);
mgmt_index_added(hdev);
}
@@ -1216,7 +1215,6 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
}
mgmt_index_removed(hdev);
- hci_unregister_suspend_notifier(hdev);
err = hci_dev_open(hdev->id);
if (err) {
@@ -1231,7 +1229,6 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
err = 0;
} else {
hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
- hci_register_suspend_notifier(hdev);
mgmt_index_added(hdev);
hci_dev_put(hdev);
goto done;
@@ -2065,6 +2062,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
static void hci_sock_destruct(struct sock *sk)
{
+ mgmt_cleanup(sk);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index fbd5613eebfc..76c3107c9f91 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -246,7 +246,7 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
- PTR_ERR(skb));
+ PTR_ERR(skb));
return PTR_ERR(skb);
}
@@ -321,6 +321,307 @@ static void hci_cmd_sync_cancel_work(struct work_struct *work)
wake_up_interruptible(&hdev->req_wait_q);
}
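+/* Forward declarations for sync helpers defined later in this file */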
+static int hci_scan_disable_sync(struct hci_dev *hdev);
+static int scan_disable_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_scan_disable_sync(hdev);
+}
+
+static int hci_inquiry_sync(struct hci_dev *hdev, u8 length);
+static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN);
+}
+
+static void le_scan_disable(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ le_scan_disable.work);
+ int status;
+
+ bt_dev_dbg(hdev, "");
+ hci_dev_lock(hdev);
+
+ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ goto _return;
+
+ cancel_delayed_work(&hdev->le_scan_restart);
+
+ status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
+ if (status) {
+ bt_dev_err(hdev, "failed to disable LE scan: %d", status);
+ goto _return;
+ }
+
+ hdev->discovery.scan_start = 0;
+
+ /* If we were running LE only scan, change discovery state. If
+ * we were running both LE and BR/EDR inquiry simultaneously,
+ * and BR/EDR inquiry is already finished, stop discovery,
+ * otherwise BR/EDR inquiry will stop discovery when finished.
+	 * If we are resolving a remote device name, do not change
+	 * the discovery state.
+ */
+
+ if (hdev->discovery.type == DISCOV_TYPE_LE)
+ goto discov_stopped;
+
+ if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
+ goto _return;
+
+ if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
+ if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
+ hdev->discovery.state != DISCOVERY_RESOLVING)
+ goto discov_stopped;
+
+ goto _return;
+ }
+
+ status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
+ if (status) {
+ bt_dev_err(hdev, "inquiry failed: status %d", status);
+ goto discov_stopped;
+ }
+
+ goto _return;
+
+discov_stopped:
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
+_return:
+ hci_dev_unlock(hdev);
+}
+
+static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
+ u8 filter_dup);
+static int hci_le_scan_restart_sync(struct hci_dev *hdev)
+{
+	/* If the controller is not scanning, we are done. */
+ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ return 0;
+
+ if (hdev->scanning_paused) {
+ bt_dev_dbg(hdev, "Scanning is paused for suspend");
+ return 0;
+ }
+
+ hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
+ return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE,
+ LE_SCAN_FILTER_DUP_ENABLE);
+}
+
+static int le_scan_restart_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_le_scan_restart_sync(hdev);
+}
+
+static void le_scan_restart(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ le_scan_restart.work);
+ unsigned long timeout, duration, scan_start, now;
+ int status;
+
+ bt_dev_dbg(hdev, "");
+
+ hci_dev_lock(hdev);
+
+ status = hci_cmd_sync_queue(hdev, le_scan_restart_sync, NULL, NULL);
+ if (status) {
+ bt_dev_err(hdev, "failed to restart LE scan: status %d",
+ status);
+ goto unlock;
+ }
+
+ if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
+ !hdev->discovery.scan_start)
+ goto unlock;
+
+	/* When the scan was started, hdev->le_scan_disable was queued to
+	 * run 'duration' after scan_start. During scan restart this job
+	 * has been canceled, so we need to queue it again with the proper
+	 * timeout to make sure the scan does not run indefinitely.
+	 */
+ duration = hdev->discovery.scan_duration;
+ scan_start = hdev->discovery.scan_start;
+ now = jiffies;
+ if (now - scan_start <= duration) {
+ int elapsed;
+
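+		/* jiffies may have wrapped around since scan_start */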
+ if (now >= scan_start)
+ elapsed = now - scan_start;
+ else
+ elapsed = ULONG_MAX - scan_start + now;
+
+ timeout = duration - elapsed;
+ } else {
+ timeout = 0;
+ }
+
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->le_scan_disable, timeout);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
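+/* Restore advertising: reschedule the current instance if one is set,
+ * otherwise start (or refresh) the default instance 0x00.
+ */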
+static int reenable_adv_sync(struct hci_dev *hdev, void *data)
+{
+ bt_dev_dbg(hdev, "");
+
+ if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+ list_empty(&hdev->adv_instances))
+ return 0;
+
+ if (hdev->cur_adv_instance) {
+ return hci_schedule_adv_instance_sync(hdev,
+ hdev->cur_adv_instance,
+ true);
+ } else {
+ if (ext_adv_capable(hdev)) {
+ hci_start_ext_adv_sync(hdev, 0x00);
+ } else {
+ hci_update_adv_data_sync(hdev, 0x00);
+ hci_update_scan_rsp_data_sync(hdev, 0x00);
+ hci_enable_advertising_sync(hdev);
+ }
+ }
+
+ return 0;
+}
+
+static void reenable_adv(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ reenable_adv_work);
+ int status;
+
+ bt_dev_dbg(hdev, "");
+
+ hci_dev_lock(hdev);
+
+ status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
+ if (status)
+ bt_dev_err(hdev, "failed to reenable ADV: %d", status);
+
+ hci_dev_unlock(hdev);
+}
+
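+/* Disarm a pending advertising-instance expiry timer, if any */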
+static void cancel_adv_timeout(struct hci_dev *hdev)
+{
+ if (hdev->adv_instance_timeout) {
+ hdev->adv_instance_timeout = 0;
+ cancel_delayed_work(&hdev->adv_instance_expire);
+ }
+}
+
+/* For a single instance:
+ * - force == true: The instance will be removed even when its remaining
+ * lifetime is not zero.
+ * - force == false: the instance will be deactivated but kept stored unless
+ * the remaining lifetime is zero.
+ *
+ * For instance == 0x00:
+ * - force == true: All instances will be removed regardless of their timeout
+ * setting.
+ * - force == false: Only instances that have a timeout will be removed.
+ */
+int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
+ u8 instance, bool force)
+{
+ struct adv_info *adv_instance, *n, *next_instance = NULL;
+ int err;
+ u8 rem_inst;
+
+ /* Cancel any timeout concerning the removed instance(s). */
+ if (!instance || hdev->cur_adv_instance == instance)
+ cancel_adv_timeout(hdev);
+
+ /* Get the next instance to advertise BEFORE we remove
+ * the current one. This can be the same instance again
+ * if there is only one instance.
+ */
+ if (instance && hdev->cur_adv_instance == instance)
+ next_instance = hci_get_next_instance(hdev, instance);
+
+ if (instance == 0x00) {
+ list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
+ list) {
+ if (!(force || adv_instance->timeout))
+ continue;
+
+ rem_inst = adv_instance->instance;
+ err = hci_remove_adv_instance(hdev, rem_inst);
+ if (!err)
+ mgmt_advertising_removed(sk, hdev, rem_inst);
+ }
+ } else {
+ adv_instance = hci_find_adv_instance(hdev, instance);
+
+ if (force || (adv_instance && adv_instance->timeout &&
+ !adv_instance->remaining_time)) {
+ /* Don't advertise a removed instance. */
+ if (next_instance &&
+ next_instance->instance == instance)
+ next_instance = NULL;
+
+ err = hci_remove_adv_instance(hdev, instance);
+ if (!err)
+ mgmt_advertising_removed(sk, hdev, instance);
+ }
+ }
+
+ if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ return 0;
+
+ if (next_instance && !ext_adv_capable(hdev))
+ return hci_schedule_adv_instance_sync(hdev,
+ next_instance->instance,
+ false);
+
+ return 0;
+}
+
+static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
+{
+ u8 instance = *(u8 *)data;
+
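+	/* data was allocated by adv_timeout_expire(); free it now that
+	 * the instance number has been copied out.
+	 */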
+ kfree(data);
+
+ hci_clear_adv_instance_sync(hdev, NULL, instance, false);
+
+ if (list_empty(&hdev->adv_instances))
+ return hci_disable_advertising_sync(hdev);
+
+ return 0;
+}
+
+static void adv_timeout_expire(struct work_struct *work)
+{
+ u8 *inst_ptr;
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ adv_instance_expire.work);
+
+ bt_dev_dbg(hdev, "");
+
+ hci_dev_lock(hdev);
+
+ hdev->adv_instance_timeout = 0;
+
+ if (hdev->cur_adv_instance == 0x00)
+ goto unlock;
+
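+	/* Pass the instance number to the sync callback via a small heap
+	 * allocation, which adv_timeout_expire_sync() frees.
+	 */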
+ inst_ptr = kmalloc(1, GFP_KERNEL);
+ if (!inst_ptr)
+ goto unlock;
+
+ *inst_ptr = hdev->cur_adv_instance;
+ hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
void hci_cmd_sync_init(struct hci_dev *hdev)
{
INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
@@ -328,6 +629,10 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
mutex_init(&hdev->cmd_sync_work_lock);
INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
+ INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
+ INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
+ INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart);
+ INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
void hci_cmd_sync_clear(struct hci_dev *hdev)
@@ -335,6 +640,7 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
struct hci_cmd_sync_work_entry *entry, *tmp;
cancel_work_sync(&hdev->cmd_sync_work);
+ cancel_work_sync(&hdev->reenable_adv_work);
list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
if (entry->destroy)
@@ -1333,14 +1639,6 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
-static void cancel_adv_timeout(struct hci_dev *hdev)
-{
- if (hdev->adv_instance_timeout) {
- hdev->adv_instance_timeout = 0;
- cancel_delayed_work(&hdev->adv_instance_expire);
- }
-}
-
static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
struct {
@@ -1492,10 +1790,13 @@ static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
{
struct adv_info *adv, *n;
+ int err = 0;
if (ext_adv_capable(hdev))
/* Remove all existing sets */
- return hci_clear_adv_sets_sync(hdev, sk);
+ err = hci_clear_adv_sets_sync(hdev, sk);
+ if (ext_adv_capable(hdev))
+ return err;
 	/* This is safe as long as no command is sent while the lock is
 	 * held.
@@ -1523,11 +1824,13 @@ static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
struct sock *sk)
{
- int err;
+ int err = 0;
/* If we use extended advertising, instance has to be removed first. */
if (ext_adv_capable(hdev))
- return hci_remove_ext_adv_instance_sync(hdev, instance, sk);
+ err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
+ if (ext_adv_capable(hdev))
+ return err;
 	/* This is safe as long as no command is sent while the lock is
 	 * held.
@@ -1626,13 +1929,16 @@ int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
int hci_disable_advertising_sync(struct hci_dev *hdev)
{
u8 enable = 0x00;
+ int err = 0;
/* If controller is not advertising we are done. */
if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
return 0;
if (ext_adv_capable(hdev))
- return hci_disable_ext_adv_instance_sync(hdev, 0x00);
+ err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
+ if (ext_adv_capable(hdev))
+ return err;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
sizeof(enable), &enable, HCI_CMD_TIMEOUT);
@@ -1645,7 +1951,11 @@ static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
memset(&cp, 0, sizeof(cp));
cp.enable = val;
- cp.filter_dup = filter_dup;
+
+ if (hci_dev_test_flag(hdev, HCI_MESH))
+ cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
+ else
+ cp.filter_dup = filter_dup;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
@@ -1661,7 +1971,11 @@ static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
memset(&cp, 0, sizeof(cp));
cp.enable = val;
- cp.filter_dup = filter_dup;
+
+ if (val && hci_dev_test_flag(hdev, HCI_MESH))
+ cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
+ else
+ cp.filter_dup = filter_dup;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
@@ -2300,6 +2614,7 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
u8 own_addr_type;
u8 filter_policy;
u16 window, interval;
+ u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
int err;
if (hdev->scanning_paused) {
@@ -2362,11 +2677,16 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
interval = hdev->le_scan_interval;
}
+ /* Disable all filtering for Mesh */
+ if (hci_dev_test_flag(hdev, HCI_MESH)) {
+ filter_policy = 0;
+ filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
+ }
+
bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
- own_addr_type, filter_policy,
- LE_SCAN_FILTER_DUP_ENABLE);
+ own_addr_type, filter_policy, filter_dups);
}
/* This function controls the passive scanning based on hdev->pend_le_conns
@@ -2416,7 +2736,8 @@ int hci_update_passive_scan_sync(struct hci_dev *hdev)
bt_dev_dbg(hdev, "ADV monitoring is %s",
hci_is_adv_monitoring(hdev) ? "on" : "off");
- if (list_empty(&hdev->pend_le_conns) &&
+ if (!hci_dev_test_flag(hdev, HCI_MESH) &&
+ list_empty(&hdev->pend_le_conns) &&
list_empty(&hdev->pend_le_reports) &&
!hci_is_adv_monitoring(hdev) &&
!hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
@@ -4355,6 +4676,7 @@ int hci_dev_open_sync(struct hci_dev *hdev)
hci_dev_test_flag(hdev, HCI_MGMT) &&
hdev->dev_type == HCI_PRIMARY) {
ret = hci_powered_update_sync(hdev);
+ mgmt_power_on(hdev, ret);
}
} else {
/* Init failed, cleanup */
@@ -4406,6 +4728,31 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev)
BT_DBG("All LE pending actions cleared");
}
+static int hci_dev_shutdown(struct hci_dev *hdev)
+{
+ int err = 0;
+ /* Similar to how we first do setup and then set the exclusive access
+ * bit for userspace, we must first unset userchannel and then clean up.
+ * Otherwise, the kernel can't properly use the hci channel to clean up
+ * the controller (some shutdown routines require sending additional
+ * commands to the controller for example).
+ */
+ bool was_userchannel =
+ hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);
+
+ if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
+ test_bit(HCI_UP, &hdev->flags)) {
+ /* Execute vendor specific shutdown routine */
+ if (hdev->shutdown)
+ err = hdev->shutdown(hdev);
+ }
+
+ if (was_userchannel)
+ hci_dev_set_flag(hdev, HCI_USER_CHANNEL);
+
+ return err;
+}
+
int hci_dev_close_sync(struct hci_dev *hdev)
{
bool auto_off;
@@ -4415,17 +4762,18 @@ int hci_dev_close_sync(struct hci_dev *hdev)
cancel_delayed_work(&hdev->power_off);
cancel_delayed_work(&hdev->ncmd_timer);
+ cancel_delayed_work(&hdev->le_scan_disable);
+ cancel_delayed_work(&hdev->le_scan_restart);
hci_request_cancel_all(hdev);
- if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- test_bit(HCI_UP, &hdev->flags)) {
- /* Execute vendor specific shutdown routine */
- if (hdev->shutdown)
- err = hdev->shutdown(hdev);
+ if (hdev->adv_instance_timeout) {
+ cancel_delayed_work_sync(&hdev->adv_instance_expire);
+ hdev->adv_instance_timeout = 0;
}
+ err = hci_dev_shutdown(hdev);
+
if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
cancel_delayed_work_sync(&hdev->cmd_timer);
return err;
@@ -5023,7 +5371,7 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
/* Pause advertising since active scanning disables address resolution
* which advertising depend on in order to generate its RPAs.
*/
- if (use_ll_privacy(hdev)) {
+ if (use_ll_privacy(hdev) && hci_dev_test_flag(hdev, HCI_PRIVACY)) {
err = hci_pause_advertising_sync(hdev);
if (err) {
bt_dev_err(hdev, "pause advertising failed: %d", err);
@@ -5737,3 +6085,96 @@ int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
+
+int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+ bool use_rpa, struct adv_info *adv_instance,
+ u8 *own_addr_type, bdaddr_t *rand_addr)
+{
+ int err;
+
+ bacpy(rand_addr, BDADDR_ANY);
+
+ /* If privacy is enabled use a resolvable private address. If
+	 * the current RPA has expired then generate a new one.
+ */
+ if (use_rpa) {
+		/* If the controller supports LL Privacy, use own address
+		 * type 0x03
+		 */
+ if (use_ll_privacy(hdev))
+ *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
+ else
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+
+ if (adv_instance) {
+ if (adv_rpa_valid(adv_instance))
+ return 0;
+ } else {
+ if (rpa_valid(hdev))
+ return 0;
+ }
+
+ err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
+ if (err < 0) {
+ bt_dev_err(hdev, "failed to generate new RPA");
+ return err;
+ }
+
+ bacpy(rand_addr, &hdev->rpa);
+
+ return 0;
+ }
+
+ /* In case of required privacy without resolvable private address,
+	 * use a non-resolvable private address. This is useful for
+ * non-connectable advertising.
+ */
+ if (require_privacy) {
+ bdaddr_t nrpa;
+
+ while (true) {
+ /* The non-resolvable private address is generated
+			 * from six random bytes with the two most significant
+ * bits cleared.
+ */
+ get_random_bytes(&nrpa, 6);
+ nrpa.b[5] &= 0x3f;
+
+ /* The non-resolvable private address shall not be
+ * equal to the public address.
+ */
+ if (bacmp(&hdev->bdaddr, &nrpa))
+ break;
+ }
+
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+ bacpy(rand_addr, &nrpa);
+
+ return 0;
+ }
+
+ /* No privacy so use a public address. */
+ *own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+ return 0;
+}
+
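+/* Queued helper: the instance number travels in a 1-byte heap allocation
+ * that is freed here once consumed.
+ */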
+static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
+{
+ u8 instance = *(u8 *)data;
+
+ kfree(data);
+
+ return hci_update_adv_data_sync(hdev, instance);
+}
+
+int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
+{
+ u8 *inst_ptr = kmalloc(1, GFP_KERNEL);
+
+ if (!inst_ptr)
+ return -ENOMEM;
+
+ *inst_ptr = instance;
+ return hci_cmd_sync_queue(hdev, _update_adv_data_sync, inst_ptr, NULL);
+}
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 4e3e0451b08c..08542dfc2dc5 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -48,6 +48,9 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
BT_DBG("conn %p", conn);
+ if (device_is_registered(&conn->dev))
+ return;
+
dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
if (device_add(&conn->dev) < 0) {
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 2c9de67daadc..1f34b82ca0ec 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -61,6 +61,9 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff_head *skbs, u8 event);
+static void l2cap_retrans_timeout(struct work_struct *work);
+static void l2cap_monitor_timeout(struct work_struct *work);
+static void l2cap_ack_timeout(struct work_struct *work);
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
@@ -476,6 +479,9 @@ struct l2cap_chan *l2cap_chan_create(void)
write_unlock(&chan_list_lock);
INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
+ INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
+ INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
+ INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
chan->state = BT_OPEN;
@@ -3320,10 +3326,6 @@ int l2cap_ertm_init(struct l2cap_chan *chan)
chan->rx_state = L2CAP_RX_STATE_RECV;
chan->tx_state = L2CAP_TX_STATE_XMIT;
- INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
- INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
- INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
-
skb_queue_head_init(&chan->srej_q);
err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
@@ -4307,6 +4309,12 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
}
}
+ chan = l2cap_chan_hold_unless_zero(chan);
+ if (!chan) {
+ err = -EBADSLT;
+ goto unlock;
+ }
+
err = 0;
l2cap_chan_lock(chan);
@@ -4336,6 +4344,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
}
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
unlock:
mutex_unlock(&conn->chan_lock);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 72e6595a71cc..a92e7e485feb 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -129,6 +129,10 @@ static const u16 mgmt_commands[] = {
MGMT_OP_ADD_EXT_ADV_PARAMS,
MGMT_OP_ADD_EXT_ADV_DATA,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
+ MGMT_OP_SET_MESH_RECEIVER,
+ MGMT_OP_MESH_READ_FEATURES,
+ MGMT_OP_MESH_SEND,
+ MGMT_OP_MESH_SEND_CANCEL,
};
static const u16 mgmt_events[] = {
@@ -1048,9 +1052,66 @@ static void discov_off(struct work_struct *work)
hci_dev_unlock(hdev);
}
+static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
+
+static void mesh_send_complete(struct hci_dev *hdev,
+ struct mgmt_mesh_tx *mesh_tx, bool silent)
+{
+ u8 handle = mesh_tx->handle;
+
+ if (!silent)
+ mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
+ sizeof(handle), NULL);
+
+ mgmt_mesh_remove(mesh_tx);
+}
+
+static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_mesh_tx *mesh_tx;
+
+ hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
+ hci_disable_advertising_sync(hdev);
+ mesh_tx = mgmt_mesh_next(hdev, NULL);
+
+ if (mesh_tx)
+ mesh_send_complete(hdev, mesh_tx, false);
+
+ return 0;
+}
+
+static int mesh_send_sync(struct hci_dev *hdev, void *data);
+static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
+static void mesh_next(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
+
+ if (!mesh_tx)
+ return;
+
+ err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
+ mesh_send_start_complete);
+
+ if (err < 0)
+ mesh_send_complete(hdev, mesh_tx, false);
+ else
+ hci_dev_set_flag(hdev, HCI_MESH_SENDING);
+}
+
+static void mesh_send_done(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ mesh_send_done.work);
+
+ if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
+ return;
+
+ hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
+}
+
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
- if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
+ if (hci_dev_test_flag(hdev, HCI_MGMT))
return;
BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
@@ -1058,6 +1119,7 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
+ INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
/* Non-mgmt controlled devices get this bit set
* implicitly so that pairing works for them, however
@@ -1065,6 +1127,8 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
* it
*/
hci_dev_clear_flag(hdev, HCI_BONDABLE);
+
+ hci_dev_set_flag(hdev, HCI_MGMT);
}
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
@@ -2058,6 +2122,8 @@ static int set_le_sync(struct hci_dev *hdev, void *data)
int err;
if (!val) {
+ hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
+
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
hci_disable_advertising_sync(hdev);
@@ -2092,6 +2158,317 @@ static int set_le_sync(struct hci_dev *hdev, void *data)
return err;
}
+static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ u8 status = mgmt_status(err);
+ struct sock *sk = cmd->sk;
+
+ if (status) {
+ mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
+ cmd_status_rsp, &status);
+ return;
+ }
+
+ mgmt_pending_remove(cmd);
+ mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
+}
+
+static int set_mesh_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_set_mesh *cp = cmd->param;
+ size_t len = cmd->param_len;
+
+ memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
+
+ if (cp->enable)
+ hci_dev_set_flag(hdev, HCI_MESH);
+ else
+ hci_dev_clear_flag(hdev, HCI_MESH);
+
+ len -= sizeof(*cp);
+
+	/* Copy the AD type filters only if they fit; an empty list
+	 * means all adv pkts are forwarded.
+	 */
+ if (len <= sizeof(hdev->mesh_ad_types))
+ memcpy(hdev->mesh_ad_types, cp->ad_types, len);
+
+ hci_update_passive_scan_sync(hdev);
+ return 0;
+}
+
+static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_cp_set_mesh *cp = data;
+ struct mgmt_pending_cmd *cmd;
+ int err = 0;
+
+ bt_dev_dbg(hdev, "sock %p", sk);
+
+ if (!lmp_le_capable(hdev) ||
+ !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->enable != 0x00 && cp->enable != 0x01)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
+ if (!cmd)
+ err = -ENOMEM;
+ else
+ err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
+ set_mesh_complete);
+
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
+ MGMT_STATUS_FAILED);
+
+ if (cmd)
+ mgmt_pending_remove(cmd);
+ }
+
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_mesh_tx *mesh_tx = data;
+ struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
+ unsigned long mesh_send_interval;
+ u8 mgmt_err = mgmt_status(err);
+
+ /* Report any errors here, but don't report completion */
+
+ if (mgmt_err) {
+ hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
+ /* Send Complete Error Code for handle */
+ mesh_send_complete(hdev, mesh_tx, false);
+ return;
+ }
+
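+	/* Allow roughly 25 ms of air time per requested transmission */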
+ mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
+ queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
+ mesh_send_interval);
+}
+
+static int mesh_send_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_mesh_tx *mesh_tx = data;
+ struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
+ struct adv_info *adv, *next_instance;
+ u8 instance = hdev->le_num_of_adv_sets + 1;
+ u16 timeout, duration;
+ int err = 0;
+
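+	/* Mesh packets go out on instance le_num_of_adv_sets + 1, beyond
+	 * the externally visible sets, so fail if every set is in use.
+	 */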
+ if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
+ return MGMT_STATUS_BUSY;
+
+ timeout = 1000;
+ duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
+ adv = hci_add_adv_instance(hdev, instance, 0,
+ send->adv_data_len, send->adv_data,
+ 0, NULL,
+ timeout, duration,
+ HCI_ADV_TX_POWER_NO_PREFERENCE,
+ hdev->le_adv_min_interval,
+ hdev->le_adv_max_interval,
+ mesh_tx->handle);
+
+ if (!IS_ERR(adv))
+ mesh_tx->instance = instance;
+ else
+ err = PTR_ERR(adv);
+
+ if (hdev->cur_adv_instance == instance) {
+ /* If the currently advertised instance is being changed then
+ * cancel the current advertising and schedule the next
+ * instance. If there is only one instance then the overridden
+ * advertising data will be visible right away.
+ */
+ cancel_adv_timeout(hdev);
+
+ next_instance = hci_get_next_instance(hdev, instance);
+ if (next_instance)
+ instance = next_instance->instance;
+ else
+ instance = 0;
+ } else if (hdev->adv_instance_timeout) {
+		/* Immediately advertise the new instance if no other is
+		 * active, or let it go out naturally from the queue once
+		 * current advertising finishes.
+		 */
+ instance = 0;
+ }
+
+ if (instance)
+ return hci_schedule_adv_instance_sync(hdev, instance, true);
+
+ return err;
+}
+
+static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
+{
+ struct mgmt_rp_mesh_read_features *rp = data;
+
+ if (rp->used_handles >= rp->max_handles)
+ return;
+
+ rp->handles[rp->used_handles++] = mesh_tx->handle;
+}
+
+static int mesh_features(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_rp_mesh_read_features rp;
+
+ if (!lmp_le_capable(hdev) ||
+ !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ memset(&rp, 0, sizeof(rp));
+ rp.index = cpu_to_le16(hdev->id);
+ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ rp.max_handles = MESH_HANDLES_MAX;
+
+ hci_dev_lock(hdev);
+
+ if (rp.max_handles)
+ mgmt_mesh_foreach(hdev, send_count, &rp, sk);
+
+ mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
+ rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
+
+ hci_dev_unlock(hdev);
+ return 0;
+}
+
+static int send_cancel(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
+ struct mgmt_mesh_tx *mesh_tx;
+
+ if (!cancel->handle) {
+ do {
+ mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
+
+ if (mesh_tx)
+ mesh_send_complete(hdev, mesh_tx, false);
+ } while (mesh_tx);
+ } else {
+ mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
+
+ if (mesh_tx && mesh_tx->sk == cmd->sk)
+ mesh_send_complete(hdev, mesh_tx, false);
+ }
+
+ mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
+ 0, NULL, 0);
+ mgmt_pending_free(cmd);
+
+ return 0;
+}
+
+static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_pending_cmd *cmd;
+ int err;
+
+ if (!lmp_le_capable(hdev) ||
+ !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
+ MGMT_STATUS_REJECTED);
+
+ hci_dev_lock(hdev);
+ cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
+ if (!cmd)
+ err = -ENOMEM;
+ else
+ err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
+
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
+ MGMT_STATUS_FAILED);
+
+ if (cmd)
+ mgmt_pending_free(cmd);
+ }
+
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_mesh_tx *mesh_tx;
+ struct mgmt_cp_mesh_send *send = data;
+ struct mgmt_rp_mesh_read_features rp;
+ bool sending;
+ int err = 0;
+
+ if (!lmp_le_capable(hdev) ||
+ !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
+ MGMT_STATUS_NOT_SUPPORTED);
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
+ len <= MGMT_MESH_SEND_SIZE ||
+ len > (MGMT_MESH_SEND_SIZE + 31))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
+ MGMT_STATUS_REJECTED);
+
+ hci_dev_lock(hdev);
+
+ memset(&rp, 0, sizeof(rp));
+ rp.max_handles = MESH_HANDLES_MAX;
+
+ mgmt_mesh_foreach(hdev, send_count, &rp, sk);
+
+ if (rp.max_handles <= rp.used_handles) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
+ MGMT_STATUS_BUSY);
+ goto done;
+ }
+
+ sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
+ mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
+
+ if (!mesh_tx)
+ err = -ENOMEM;
+ else if (!sending)
+ err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
+ mesh_send_start_complete);
+
+ if (err < 0) {
+ bt_dev_err(hdev, "Send Mesh Failed %d", err);
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
+ MGMT_STATUS_FAILED);
+
+		if (mesh_tx && sending)
+			mgmt_mesh_remove(mesh_tx);
+ } else {
+ hci_dev_set_flag(hdev, HCI_MESH_SENDING);
+
+ mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
+ &mesh_tx->handle, 1);
+ }
+
+done:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_mode *cp = data;
@@ -2131,9 +2508,6 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
val = !!cp->val;
enabled = lmp_host_le_capable(hdev);
- if (!val)
- hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
-
if (!hdev_is_powered(hdev) || val == enabled) {
bool changed = false;
@@ -3186,6 +3560,18 @@ unlock:
return err;
}
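+/* The connection handle is smuggled through the data pointer with
+ * ERR_PTR() to avoid a heap allocation.
+ */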
+static int abort_conn_sync(struct hci_dev *hdev, void *data)
+{
+ struct hci_conn *conn;
+ u16 handle = PTR_ERR(data);
+
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!conn)
+ return 0;
+
+ return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
+}
+
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -3236,7 +3622,8 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
le_addr_type(addr->type));
if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
- hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
+ hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
+ NULL);
unlock:
hci_dev_unlock(hdev);
@@ -3991,17 +4378,28 @@ static const u8 iso_socket_uuid[16] = {
0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};
+/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
+static const u8 mgmt_mesh_uuid[16] = {
+ 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
+ 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
+};
+
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
{
- char buf[122]; /* Enough space for 6 features: 2 + 20 * 6 */
- struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
+ struct mgmt_rp_read_exp_features_info *rp;
+ size_t len;
u16 idx = 0;
u32 flags;
+ int status;
bt_dev_dbg(hdev, "sock %p", sk);
- memset(&buf, 0, sizeof(buf));
+ /* Enough space for 7 features */
+ len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
+ rp = kzalloc(len, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
#ifdef CONFIG_BT_FEATURE_DEBUG
if (!hdev) {
@@ -4065,6 +4463,17 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
idx++;
}
+ if (hdev && lmp_le_capable(hdev)) {
+ if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
+ flags = BIT(0);
+ else
+ flags = 0;
+
+ memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
+ rp->features[idx].flags = cpu_to_le32(flags);
+ idx++;
+ }
+
rp->feature_count = cpu_to_le16(idx);
/* After reading the experimental features information, enable
@@ -4072,9 +4481,12 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
*/
hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
- return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
- MGMT_OP_READ_EXP_FEATURES_INFO,
- 0, rp, sizeof(*rp) + (20 * idx));
+ status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
+ MGMT_OP_READ_EXP_FEATURES_INFO,
+ 0, rp, sizeof(*rp) + (20 * idx));
+
+ kfree(rp);
+ return status;
}
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
@@ -4202,6 +4614,63 @@ static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
}
#endif
+static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_cp_set_exp_feature *cp, u16 data_len)
+{
+ struct mgmt_rp_set_exp_feature rp;
+ bool val, changed;
+ int err;
+
+	/* Command requires the controller index */
+ if (!hdev)
+ return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_INDEX);
+
+ /* Changes can only be made when controller is powered down */
+ if (hdev_is_powered(hdev))
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_REJECTED);
+
+ /* Parameters are limited to a single octet */
+ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ /* Only boolean on/off is supported */
+ if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ val = !!cp->param[0];
+
+ if (val) {
+ changed = !hci_dev_test_and_set_flag(hdev,
+ HCI_MESH_EXPERIMENTAL);
+ } else {
+ hci_dev_clear_flag(hdev, HCI_MESH);
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_MESH_EXPERIMENTAL);
+ }
+
+ memcpy(rp.uuid, mgmt_mesh_uuid, 16);
+ rp.flags = cpu_to_le32(val ? BIT(0) : 0);
+
+ hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
+
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE, 0,
+ &rp, sizeof(rp));
+
+ if (changed)
+ exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
+
+ return err;
+}
+
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
struct mgmt_cp_set_exp_feature *cp,
u16 data_len)
@@ -4517,6 +4986,7 @@ static const struct mgmt_exp_feature {
#ifdef CONFIG_BT_FEATURE_DEBUG
EXP_FEAT(debug_uuid, set_debug_func),
#endif
+ EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
EXP_FEAT(quality_report_uuid, set_quality_report_func),
EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
@@ -5981,6 +6451,7 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
if (!hdev_is_powered(hdev) ||
(val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
(cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
+ hci_dev_test_flag(hdev, HCI_MESH) ||
hci_conn_num(hdev, LE_LINK) > 0 ||
(hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
hdev->le_scan_type == LE_SCAN_ACTIVE)) {
@@ -7909,8 +8380,7 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev)
/* In extended adv TX_POWER returned from Set Adv Param
* will be always valid.
*/
- if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
- ext_adv_capable(hdev))
+ if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
flags |= MGMT_ADV_FLAG_TX_POWER;
if (ext_adv_capable(hdev)) {
@@ -7963,8 +8433,14 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
instance = rp->instance;
list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
- *instance = adv_instance->instance;
- instance++;
+ /* Only instances 1-le_num_of_adv_sets are externally visible */
+ if (adv_instance->instance <= hdev->adv_instance_cnt) {
+ *instance = adv_instance->instance;
+ instance++;
+ } else {
+ rp->num_instances--;
+ rp_len--;
+ }
}
hci_dev_unlock(hdev);
@@ -8226,7 +8702,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
timeout, duration,
HCI_ADV_TX_POWER_NO_PREFERENCE,
hdev->le_adv_min_interval,
- hdev->le_adv_max_interval);
+ hdev->le_adv_max_interval, 0);
if (IS_ERR(adv)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_FAILED);
@@ -8430,7 +8906,7 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
/* Create advertising instance with no advertising or response data */
adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
timeout, duration, tx_power, min_interval,
- max_interval);
+ max_interval, 0);
if (IS_ERR(adv)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
@@ -8876,8 +9352,13 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
{ add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
HCI_MGMT_VAR_LEN },
{ add_adv_patterns_monitor_rssi,
- MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
+ MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE },
+ { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
+ HCI_MGMT_VAR_LEN },
+ { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
+ { mesh_send, MGMT_MESH_SEND_SIZE,
HCI_MGMT_VAR_LEN },
+ { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
};
void mgmt_index_added(struct hci_dev *hdev)
@@ -9817,14 +10298,86 @@ static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
kfree_skb(skb);
}
+static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, s8 rssi, u32 flags, u8 *eir,
+ u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
+ u64 instant)
+{
+ struct sk_buff *skb;
+ struct mgmt_ev_mesh_device_found *ev;
+ int i, j;
+
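+	/* An empty filter list means every mesh advertisement is accepted */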
+ if (!hdev->mesh_ad_types[0])
+ goto accepted;
+
+ /* Scan for requested AD types */
+ if (eir_len > 0) {
+ for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
+ for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
+ if (!hdev->mesh_ad_types[j])
+ break;
+
+ if (hdev->mesh_ad_types[j] == eir[i + 1])
+ goto accepted;
+ }
+ }
+ }
+
+ if (scan_rsp_len > 0) {
+ for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
+ for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
+ if (!hdev->mesh_ad_types[j])
+ break;
+
+ if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
+ goto accepted;
+ }
+ }
+ }
+
+ return;
+
+accepted:
+ skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
+ sizeof(*ev) + eir_len + scan_rsp_len);
+ if (!skb)
+ return;
+
+ ev = skb_put(skb, sizeof(*ev));
+
+ bacpy(&ev->addr.bdaddr, bdaddr);
+ ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
+ ev->rssi = rssi;
+ ev->flags = cpu_to_le32(flags);
+ ev->instant = cpu_to_le64(instant);
+
+ if (eir_len > 0)
+ /* Copy EIR or advertising data into event */
+ skb_put_data(skb, eir, eir_len);
+
+ if (scan_rsp_len > 0)
+ /* Append scan response data to event */
+ skb_put_data(skb, scan_rsp, scan_rsp_len);
+
+ ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
+
+ mgmt_event_skb(skb, NULL);
+}
+
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
- u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
+ u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
+ u64 instant)
{
struct sk_buff *skb;
struct mgmt_ev_device_found *ev;
bool report_device = hci_discovery_active(hdev);
+ if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
+ mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
+ eir, eir_len, scan_rsp, scan_rsp_len,
+ instant);
+
/* Don't send events for a non-kernel initiated discovery. With
* LE one exception is if we have pend_le_reports > 0 in which
* case we're doing passive scanning and want these events.
@@ -9983,3 +10536,22 @@ void mgmt_exit(void)
{
hci_mgmt_chan_unregister(&chan);
}
+
+void mgmt_cleanup(struct sock *sk)
+{
+ struct mgmt_mesh_tx *mesh_tx;
+ struct hci_dev *hdev;
+
+ read_lock(&hci_dev_list_lock);
+
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ do {
+ mesh_tx = mgmt_mesh_next(hdev, sk);
+
+ if (mesh_tx)
+ mesh_send_complete(hdev, mesh_tx, true);
+ } while (mesh_tx);
+ }
+
+ read_unlock(&hci_dev_list_lock);
+}
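
The mesh receiver above filters advertising reports by AD type before forwarding
them: advertising data is a sequence of [length][type][payload] records, and a
report is accepted only if one record's type matches an entry in
hdev->mesh_ad_types (the list is terminated by its first zero entry). A minimal
standalone sketch of that walk, with hypothetical names:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative only: return true if any AD record in eir matches one
     * of the wanted AD types; a zero entry ends the wanted list, as in
     * mesh_device_found() above.
     */
    static bool ad_type_matches(const uint8_t *eir, uint16_t eir_len,
                                const uint8_t *wanted, size_t n_wanted)
    {
            for (uint16_t i = 0; i + 1 < eir_len; i += eir[i] + 1) {
                    for (size_t j = 0; j < n_wanted && wanted[j]; j++) {
                            if (wanted[j] == eir[i + 1])
                                    return true;
                    }
            }
            return false;
    }
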
diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
index b69cfed62088..0115f783bde8 100644
--- a/net/bluetooth/mgmt_util.c
+++ b/net/bluetooth/mgmt_util.c
@@ -314,3 +314,77 @@ void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
list_del(&cmd->list);
mgmt_pending_free(cmd);
}
+
+void mgmt_mesh_foreach(struct hci_dev *hdev,
+ void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data),
+ void *data, struct sock *sk)
+{
+ struct mgmt_mesh_tx *mesh_tx, *tmp;
+
+	list_for_each_entry_safe(mesh_tx, tmp, &hdev->mesh_pending, list) {
+ if (!sk || mesh_tx->sk == sk)
+ cb(mesh_tx, data);
+ }
+}
+
+struct mgmt_mesh_tx *mgmt_mesh_next(struct hci_dev *hdev, struct sock *sk)
+{
+ struct mgmt_mesh_tx *mesh_tx;
+
+ if (list_empty(&hdev->mesh_pending))
+ return NULL;
+
+ list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) {
+ if (!sk || mesh_tx->sk == sk)
+ return mesh_tx;
+ }
+
+ return NULL;
+}
+
+struct mgmt_mesh_tx *mgmt_mesh_find(struct hci_dev *hdev, u8 handle)
+{
+ struct mgmt_mesh_tx *mesh_tx;
+
+ if (list_empty(&hdev->mesh_pending))
+ return NULL;
+
+ list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) {
+ if (mesh_tx->handle == handle)
+ return mesh_tx;
+ }
+
+ return NULL;
+}
+
+struct mgmt_mesh_tx *mgmt_mesh_add(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_mesh_tx *mesh_tx;
+
+ mesh_tx = kzalloc(sizeof(*mesh_tx), GFP_KERNEL);
+ if (!mesh_tx)
+ return NULL;
+
+ hdev->mesh_send_ref++;
+ if (!hdev->mesh_send_ref)
+ hdev->mesh_send_ref++;
+
+ mesh_tx->handle = hdev->mesh_send_ref;
+ mesh_tx->index = hdev->id;
+ memcpy(mesh_tx->param, data, len);
+ mesh_tx->param_len = len;
+ mesh_tx->sk = sk;
+ sock_hold(sk);
+
+ list_add_tail(&mesh_tx->list, &hdev->mesh_pending);
+
+ return mesh_tx;
+}
+
+void mgmt_mesh_remove(struct mgmt_mesh_tx *mesh_tx)
+{
+ list_del(&mesh_tx->list);
+ sock_put(mesh_tx->sk);
+ kfree(mesh_tx);
+}
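
The handle allocator in mgmt_mesh_add() relies on u8 wrap-around: the per-device
counter increments on every transmission and skips zero after wrapping, so a
handle of 0 stays available as a "no handle" sentinel. A standalone sketch of
just that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* Sketch: uint8_t wraps 255 -> 0; the extra increment skips 0. Note
     * that, as in the code above, nothing guards against a still-pending
     * handle being reused after 255 sends.
     */
    static uint8_t next_handle(uint8_t *ref)
    {
            if (!++(*ref))
                    ++(*ref);
            return *ref;
    }

    int main(void)
    {
            uint8_t ref = 255;
            assert(next_handle(&ref) == 1); /* 255 wraps to 0, skipped */
            return 0;
    }
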
diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h
index 98e40395a383..6a8b7e84293d 100644
--- a/net/bluetooth/mgmt_util.h
+++ b/net/bluetooth/mgmt_util.h
@@ -20,6 +20,16 @@
SOFTWARE IS DISCLAIMED.
*/
+struct mgmt_mesh_tx {
+ struct list_head list;
+ int index;
+ size_t param_len;
+ struct sock *sk;
+ u8 handle;
+ u8 instance;
+ u8 param[sizeof(struct mgmt_cp_mesh_send) + 29];
+};
+
struct mgmt_pending_cmd {
struct list_head list;
u16 opcode;
@@ -59,3 +69,11 @@ struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
void *data, u16 len);
void mgmt_pending_free(struct mgmt_pending_cmd *cmd);
void mgmt_pending_remove(struct mgmt_pending_cmd *cmd);
+void mgmt_mesh_foreach(struct hci_dev *hdev,
+ void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data),
+ void *data, struct sock *sk);
+struct mgmt_mesh_tx *mgmt_mesh_find(struct hci_dev *hdev, u8 handle);
+struct mgmt_mesh_tx *mgmt_mesh_next(struct hci_dev *hdev, struct sock *sk);
+struct mgmt_mesh_tx *mgmt_mesh_add(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len);
+void mgmt_mesh_remove(struct mgmt_mesh_tx *mesh_tx);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 4bf4ea6cbb5e..21e24da4847f 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -902,7 +902,10 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
lock_sock(sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
+
+ release_sock(sk);
__rfcomm_sock_close(sk);
+ lock_sock(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
!(current->flags & PF_EXITING))
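
The rfcomm change drops the socket lock around __rfcomm_sock_close(), since the
close path takes locks of its own and must not run under lock_sock(). A
userspace analogy of the pattern (hedged; close_cb is a hypothetical callee that
takes the same mutex internally):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Drop the lock across a callee that locks internally, then retake
     * it for the state that still needs protection afterwards.
     */
    static void shutdown_then_linger(void (*close_cb)(void))
    {
            pthread_mutex_lock(&lock);
            /* ... mark shutdown state ... */
            pthread_mutex_unlock(&lock); /* avoid self-deadlock */
            close_cb();                  /* takes the lock itself */
            pthread_mutex_lock(&lock);
            /* ... linger handling under the lock ... */
            pthread_mutex_unlock(&lock);
    }
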
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 25d8ecf105aa..13d578ce2a09 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -606,6 +606,38 @@ noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
WARN_ON_ONCE(1);
}
+static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
+{
+ if (size > 2 * sizeof(int))
+ return NULL;
+
+ return (int *)p;
+}
+
+noinline int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size)
+{
+ return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
+}
+
+noinline int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
+{
+ return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
+}
+
+/* The next two can't really be used for testing, except to ensure
+ * that the verifier rejects the call.
+ * Acquire functions must return struct pointers, so these are
+ * expected to fail.
+ */
+noinline int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
+{
+ return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
+}
+
+noinline void bpf_kfunc_call_int_mem_release(int *p)
+{
+}
+
noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
{
@@ -712,6 +744,10 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
@@ -1634,6 +1670,7 @@ static int __init bpf_prog_test_run_init(void)
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
THIS_MODULE);
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 7776dc82f88d..89baa7c0938b 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -371,6 +371,13 @@ static struct devlink *devlink_get_from_attrs(struct net *net,
return ERR_PTR(-ENODEV);
}
+#define ASSERT_DEVLINK_PORT_REGISTERED(devlink_port) \
+ WARN_ON_ONCE(!(devlink_port)->registered)
+#define ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port) \
+ WARN_ON_ONCE((devlink_port)->registered)
+#define ASSERT_DEVLINK_PORT_INITIALIZED(devlink_port) \
+ WARN_ON_ONCE(!(devlink_port)->initialized)
+
static struct devlink_port *devlink_port_get_by_index(struct devlink *devlink,
unsigned int port_index)
{
@@ -9848,6 +9855,44 @@ static void devlink_port_type_warn_cancel(struct devlink_port *devlink_port)
}
/**
+ * devlink_port_init() - Init devlink port
+ *
+ * @devlink: devlink
+ * @devlink_port: devlink port
+ *
+ * Initialize the essential state needed by functions
+ * that may be called before devlink port registration.
+ * Calling this function is optional and unnecessary
+ * if the driver does not use such functions.
+ */
+void devlink_port_init(struct devlink *devlink,
+ struct devlink_port *devlink_port)
+{
+ if (devlink_port->initialized)
+ return;
+ devlink_port->devlink = devlink;
+ INIT_LIST_HEAD(&devlink_port->region_list);
+ devlink_port->initialized = true;
+}
+EXPORT_SYMBOL_GPL(devlink_port_init);
+
+/**
+ * devlink_port_fini() - Deinitialize devlink port
+ *
+ * @devlink_port: devlink port
+ *
+ * Deinitialize the essential state used by functions
+ * that may be called after devlink port unregistration.
+ * Calling this function is optional and unnecessary
+ * if the driver does not use such functions.
+ */
+void devlink_port_fini(struct devlink_port *devlink_port)
+{
+ WARN_ON(!list_empty(&devlink_port->region_list));
+}
+EXPORT_SYMBOL_GPL(devlink_port_fini);
+
+/**
* devl_port_register() - Register devlink port
*
* @devlink: devlink
@@ -9869,14 +9914,15 @@ int devl_port_register(struct devlink *devlink,
if (devlink_port_index_exists(devlink, port_index))
return -EEXIST;
- WARN_ON(devlink_port->devlink);
- devlink_port->devlink = devlink;
+ ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+
+ devlink_port_init(devlink, devlink_port);
+ devlink_port->registered = true;
devlink_port->index = port_index;
spin_lock_init(&devlink_port->type_lock);
INIT_LIST_HEAD(&devlink_port->reporter_list);
mutex_init(&devlink_port->reporters_lock);
list_add_tail(&devlink_port->list, &devlink->port_list);
- INIT_LIST_HEAD(&devlink_port->region_list);
INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
devlink_port_type_warn_schedule(devlink_port);
@@ -9926,8 +9972,8 @@ void devl_port_unregister(struct devlink_port *devlink_port)
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
list_del(&devlink_port->list);
WARN_ON(!list_empty(&devlink_port->reporter_list));
- WARN_ON(!list_empty(&devlink_port->region_list));
mutex_destroy(&devlink_port->reporters_lock);
+ devlink_port->registered = false;
}
EXPORT_SYMBOL_GPL(devl_port_unregister);
@@ -9952,8 +9998,8 @@ static void __devlink_port_type_set(struct devlink_port *devlink_port,
enum devlink_port_type type,
void *type_dev)
{
- if (WARN_ON(!devlink_port->devlink))
- return;
+ ASSERT_DEVLINK_PORT_REGISTERED(devlink_port);
+
devlink_port_type_warn_cancel(devlink_port);
spin_lock_bh(&devlink_port->type_lock);
devlink_port->type = type;
@@ -10072,8 +10118,8 @@ void devlink_port_attrs_set(struct devlink_port *devlink_port,
{
int ret;
- if (WARN_ON(devlink_port->devlink))
- return;
+ ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+
devlink_port->attrs = *attrs;
ret = __devlink_port_attrs_set(devlink_port, attrs->flavour);
if (ret)
@@ -10096,8 +10142,8 @@ void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 contro
struct devlink_port_attrs *attrs = &devlink_port->attrs;
int ret;
- if (WARN_ON(devlink_port->devlink))
- return;
+ ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+
ret = __devlink_port_attrs_set(devlink_port,
DEVLINK_PORT_FLAVOUR_PCI_PF);
if (ret)
@@ -10123,8 +10169,8 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 contro
struct devlink_port_attrs *attrs = &devlink_port->attrs;
int ret;
- if (WARN_ON(devlink_port->devlink))
- return;
+ ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+
ret = __devlink_port_attrs_set(devlink_port,
DEVLINK_PORT_FLAVOUR_PCI_VF);
if (ret)
@@ -10151,8 +10197,8 @@ void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 contro
struct devlink_port_attrs *attrs = &devlink_port->attrs;
int ret;
- if (WARN_ON(devlink_port->devlink))
- return;
+ ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+
ret = __devlink_port_attrs_set(devlink_port,
DEVLINK_PORT_FLAVOUR_PCI_SF);
if (ret)
@@ -10267,8 +10313,8 @@ EXPORT_SYMBOL_GPL(devl_rate_nodes_destroy);
void devlink_port_linecard_set(struct devlink_port *devlink_port,
struct devlink_linecard *linecard)
{
- if (WARN_ON(devlink_port->devlink))
- return;
+ ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+
devlink_port->linecard = linecard;
}
EXPORT_SYMBOL_GPL(devlink_port_linecard_set);
@@ -11339,6 +11385,8 @@ devlink_port_region_create(struct devlink_port *port,
struct devlink_region *region;
int err = 0;
+ ASSERT_DEVLINK_PORT_INITIALIZED(port);
+
if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
return ERR_PTR(-EINVAL);
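
Taken together, the devlink changes split the port lifecycle into "initialized"
(usable with pre-registration helpers such as port region creation) and
"registered". A hedged sketch of the call order a driver might now use, with
error handling trimmed and example_ops as a placeholder:

    /* Sketch only; not a definitive driver implementation. */
    static int example_port_probe(struct devlink *dl,
                                  struct devlink_port *dp,
                                  const struct devlink_port_region_ops *ops)
    {
            struct devlink_region *r;

            devlink_port_init(dl, dp);     /* sets ->devlink, region list */

            r = devlink_port_region_create(dp, ops, 1, 1 << 16);
            if (IS_ERR(r))                 /* allowed before registration */
                    return PTR_ERR(r);

            return devl_port_register(dl, dp, 0); /* flips "registered" */
    }
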
diff --git a/net/core/filter.c b/net/core/filter.c
index 31608801078e..bb0136e7a8e4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -18,6 +18,7 @@
*/
#include <linux/atomic.h>
+#include <linux/bpf_verifier.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
@@ -5101,6 +5102,59 @@ static int bpf_sol_tcp_setsockopt(struct sock *sk, int optname,
return 0;
}
+static int sol_tcp_sockopt_congestion(struct sock *sk, char *optval,
+ int *optlen, bool getopt)
+{
+ struct tcp_sock *tp;
+ int ret;
+
+ if (*optlen < 2)
+ return -EINVAL;
+
+ if (getopt) {
+ if (!inet_csk(sk)->icsk_ca_ops)
+ return -EINVAL;
+ /* BPF expects NULL-terminated tcp-cc string */
+ optval[--(*optlen)] = '\0';
+ return do_tcp_getsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ KERNEL_SOCKPTR(optval),
+ KERNEL_SOCKPTR(optlen));
+ }
+
+	/* "cdg" is the only cc that allocates a ptr
+	 * in the inet_csk_ca area. A bpf-tcp-cc may
+	 * overwrite this ptr after switching to cdg.
+	 */
+ if (*optlen >= sizeof("cdg") - 1 && !strncmp("cdg", optval, *optlen))
+ return -ENOTSUPP;
+
+	/* This stops the following loop:
+	 *
+	 * .init => bpf_setsockopt(tcp_cc) => .init =>
+	 * bpf_setsockopt(tcp_cc) => .init => ....
+	 *
+	 * The second bpf_setsockopt(tcp_cc) is not allowed
+	 * in order to break the loop when both .init
+	 * are the same bpf prog.
+	 *
+	 * This applies even if the second bpf_setsockopt(tcp_cc)
+	 * would not cause a loop. It means only the first
+	 * '.init' can call bpf_setsockopt(TCP_CONGESTION) to
+	 * pick a fallback cc (e.g. when the peer does not
+	 * support ECN) and the second '.init' cannot fall
+	 * back to yet another one.
+	 */
+ tp = tcp_sk(sk);
+ if (tp->bpf_chg_cc_inprogress)
+ return -EBUSY;
+
+ tp->bpf_chg_cc_inprogress = 1;
+ ret = do_tcp_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ KERNEL_SOCKPTR(optval), *optlen);
+ tp->bpf_chg_cc_inprogress = 0;
+ return ret;
+}
+
static int sol_tcp_sockopt(struct sock *sk, int optname,
char *optval, int *optlen,
bool getopt)
@@ -5124,9 +5178,7 @@ static int sol_tcp_sockopt(struct sock *sk, int optname,
return -EINVAL;
break;
case TCP_CONGESTION:
- if (*optlen < 2)
- return -EINVAL;
- break;
+ return sol_tcp_sockopt_congestion(sk, optval, optlen, getopt);
case TCP_SAVED_SYN:
if (*optlen < 1)
return -EINVAL;
@@ -5151,13 +5203,6 @@ static int sol_tcp_sockopt(struct sock *sk, int optname,
return 0;
}
- if (optname == TCP_CONGESTION) {
- if (!inet_csk(sk)->icsk_ca_ops)
- return -EINVAL;
- /* BPF expects NULL-terminated tcp-cc string */
- optval[--(*optlen)] = '\0';
- }
-
return do_tcp_getsockopt(sk, SOL_TCP, optname,
KERNEL_SOCKPTR(optval),
KERNEL_SOCKPTR(optlen));
@@ -5284,12 +5329,6 @@ static int _bpf_getsockopt(struct sock *sk, int level, int optname,
BPF_CALL_5(bpf_sk_setsockopt, struct sock *, sk, int, level,
int, optname, char *, optval, int, optlen)
{
- if (level == SOL_TCP && optname == TCP_CONGESTION) {
- if (optlen >= sizeof("cdg") - 1 &&
- !strncmp("cdg", optval, optlen))
- return -ENOTSUPP;
- }
-
return _bpf_setsockopt(sk, level, optname, optval, optlen);
}
@@ -8605,6 +8644,36 @@ static bool tc_cls_act_is_valid_access(int off, int size,
return bpf_skb_is_valid_access(off, size, type, prog, info);
}
+DEFINE_MUTEX(nf_conn_btf_access_lock);
+EXPORT_SYMBOL_GPL(nf_conn_btf_access_lock);
+
+int (*nfct_btf_struct_access)(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, int off, int size,
+ enum bpf_access_type atype, u32 *next_btf_id,
+ enum bpf_type_flag *flag);
+EXPORT_SYMBOL_GPL(nfct_btf_struct_access);
+
+static int tc_cls_act_btf_struct_access(struct bpf_verifier_log *log,
+ const struct btf *btf,
+ const struct btf_type *t, int off,
+ int size, enum bpf_access_type atype,
+ u32 *next_btf_id,
+ enum bpf_type_flag *flag)
+{
+ int ret = -EACCES;
+
+ if (atype == BPF_READ)
+ return btf_struct_access(log, btf, t, off, size, atype, next_btf_id,
+ flag);
+
+ mutex_lock(&nf_conn_btf_access_lock);
+ if (nfct_btf_struct_access)
+ ret = nfct_btf_struct_access(log, btf, t, off, size, atype, next_btf_id, flag);
+ mutex_unlock(&nf_conn_btf_access_lock);
+
+ return ret;
+}
+
static bool __is_valid_xdp_access(int off, int size)
{
if (off < 0 || off >= sizeof(struct xdp_md))
@@ -8664,6 +8733,27 @@ void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog,
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
+static int xdp_btf_struct_access(struct bpf_verifier_log *log,
+ const struct btf *btf,
+ const struct btf_type *t, int off,
+ int size, enum bpf_access_type atype,
+ u32 *next_btf_id,
+ enum bpf_type_flag *flag)
+{
+ int ret = -EACCES;
+
+ if (atype == BPF_READ)
+ return btf_struct_access(log, btf, t, off, size, atype, next_btf_id,
+ flag);
+
+ mutex_lock(&nf_conn_btf_access_lock);
+ if (nfct_btf_struct_access)
+ ret = nfct_btf_struct_access(log, btf, t, off, size, atype, next_btf_id, flag);
+ mutex_unlock(&nf_conn_btf_access_lock);
+
+ return ret;
+}
+
static bool sock_addr_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
@@ -10558,6 +10648,7 @@ const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
.convert_ctx_access = tc_cls_act_convert_ctx_access,
.gen_prologue = tc_cls_act_prologue,
.gen_ld_abs = bpf_gen_ld_abs,
+ .btf_struct_access = tc_cls_act_btf_struct_access,
};
const struct bpf_prog_ops tc_cls_act_prog_ops = {
@@ -10569,6 +10660,7 @@ const struct bpf_verifier_ops xdp_verifier_ops = {
.is_valid_access = xdp_is_valid_access,
.convert_ctx_access = xdp_convert_ctx_access,
.gen_prologue = bpf_noop_prologue,
+ .btf_struct_access = xdp_btf_struct_access,
};
const struct bpf_prog_ops xdp_prog_ops = {
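
The two btf_struct_access callbacks above share one mechanism: writes to a BTF
pointer are rejected unless the netfilter module has installed
nfct_btf_struct_access, which is published behind a mutex so the module can load
and unload safely. A userspace model of that indirection:

    #include <pthread.h>
    #include <stddef.h>

    #define EACCES_ERR (-13)

    static pthread_mutex_t hook_lock = PTHREAD_MUTEX_INITIALIZER;
    static int (*write_hook)(int off, int size); /* NULL until "module" loads */

    /* Reads take the default path; writes are delegated to the hook,
     * or fail with -EACCES when nothing is registered.
     */
    static int check_access(int off, int size, int is_read)
    {
            int ret = EACCES_ERR;

            if (is_read)
                    return 0;

            pthread_mutex_lock(&hook_lock);
            if (write_hook)
                    ret = write_hook(off, size);
            pthread_mutex_unlock(&hook_lock);
            return ret;
    }
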
diff --git a/net/core/gro.c b/net/core/gro.c
index b4190eb08467..bc9451743307 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -160,6 +160,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
unsigned int gro_max_size;
unsigned int new_truesize;
struct sk_buff *lp;
+ int segs;
/* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
gro_max_size = READ_ONCE(p->dev->gro_max_size);
@@ -175,6 +176,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
return -E2BIG;
}
+ segs = NAPI_GRO_CB(skb)->count;
lp = NAPI_GRO_CB(p)->last;
pinfo = skb_shinfo(lp);
@@ -265,7 +267,7 @@ merge:
lp = p;
done:
- NAPI_GRO_CB(p)->count++;
+ NAPI_GRO_CB(p)->count += segs;
p->data_len += len;
p->truesize += delta_truesize;
p->len += len;
@@ -496,8 +498,15 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
sizeof(u32))); /* Avoid slow unaligned acc */
*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
- NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
+ NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
NAPI_GRO_CB(skb)->is_atomic = 1;
+ NAPI_GRO_CB(skb)->count = 1;
+ if (unlikely(skb_is_gso(skb))) {
+ NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
+ /* Only support TCP at the moment. */
+ if (!skb_is_gso_tcp(skb))
+ NAPI_GRO_CB(skb)->flush = 1;
+ }
/* Setup for GRO checksum validation */
switch (skb->ip_summed) {
@@ -545,10 +554,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
else
gro_list->count++;
- NAPI_GRO_CB(skb)->count = 1;
NAPI_GRO_CB(skb)->age = jiffies;
NAPI_GRO_CB(skb)->last = skb;
- skb_shinfo(skb)->gso_size = skb_gro_len(skb);
+ if (!skb_is_gso(skb))
+ skb_shinfo(skb)->gso_size = skb_gro_len(skb);
list_add(&skb->list, &gro_list->list);
ret = GRO_HELD;
@@ -660,6 +669,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->encapsulation = 0;
skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
if (unlikely(skb->slow_gro)) {
skb_orphan(skb);
skb_ext_reset(skb);
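
The gro.c hunks teach the GRO engine to accept packets that are already GSO
aggregates: the segment counter starts from gso_segs instead of 1, and merging
adds the incoming packet's count rather than incrementing. A worked example of
the accounting:

    #include <assert.h>

    int main(void)
    {
            int held_count = 2;     /* held packet spans 2 MSS segments */
            int incoming_segs = 3;  /* NAPI_GRO_CB(skb)->count of a GSO skb */

            held_count += incoming_segs;  /* was: held_count++ */
            assert(held_count == 5);      /* gso_segs stays truthful */
            return 0;
    }
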
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index 9ccd64e8a666..6fac2f0ef074 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -50,6 +50,7 @@ static const char *lwtunnel_encap_str(enum lwtunnel_encap_types encap_type)
return "IOAM6";
case LWTUNNEL_ENCAP_IP6:
case LWTUNNEL_ENCAP_IP:
+ case LWTUNNEL_ENCAP_XFRM:
case LWTUNNEL_ENCAP_NONE:
case __LWTUNNEL_ENCAP_MAX:
/* should not have got here */
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 188f8558d27d..ca70525621c7 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -434,8 +434,10 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
if (copied + copy > len)
copy = len - copied;
copy = copy_page_to_iter(page, sge->offset, copy, iter);
- if (!copy)
- return copied ? copied : -EFAULT;
+ if (!copy) {
+ copied = copied ? copied : -EFAULT;
+ goto out;
+ }
copied += copy;
if (likely(!peek)) {
@@ -455,7 +457,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
* didn't copy the entire length lets just break.
*/
if (copy != sge->length)
- return copied;
+ goto out;
sk_msg_iter_var_next(i);
}
@@ -477,7 +479,9 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
}
msg_rx = sk_psock_peek_msg(psock);
}
-
+out:
+ if (psock->work_state.skb && copied > 0)
+ schedule_work(&psock->work);
return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
diff --git a/net/core/stream.c b/net/core/stream.c
index ccc083cdef23..1105057ce00a 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -159,7 +159,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
*timeo_p = current_timeo;
}
out:
- remove_wait_queue(sk_sleep(sk), &wait);
+ if (!sock_flag(sk, SOCK_DEAD))
+ remove_wait_queue(sk_sleep(sk), &wait);
return err;
do_error:
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 7024e2120de1..af0e2c0394ac 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -461,6 +461,72 @@ static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
dp->cpu_dp = NULL;
}
+static int dsa_port_devlink_setup(struct dsa_port *dp)
+{
+ struct devlink_port *dlp = &dp->devlink_port;
+ struct dsa_switch_tree *dst = dp->ds->dst;
+ struct devlink_port_attrs attrs = {};
+ struct devlink *dl = dp->ds->devlink;
+ struct dsa_switch *ds = dp->ds;
+ const unsigned char *id;
+ unsigned char len;
+ int err;
+
+ memset(dlp, 0, sizeof(*dlp));
+ devlink_port_init(dl, dlp);
+
+ if (ds->ops->port_setup) {
+ err = ds->ops->port_setup(ds, dp->index);
+ if (err)
+ return err;
+ }
+
+ id = (const unsigned char *)&dst->index;
+ len = sizeof(dst->index);
+
+ attrs.phys.port_number = dp->index;
+ memcpy(attrs.switch_id.id, id, len);
+ attrs.switch_id.id_len = len;
+
+ switch (dp->type) {
+ case DSA_PORT_TYPE_UNUSED:
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
+ break;
+ case DSA_PORT_TYPE_CPU:
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
+ break;
+ case DSA_PORT_TYPE_DSA:
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
+ break;
+ case DSA_PORT_TYPE_USER:
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ break;
+ }
+
+ devlink_port_attrs_set(dlp, &attrs);
+ err = devlink_port_register(dl, dlp, dp->index);
+ if (err) {
+ if (ds->ops->port_teardown)
+ ds->ops->port_teardown(ds, dp->index);
+ return err;
+ }
+
+ return 0;
+}
+
+static void dsa_port_devlink_teardown(struct dsa_port *dp)
+{
+ struct devlink_port *dlp = &dp->devlink_port;
+ struct dsa_switch *ds = dp->ds;
+
+ devlink_port_unregister(dlp);
+
+ if (ds->ops->port_teardown)
+ ds->ops->port_teardown(ds, dp->index);
+
+ devlink_port_fini(dlp);
+}
+
static int dsa_port_setup(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
@@ -472,11 +538,9 @@ static int dsa_port_setup(struct dsa_port *dp)
if (dp->setup)
return 0;
- if (ds->ops->port_setup) {
- err = ds->ops->port_setup(ds, dp->index);
- if (err)
- return err;
- }
+ err = dsa_port_devlink_setup(dp);
+ if (err)
+ return err;
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
@@ -533,8 +597,7 @@ static int dsa_port_setup(struct dsa_port *dp)
if (err && dsa_port_link_registered)
dsa_shared_port_link_unregister_of(dp);
if (err) {
- if (ds->ops->port_teardown)
- ds->ops->port_teardown(ds, dp->index);
+ dsa_port_devlink_teardown(dp);
return err;
}
@@ -543,59 +606,13 @@ static int dsa_port_setup(struct dsa_port *dp)
return 0;
}
-static int dsa_port_devlink_setup(struct dsa_port *dp)
-{
- struct devlink_port *dlp = &dp->devlink_port;
- struct dsa_switch_tree *dst = dp->ds->dst;
- struct devlink_port_attrs attrs = {};
- struct devlink *dl = dp->ds->devlink;
- const unsigned char *id;
- unsigned char len;
- int err;
-
- id = (const unsigned char *)&dst->index;
- len = sizeof(dst->index);
-
- attrs.phys.port_number = dp->index;
- memcpy(attrs.switch_id.id, id, len);
- attrs.switch_id.id_len = len;
- memset(dlp, 0, sizeof(*dlp));
-
- switch (dp->type) {
- case DSA_PORT_TYPE_UNUSED:
- attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
- break;
- case DSA_PORT_TYPE_CPU:
- attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
- break;
- case DSA_PORT_TYPE_DSA:
- attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
- break;
- case DSA_PORT_TYPE_USER:
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- break;
- }
-
- devlink_port_attrs_set(dlp, &attrs);
- err = devlink_port_register(dl, dlp, dp->index);
-
- if (!err)
- dp->devlink_port_setup = true;
-
- return err;
-}
-
static void dsa_port_teardown(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
- struct dsa_switch *ds = dp->ds;
if (!dp->setup)
return;
- if (ds->ops->port_teardown)
- ds->ops->port_teardown(ds, dp->index);
-
devlink_port_type_clear(dlp);
switch (dp->type) {
@@ -619,46 +636,15 @@ static void dsa_port_teardown(struct dsa_port *dp)
break;
}
- dp->setup = false;
-}
-
-static void dsa_port_devlink_teardown(struct dsa_port *dp)
-{
- struct devlink_port *dlp = &dp->devlink_port;
+ dsa_port_devlink_teardown(dp);
- if (dp->devlink_port_setup)
- devlink_port_unregister(dlp);
- dp->devlink_port_setup = false;
+ dp->setup = false;
}
-/* Destroy the current devlink port, and create a new one which has the UNUSED
- * flavour. At this point, any call to ds->ops->port_setup has been already
- * balanced out by a call to ds->ops->port_teardown, so we know that any
- * devlink port regions the driver had are now unregistered. We then call its
- * ds->ops->port_setup again, in order for the driver to re-create them on the
- * new devlink port.
- */
-static int dsa_port_reinit_as_unused(struct dsa_port *dp)
+static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
- struct dsa_switch *ds = dp->ds;
- int err;
-
- dsa_port_devlink_teardown(dp);
dp->type = DSA_PORT_TYPE_UNUSED;
- err = dsa_port_devlink_setup(dp);
- if (err)
- return err;
-
- if (ds->ops->port_setup) {
- /* On error, leave the devlink port registered,
- * dsa_switch_teardown will clean it up later.
- */
- err = ds->ops->port_setup(ds, dp->index);
- if (err)
- return err;
- }
-
- return 0;
+ return dsa_port_setup(dp);
}
static int dsa_devlink_info_get(struct devlink *dl,
@@ -882,7 +868,6 @@ static int dsa_switch_setup(struct dsa_switch *ds)
{
struct dsa_devlink_priv *dl_priv;
struct device_node *dn;
- struct dsa_port *dp;
int err;
if (ds->setup)
@@ -905,18 +890,9 @@ static int dsa_switch_setup(struct dsa_switch *ds)
dl_priv = devlink_priv(ds->devlink);
dl_priv->ds = ds;
- /* Setup devlink port instances now, so that the switch
- * setup() can register regions etc, against the ports
- */
- dsa_switch_for_each_port(dp, ds) {
- err = dsa_port_devlink_setup(dp);
- if (err)
- goto unregister_devlink_ports;
- }
-
err = dsa_switch_register_notifier(ds);
if (err)
- goto unregister_devlink_ports;
+ goto devlink_free;
ds->configure_vlan_while_not_filtering = true;
@@ -957,9 +933,7 @@ teardown:
ds->ops->teardown(ds);
unregister_notifier:
dsa_switch_unregister_notifier(ds);
-unregister_devlink_ports:
- dsa_switch_for_each_port(dp, ds)
- dsa_port_devlink_teardown(dp);
+devlink_free:
devlink_free(ds->devlink);
ds->devlink = NULL;
return err;
@@ -967,8 +941,6 @@ unregister_devlink_ports:
static void dsa_switch_teardown(struct dsa_switch *ds)
{
- struct dsa_port *dp;
-
if (!ds->setup)
return;
@@ -987,8 +959,6 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
dsa_switch_unregister_notifier(ds);
if (ds->devlink) {
- dsa_switch_for_each_port(dp, ds)
- dsa_port_devlink_teardown(dp);
devlink_free(ds->devlink);
ds->devlink = NULL;
}
@@ -1041,7 +1011,7 @@ static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
err = dsa_port_setup(dp);
if (err) {
- err = dsa_port_reinit_as_unused(dp);
+ err = dsa_port_setup_as_unused(dp);
if (err)
goto teardown;
}
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 129e4a649c7e..6e65c7ffd6f3 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -294,6 +294,7 @@ int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_phylink_create(struct dsa_port *dp);
+void dsa_port_phylink_destroy(struct dsa_port *dp);
int dsa_shared_port_link_register_of(struct dsa_port *dp);
void dsa_shared_port_link_unregister_of(struct dsa_port *dp);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
diff --git a/net/dsa/port.c b/net/dsa/port.c
index e6289a1db0a0..e4a0513816bb 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -1661,6 +1661,7 @@ int dsa_port_phylink_create(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
phy_interface_t mode;
+ struct phylink *pl;
int err;
err = of_get_phy_mode(dp->dn, &mode);
@@ -1677,16 +1678,24 @@ int dsa_port_phylink_create(struct dsa_port *dp)
if (ds->ops->phylink_get_caps)
ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);
- dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
- mode, &dsa_port_phylink_mac_ops);
- if (IS_ERR(dp->pl)) {
+ pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
+ mode, &dsa_port_phylink_mac_ops);
+ if (IS_ERR(pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(pl));
- return PTR_ERR(dp->pl);
+ return PTR_ERR(pl);
}
+ dp->pl = pl;
+
return 0;
}
+void dsa_port_phylink_destroy(struct dsa_port *dp)
+{
+ phylink_destroy(dp->pl);
+ dp->pl = NULL;
+}
+
static int dsa_shared_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
struct dsa_switch *ds = dp->ds;
@@ -1781,7 +1790,7 @@ static int dsa_shared_port_phylink_register(struct dsa_port *dp)
return 0;
err_phy_connect:
- phylink_destroy(dp->pl);
+ dsa_port_phylink_destroy(dp);
return err;
}
@@ -1983,8 +1992,7 @@ void dsa_shared_port_link_unregister_of(struct dsa_port *dp)
rtnl_lock();
phylink_disconnect_phy(dp->pl);
rtnl_unlock();
- phylink_destroy(dp->pl);
- dp->pl = NULL;
+ dsa_port_phylink_destroy(dp);
return;
}
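
dsa_port_phylink_destroy() centralizes teardown and clears dp->pl, so a stale
pointer can never be destroyed twice. The same destroy-and-NULL idiom in a
standalone form:

    #include <stdlib.h>

    /* Clearing the pointer at the single destroy site turns accidental
     * repeat calls into no-ops (free(NULL) is defined to do nothing).
     */
    static void obj_destroy(void **pp)
    {
            free(*pp);
            *pp = NULL;
    }
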
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index aa47ddc19fdf..1a59918d3b30 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -2304,7 +2304,7 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
if (ret) {
netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
ERR_PTR(ret));
- phylink_destroy(dp->pl);
+ dsa_port_phylink_destroy(dp);
}
return ret;
@@ -2476,7 +2476,7 @@ out_phy:
rtnl_lock();
phylink_disconnect_phy(p->dp->pl);
rtnl_unlock();
- phylink_destroy(p->dp->pl);
+ dsa_port_phylink_destroy(p->dp);
out_gcells:
gro_cells_destroy(&p->gcells);
out_free:
@@ -2499,7 +2499,7 @@ void dsa_slave_destroy(struct net_device *slave_dev)
phylink_disconnect_phy(dp->pl);
rtnl_unlock();
- phylink_destroy(dp->pl);
+ dsa_port_phylink_destroy(dp);
gro_cells_destroy(&p->gcells);
free_percpu(slave_dev->tstats);
free_netdev(slave_dev);
diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile
index b76432e70e6b..72ab0944262a 100644
--- a/net/ethtool/Makefile
+++ b/net/ethtool/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o
ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \
linkstate.o debug.o wol.o features.o privflags.o rings.o \
channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \
- tunnels.o fec.o eeprom.o stats.o phc_vclocks.o module.o
+ tunnels.o fec.o eeprom.o stats.o phc_vclocks.o module.o \
+ pse-pd.o
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index 2dc2b80aea5f..c1779657e074 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -46,6 +46,7 @@ int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max);
int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
extern const struct ethtool_phy_ops *ethtool_phy_ops;
+extern const struct ethtool_pse_ops *ethtool_pse_ops;
int ethtool_get_module_info_call(struct net_device *dev,
struct ethtool_modinfo *modinfo);
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index f4e41a6e0163..1a4c11356c96 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -286,6 +286,7 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
[ETHTOOL_MSG_STATS_GET] = &ethnl_stats_request_ops,
[ETHTOOL_MSG_PHC_VCLOCKS_GET] = &ethnl_phc_vclocks_request_ops,
[ETHTOOL_MSG_MODULE_GET] = &ethnl_module_request_ops,
+ [ETHTOOL_MSG_PSE_GET] = &ethnl_pse_request_ops,
};
static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
@@ -1023,6 +1024,22 @@ static const struct genl_ops ethtool_genl_ops[] = {
.policy = ethnl_module_set_policy,
.maxattr = ARRAY_SIZE(ethnl_module_set_policy) - 1,
},
+ {
+ .cmd = ETHTOOL_MSG_PSE_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ .policy = ethnl_pse_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_pse_get_policy) - 1,
+ },
+ {
+ .cmd = ETHTOOL_MSG_PSE_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_pse,
+ .policy = ethnl_pse_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_pse_set_policy) - 1,
+ },
};
static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index c0d587611854..1bfd374f9718 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -345,6 +345,7 @@ extern const struct ethnl_request_ops ethnl_module_eeprom_request_ops;
extern const struct ethnl_request_ops ethnl_stats_request_ops;
extern const struct ethnl_request_ops ethnl_phc_vclocks_request_ops;
extern const struct ethnl_request_ops ethnl_module_request_ops;
+extern const struct ethnl_request_ops ethnl_pse_request_ops;
extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1];
extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1];
@@ -383,6 +384,8 @@ extern const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_GROUPS + 1
extern const struct nla_policy ethnl_phc_vclocks_get_policy[ETHTOOL_A_PHC_VCLOCKS_HEADER + 1];
extern const struct nla_policy ethnl_module_get_policy[ETHTOOL_A_MODULE_HEADER + 1];
extern const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MODE_POLICY + 1];
+extern const struct nla_policy ethnl_pse_get_policy[ETHTOOL_A_PSE_HEADER + 1];
+extern const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1];
int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info);
@@ -402,6 +405,7 @@ int ethnl_tunnel_info_start(struct netlink_callback *cb);
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int ethnl_set_fec(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_module(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_pse(struct sk_buff *skb, struct genl_info *info);
extern const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN];
extern const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN];
diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c
new file mode 100644
index 000000000000..5a471e115b66
--- /dev/null
+++ b/net/ethtool/pse-pd.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// ethtool interface for Ethernet PSE (Power Sourcing Equipment)
+// and PD (Powered Device)
+//
+// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+//
+
+#include "common.h"
+#include "linux/pse-pd/pse.h"
+#include "netlink.h"
+#include <linux/ethtool_netlink.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+struct pse_req_info {
+ struct ethnl_req_info base;
+};
+
+struct pse_reply_data {
+ struct ethnl_reply_data base;
+ struct pse_control_status status;
+};
+
+#define PSE_REPDATA(__reply_base) \
+ container_of(__reply_base, struct pse_reply_data, base)
+
+/* PSE_GET */
+
+const struct nla_policy ethnl_pse_get_policy[ETHTOOL_A_PSE_HEADER + 1] = {
+ [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+};
+
+static int pse_get_pse_attributes(struct net_device *dev,
+ struct netlink_ext_ack *extack,
+ struct pse_reply_data *data)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ if (!phydev) {
+ NL_SET_ERR_MSG(extack, "No PHY is attached");
+ return -EOPNOTSUPP;
+ }
+
+ if (!phydev->psec) {
+ NL_SET_ERR_MSG(extack, "No PSE is attached");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&data->status, 0, sizeof(data->status));
+
+ return pse_ethtool_get_status(phydev->psec, extack, &data->status);
+}
+
+static int pse_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct pse_reply_data *data = PSE_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ int ret;
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = pse_get_pse_attributes(dev, info->extack, data);
+
+ ethnl_ops_complete(dev);
+
+ return ret;
+}
+
+static int pse_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct pse_reply_data *data = PSE_REPDATA(reply_base);
+ const struct pse_control_status *st = &data->status;
+ int len = 0;
+
+ if (st->podl_admin_state > 0)
+ len += nla_total_size(sizeof(u32)); /* _PODL_PSE_ADMIN_STATE */
+ if (st->podl_pw_status > 0)
+ len += nla_total_size(sizeof(u32)); /* _PODL_PSE_PW_D_STATUS */
+
+ return len;
+}
+
+static int pse_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct pse_reply_data *data = PSE_REPDATA(reply_base);
+ const struct pse_control_status *st = &data->status;
+
+ if (st->podl_admin_state > 0 &&
+ nla_put_u32(skb, ETHTOOL_A_PODL_PSE_ADMIN_STATE,
+ st->podl_admin_state))
+ return -EMSGSIZE;
+
+ if (st->podl_pw_status > 0 &&
+ nla_put_u32(skb, ETHTOOL_A_PODL_PSE_PW_D_STATUS,
+ st->podl_pw_status))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+const struct ethnl_request_ops ethnl_pse_request_ops = {
+ .request_cmd = ETHTOOL_MSG_PSE_GET,
+ .reply_cmd = ETHTOOL_MSG_PSE_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_PSE_HEADER,
+ .req_info_size = sizeof(struct pse_req_info),
+ .reply_data_size = sizeof(struct pse_reply_data),
+
+ .prepare_data = pse_prepare_data,
+ .reply_size = pse_reply_size,
+ .fill_reply = pse_fill_reply,
+};
+
+/* PSE_SET */
+
+const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = {
+ [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] =
+ NLA_POLICY_RANGE(NLA_U32, ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED,
+ ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED),
+};
+
+static int pse_set_pse_config(struct net_device *dev,
+ struct netlink_ext_ack *extack,
+ struct nlattr **tb)
+{
+ struct phy_device *phydev = dev->phydev;
+ struct pse_control_config config = {};
+
+ /* Optional attribute. Do not return error if not set. */
+ if (!tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL])
+ return 0;
+
+	/* These values are already validated by ethnl_pse_set_policy */
+ config.admin_cotrol = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]);
+
+ if (!phydev) {
+ NL_SET_ERR_MSG(extack, "No PHY is attached");
+ return -EOPNOTSUPP;
+ }
+
+ if (!phydev->psec) {
+ NL_SET_ERR_MSG(extack, "No PSE is attached");
+ return -EOPNOTSUPP;
+ }
+
+ return pse_ethtool_set_config(phydev->psec, extack, &config);
+}
+
+int ethnl_set_pse(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
+ struct net_device *dev;
+ int ret;
+
+ ret = ethnl_parse_header_dev_get(&req_info, tb[ETHTOOL_A_PSE_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+
+ dev = req_info.dev;
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+
+ ret = pse_set_pse_config(dev, info->extack, tb);
+ ethnl_ops_complete(dev);
+out_rtnl:
+ rtnl_unlock();
+
+ ethnl_parse_header_dev_put(&req_info);
+
+ return ret;
+}
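
The new PSE_GET path follows the usual ethnl contract: pse_reply_size() must
reserve space for exactly the attributes pse_fill_reply() later emits, under
identical conditions, or the reply either overruns the skb or wastes the
allocation. A toy model of that invariant:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t admin_state = 1, pw_status = 0; /* sample status */
            size_t attr = 8;       /* stand-in for nla_total_size(u32) */
            size_t reserved = 0, used = 0;

            if (admin_state > 0) reserved += attr;
            if (pw_status > 0)   reserved += attr;

            if (admin_state > 0) used += attr;   /* same conditions */
            if (pw_status > 0)   used += attr;

            assert(used <= reserved);
            return 0;
    }
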
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 7889e1ef7fad..cbd0e2ac4ffe 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -251,6 +251,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
return -EOPNOTSUPP;
}
+ if (!size)
+ return -EINVAL;
+
lock_sock(sk);
if (!sk->sk_bound_dev_if)
dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index f8ad04470d3a..ee4e578c7f20 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -471,30 +471,38 @@ static int ah4_err(struct sk_buff *skb, u32 info)
return 0;
}
-static int ah_init_state(struct xfrm_state *x)
+static int ah_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
struct ah_data *ahp = NULL;
struct xfrm_algo_desc *aalg_desc;
struct crypto_ahash *ahash;
- if (!x->aalg)
+ if (!x->aalg) {
+ NL_SET_ERR_MSG(extack, "AH requires a state with an AUTH algorithm");
goto error;
+ }
- if (x->encap)
+ if (x->encap) {
+ NL_SET_ERR_MSG(extack, "AH is not compatible with encapsulation");
goto error;
+ }
ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
if (!ahp)
return -ENOMEM;
ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
- if (IS_ERR(ahash))
+ if (IS_ERR(ahash)) {
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
goto error;
+ }
ahp->ahash = ahash;
if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
- (x->aalg->alg_key_len + 7) / 8))
+ (x->aalg->alg_key_len + 7) / 8)) {
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
goto error;
+ }
/*
* Lookup the algorithm description maintained by xfrm_algo,
@@ -507,10 +515,7 @@ static int ah_init_state(struct xfrm_state *x)
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
crypto_ahash_digestsize(ahash)) {
- pr_info("%s: %s digestsize %u != %u\n",
- __func__, x->aalg->alg_name,
- crypto_ahash_digestsize(ahash),
- aalg_desc->uinfo.auth.icv_fullbits / 8);
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
goto error;
}
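
The ah4 conversion replaces pr_info() diagnostics with extack strings, which the
netlink core returns in the NLMSG_ERROR ack of the failing request instead of
logging to dmesg. A minimal sketch of the pattern used throughout these xfrm
hunks (validate_aalg is a hypothetical helper):

    /* Sketch: validation helpers take the extack and attach a reason
     * before returning an errno; userspace sees the string directly.
     */
    static int validate_aalg(struct xfrm_state *x,
                             struct netlink_ext_ack *extack)
    {
            if (!x->aalg) {
                    NL_SET_ERR_MSG(extack, "AH requires a state with an AUTH algorithm");
                    return -EINVAL;
            }
            return 0;
    }
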
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index 85a9e500c42d..6da16ae6a962 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -124,7 +124,7 @@ static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
return -EACCES;
}
- return NOT_INIT;
+ return 0;
}
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 6be0f8903be1..52c8047efedb 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1008,16 +1008,17 @@ static void esp_destroy(struct xfrm_state *x)
crypto_free_aead(aead);
}
-static int esp_init_aead(struct xfrm_state *x)
+static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
char aead_name[CRYPTO_MAX_ALG_NAME];
struct crypto_aead *aead;
int err;
- err = -ENAMETOOLONG;
if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
- x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
- goto error;
+ x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
+ NL_SET_ERR_MSG(extack, "Algorithm name is too long");
+ return -ENAMETOOLONG;
+ }
aead = crypto_alloc_aead(aead_name, 0, 0);
err = PTR_ERR(aead);
@@ -1035,11 +1036,15 @@ static int esp_init_aead(struct xfrm_state *x)
if (err)
goto error;
+ return 0;
+
error:
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
return err;
}
-static int esp_init_authenc(struct xfrm_state *x)
+static int esp_init_authenc(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct crypto_aead *aead;
struct crypto_authenc_key_param *param;
@@ -1050,10 +1055,6 @@ static int esp_init_authenc(struct xfrm_state *x)
unsigned int keylen;
int err;
- err = -EINVAL;
- if (!x->ealg)
- goto error;
-
err = -ENAMETOOLONG;
if ((x->props.flags & XFRM_STATE_ESN)) {
@@ -1062,22 +1063,28 @@ static int esp_init_authenc(struct xfrm_state *x)
x->geniv ?: "", x->geniv ? "(" : "",
x->aalg ? x->aalg->alg_name : "digest_null",
x->ealg->alg_name,
- x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
+ x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
+ NL_SET_ERR_MSG(extack, "Algorithm name is too long");
goto error;
+ }
} else {
if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
"%s%sauthenc(%s,%s)%s",
x->geniv ?: "", x->geniv ? "(" : "",
x->aalg ? x->aalg->alg_name : "digest_null",
x->ealg->alg_name,
- x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
+ x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
+ NL_SET_ERR_MSG(extack, "Algorithm name is too long");
goto error;
+ }
}
aead = crypto_alloc_aead(authenc_name, 0, 0);
err = PTR_ERR(aead);
- if (IS_ERR(aead))
+ if (IS_ERR(aead)) {
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
goto error;
+ }
x->data = aead;
@@ -1107,17 +1114,16 @@ static int esp_init_authenc(struct xfrm_state *x)
err = -EINVAL;
if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
crypto_aead_authsize(aead)) {
- pr_info("ESP: %s digestsize %u != %u\n",
- x->aalg->alg_name,
- crypto_aead_authsize(aead),
- aalg_desc->uinfo.auth.icv_fullbits / 8);
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
goto free_key;
}
err = crypto_aead_setauthsize(
aead, x->aalg->alg_trunc_len / 8);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
goto free_key;
+ }
}
param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
@@ -1132,7 +1138,7 @@ error:
return err;
}
-static int esp_init_state(struct xfrm_state *x)
+static int esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
struct crypto_aead *aead;
u32 align;
@@ -1140,10 +1146,14 @@ static int esp_init_state(struct xfrm_state *x)
x->data = NULL;
- if (x->aead)
- err = esp_init_aead(x);
- else
- err = esp_init_authenc(x);
+ if (x->aead) {
+ err = esp_init_aead(x, extack);
+ } else if (x->ealg) {
+ err = esp_init_authenc(x, extack);
+ } else {
+ NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
+ err = -EINVAL;
+ }
if (err)
goto error;
@@ -1161,6 +1171,7 @@ static int esp_init_state(struct xfrm_state *x)
switch (encap->encap_type) {
default:
+ NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
err = -EINVAL;
goto error;
case UDP_ENCAP_ESPINUDP:
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 935026f4c807..170152772d33 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -110,7 +110,10 @@ static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
{
- return skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
+ __be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
+ : htons(ETH_P_IP);
+
+ return skb_eth_gso_segment(skb, features, type);
}
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 49db8c597eea..a0ad34e4f044 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -958,8 +958,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
if (likely(remaining > 1))
remaining &= ~1U;
- net_get_random_once(table_perturb,
- INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
+ get_random_sleepable_once(table_perturb,
+ INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index cc1caab4a654..92c02c886fe7 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -1079,3 +1079,70 @@ EXPORT_SYMBOL(ip_tunnel_parse_protocol);
const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol };
EXPORT_SYMBOL(ip_tunnel_header_ops);
+
+/* This function returns true when ENCAP attributes are present in the nl msg */
+bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
+ struct ip_tunnel_encap *encap)
+{
+ bool ret = false;
+
+ memset(encap, 0, sizeof(*encap));
+
+ if (!data)
+ return ret;
+
+ if (data[IFLA_IPTUN_ENCAP_TYPE]) {
+ ret = true;
+ encap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
+ }
+
+ if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
+ ret = true;
+ encap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
+ }
+
+ if (data[IFLA_IPTUN_ENCAP_SPORT]) {
+ ret = true;
+ encap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
+ }
+
+ if (data[IFLA_IPTUN_ENCAP_DPORT]) {
+ ret = true;
+ encap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_netlink_encap_parms);
+
+void ip_tunnel_netlink_parms(struct nlattr *data[],
+ struct ip_tunnel_parm *parms)
+{
+ if (data[IFLA_IPTUN_LINK])
+ parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
+
+ if (data[IFLA_IPTUN_LOCAL])
+ parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]);
+
+ if (data[IFLA_IPTUN_REMOTE])
+ parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]);
+
+ if (data[IFLA_IPTUN_TTL]) {
+ parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
+ if (parms->iph.ttl)
+ parms->iph.frag_off = htons(IP_DF);
+ }
+
+ if (data[IFLA_IPTUN_TOS])
+ parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);
+
+ if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
+ parms->iph.frag_off = htons(IP_DF);
+
+ if (data[IFLA_IPTUN_FLAGS])
+ parms->i_flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]);
+
+ if (data[IFLA_IPTUN_PROTO])
+ parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_netlink_parms);
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 366094c1ce6c..5a4fb2539b08 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -117,7 +117,8 @@ out:
return err;
}
-static int ipcomp4_init_state(struct xfrm_state *x)
+static int ipcomp4_init_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
int err = -EINVAL;
@@ -129,17 +130,20 @@ static int ipcomp4_init_state(struct xfrm_state *x)
x->props.header_len += sizeof(struct iphdr);
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported XFRM mode for IPcomp");
goto out;
}
- err = ipcomp_init_state(x);
+ err = ipcomp_init_state(x, extack);
if (err)
goto out;
if (x->props.mode == XFRM_MODE_TUNNEL) {
err = ipcomp_tunnel_attach(x);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Kernel error: failed to initialize the associated state");
goto out;
+ }
}
err = 0;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 123ea63a04cb..180f9daf5bec 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -417,29 +417,7 @@ static void ipip_netlink_parms(struct nlattr *data[],
if (!data)
return;
- if (data[IFLA_IPTUN_LINK])
- parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
-
- if (data[IFLA_IPTUN_LOCAL])
- parms->iph.saddr = nla_get_in_addr(data[IFLA_IPTUN_LOCAL]);
-
- if (data[IFLA_IPTUN_REMOTE])
- parms->iph.daddr = nla_get_in_addr(data[IFLA_IPTUN_REMOTE]);
-
- if (data[IFLA_IPTUN_TTL]) {
- parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
- if (parms->iph.ttl)
- parms->iph.frag_off = htons(IP_DF);
- }
-
- if (data[IFLA_IPTUN_TOS])
- parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);
-
- if (data[IFLA_IPTUN_PROTO])
- parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);
-
- if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
- parms->iph.frag_off = htons(IP_DF);
+ ip_tunnel_netlink_parms(data, parms);
if (data[IFLA_IPTUN_COLLECT_METADATA])
*collect_md = true;
@@ -448,40 +426,6 @@ static void ipip_netlink_parms(struct nlattr *data[],
*fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}
-/* This function returns true when ENCAP attributes are present in the nl msg */
-static bool ipip_netlink_encap_parms(struct nlattr *data[],
- struct ip_tunnel_encap *ipencap)
-{
- bool ret = false;
-
- memset(ipencap, 0, sizeof(*ipencap));
-
- if (!data)
- return ret;
-
- if (data[IFLA_IPTUN_ENCAP_TYPE]) {
- ret = true;
- ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
- }
-
- if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
- ret = true;
- ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
- }
-
- if (data[IFLA_IPTUN_ENCAP_SPORT]) {
- ret = true;
- ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
- }
-
- if (data[IFLA_IPTUN_ENCAP_DPORT]) {
- ret = true;
- ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
- }
-
- return ret;
-}
-
static int ipip_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -491,7 +435,7 @@ static int ipip_newlink(struct net *src_net, struct net_device *dev,
struct ip_tunnel_encap ipencap;
__u32 fwmark = 0;
- if (ipip_netlink_encap_parms(data, &ipencap)) {
+ if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
int err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0)
@@ -512,7 +456,7 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
bool collect_md;
__u32 fwmark = t->fwmark;
- if (ipip_netlink_encap_parms(data, &ipencap)) {
+ if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
int err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index b83c2bd9d722..517042caf6dc 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -33,6 +33,7 @@
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
+#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/ping.h>
#include <net/udp.h>
@@ -295,6 +296,19 @@ void ping_close(struct sock *sk, long timeout)
}
EXPORT_SYMBOL_GPL(ping_close);
+static int ping_pre_connect(struct sock *sk, struct sockaddr *uaddr,
+ int addr_len)
+{
+ /* This check is replicated from __ip4_datagram_connect() and
+	 * intended to prevent the BPF program called below from accessing
+	 * bytes that are out of the bounds specified by the user in addr_len.
+ */
+ if (addr_len < sizeof(struct sockaddr_in))
+ return -EINVAL;
+
+ return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
+}
+
/* Checks the bind address and possibly modifies sk->sk_bound_dev_if. */
static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
struct sockaddr *uaddr, int addr_len)
@@ -1009,6 +1023,7 @@ struct proto ping_prot = {
.owner = THIS_MODULE,
.init = ping_init_sock,
.close = ping_close,
+ .pre_connect = ping_pre_connect,
.connect = ip4_datagram_connect,
.disconnect = __udp_disconnect,
.setsockopt = ip_setsockopt,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 648b5c54bb32..0c51abeee172 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3136,6 +3136,8 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
tp->snd_cwnd_cnt = 0;
+ tp->is_cwnd_limited = 0;
+ tp->max_packets_out = 0;
tp->window_clamp = 0;
tp->delivered = 0;
tp->delivered_ce = 0;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 442838ab0253..79f30f026d89 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -561,6 +561,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->fastopen_req = NULL;
RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
+ newtp->bpf_chg_cc_inprogress = 0;
tcp_bpf_clone(sk, newsk);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index a844a0d38482..45dda7889387 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -255,7 +255,15 @@ found:
mss = skb_shinfo(p)->gso_size;
- flush |= (len - 1) >= mss;
+ /* If the skb is a GRO packet, make sure its gso_size matches the prior packet's mss.
+ * If it is a single frame, do not aggregate it if its length
+ * is bigger than our mss.
+ */
+ if (unlikely(skb_is_gso(skb)))
+ flush |= (mss != skb_shinfo(skb)->gso_size);
+ else
+ flush |= (len - 1) >= mss;
+
flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
flush |= p->decrypted ^ skb->decrypted;
@@ -269,7 +277,12 @@ found:
tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
out_check_final:
- flush = len < mss;
+ /* Force a flush if the last segment is smaller than the mss. */
+ if (unlikely(skb_is_gso(skb)))
+ flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
+ else
+ flush = len < mss;
+
flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
TCP_FLAG_RST | TCP_FLAG_SYN |
TCP_FLAG_FIN));
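A self-contained model of the two flush rules introduced above, using plain integers in place of skb state (a sketch, not kernel code): a GRO packet must match the prior mss exactly, a single frame merely has to fit in it, and on the final check every aggregated segment must be full-sized.

#include <stdbool.h>
#include <stdint.h>

/* merge-time rule: flush instead of aggregating with the held packet? */
static bool flush_on_merge(bool is_gso, uint32_t len, uint32_t gso_size,
			   uint32_t prior_mss)
{
	if (is_gso)
		return gso_size != prior_mss;	/* GRO packet: sizes must match */
	return (len - 1) >= prior_mss;		/* single frame: must fit in mss */
}

/* final rule: force a flush when the tail segment is short */
static bool flush_on_final(bool is_gso, uint32_t len, uint32_t count,
			   uint32_t gso_size, uint32_t prior_mss)
{
	if (is_gso)
		return len != count * gso_size;	/* all segments must be full */
	return len < prior_mss;
}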
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 290019de766d..c69f4d966024 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1875,15 +1875,20 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
struct tcp_sock *tp = tcp_sk(sk);
- /* Track the maximum number of outstanding packets in each
- * window, and remember whether we were cwnd-limited then.
+ /* Track the strongest available signal of the degree to which the cwnd
+ * is fully utilized. If cwnd-limited then remember that fact for the
+ * current window. If not cwnd-limited then track the maximum number of
+ * outstanding packets in the current window. (If cwnd-limited then we
+ * choose not to update tp->max_packets_out, to avoid an extra else
+ * clause with no functional impact.)
*/
- if (!before(tp->snd_una, tp->max_packets_seq) ||
- tp->packets_out > tp->max_packets_out ||
- is_cwnd_limited) {
- tp->max_packets_out = tp->packets_out;
- tp->max_packets_seq = tp->snd_nxt;
+ if (!before(tp->snd_una, tp->cwnd_usage_seq) ||
+ is_cwnd_limited ||
+ (!tp->is_cwnd_limited &&
+ tp->packets_out > tp->max_packets_out)) {
tp->is_cwnd_limited = is_cwnd_limited;
+ tp->max_packets_out = tp->packets_out;
+ tp->cwnd_usage_seq = tp->snd_nxt;
}
if (tcp_is_cwnd_limited(sk)) {
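Restated as a standalone predicate (a sketch, with a local seq_before() standing in for the kernel's before()): a fresh record starts once snd_una passes the recorded sequence, a cwnd-limited event always overwrites, and max_packets_out only grows while the current window has not been cwnd-limited.

#include <stdbool.h>
#include <stdint.h>

static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

static bool update_cwnd_usage(uint32_t snd_una, uint32_t cwnd_usage_seq,
			      bool is_cwnd_limited, bool was_cwnd_limited,
			      uint32_t packets_out, uint32_t max_packets_out)
{
	return !seq_before(snd_una, cwnd_usage_seq) ||	/* window rolled over */
	       is_cwnd_limited ||			/* strongest signal wins */
	       (!was_cwnd_limited && packets_out > max_packets_out);
}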
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 9d4f418f1bf8..8489fa106583 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -22,13 +22,17 @@ static int ipip_xfrm_rcv(struct xfrm_state *x, struct sk_buff *skb)
return ip_hdr(skb)->protocol;
}
-static int ipip_init_state(struct xfrm_state *x)
+static int ipip_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
- if (x->props.mode != XFRM_MODE_TUNNEL)
+ if (x->props.mode != XFRM_MODE_TUNNEL) {
+ NL_SET_ERR_MSG(extack, "IPv4 tunnel can only be used with tunnel mode");
return -EINVAL;
+ }
- if (x->encap)
+ if (x->encap) {
+ NL_SET_ERR_MSG(extack, "IPv4 tunnel is not compatible with encapsulation");
return -EINVAL;
+ }
x->props.header_len = sizeof(struct iphdr);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index b5995c1f4d7a..5228d2716289 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -666,30 +666,38 @@ static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return 0;
}
-static int ah6_init_state(struct xfrm_state *x)
+static int ah6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
struct ah_data *ahp = NULL;
struct xfrm_algo_desc *aalg_desc;
struct crypto_ahash *ahash;
- if (!x->aalg)
+ if (!x->aalg) {
+ NL_SET_ERR_MSG(extack, "AH requires a state with an AUTH algorithm");
goto error;
+ }
- if (x->encap)
+ if (x->encap) {
+ NL_SET_ERR_MSG(extack, "AH is not compatible with encapsulation");
goto error;
+ }
ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
if (!ahp)
return -ENOMEM;
ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
- if (IS_ERR(ahash))
+ if (IS_ERR(ahash)) {
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
goto error;
+ }
ahp->ahash = ahash;
if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
- (x->aalg->alg_key_len + 7) / 8))
+ (x->aalg->alg_key_len + 7) / 8)) {
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
goto error;
+ }
/*
* Lookup the algorithm description maintained by xfrm_algo,
@@ -702,9 +710,7 @@ static int ah6_init_state(struct xfrm_state *x)
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
crypto_ahash_digestsize(ahash)) {
- pr_info("AH: %s digestsize %u != %u\n",
- x->aalg->alg_name, crypto_ahash_digestsize(ahash),
- aalg_desc->uinfo.auth.icv_fullbits/8);
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
goto error;
}
@@ -721,6 +727,7 @@ static int ah6_init_state(struct xfrm_state *x)
x->props.header_len += sizeof(struct ipv6hdr);
break;
default:
+ NL_SET_ERR_MSG(extack, "Invalid mode requested for AH, must be one of TRANSPORT, TUNNEL, BEET");
goto error;
}
x->data = ahp;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index b10f9183801d..14ed868680c6 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -1051,16 +1051,17 @@ static void esp6_destroy(struct xfrm_state *x)
crypto_free_aead(aead);
}
-static int esp_init_aead(struct xfrm_state *x)
+static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
char aead_name[CRYPTO_MAX_ALG_NAME];
struct crypto_aead *aead;
int err;
- err = -ENAMETOOLONG;
if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
- x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
- goto error;
+ x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
+ NL_SET_ERR_MSG(extack, "Algorithm name is too long");
+ return -ENAMETOOLONG;
+ }
aead = crypto_alloc_aead(aead_name, 0, 0);
err = PTR_ERR(aead);
@@ -1078,11 +1079,15 @@ static int esp_init_aead(struct xfrm_state *x)
if (err)
goto error;
+ return 0;
+
error:
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
return err;
}
-static int esp_init_authenc(struct xfrm_state *x)
+static int esp_init_authenc(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct crypto_aead *aead;
struct crypto_authenc_key_param *param;
@@ -1093,10 +1098,6 @@ static int esp_init_authenc(struct xfrm_state *x)
unsigned int keylen;
int err;
- err = -EINVAL;
- if (!x->ealg)
- goto error;
-
err = -ENAMETOOLONG;
if ((x->props.flags & XFRM_STATE_ESN)) {
@@ -1105,22 +1106,28 @@ static int esp_init_authenc(struct xfrm_state *x)
x->geniv ?: "", x->geniv ? "(" : "",
x->aalg ? x->aalg->alg_name : "digest_null",
x->ealg->alg_name,
- x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
+ x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
+ NL_SET_ERR_MSG(extack, "Algorithm name is too long");
goto error;
+ }
} else {
if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
"%s%sauthenc(%s,%s)%s",
x->geniv ?: "", x->geniv ? "(" : "",
x->aalg ? x->aalg->alg_name : "digest_null",
x->ealg->alg_name,
- x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
+ x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
+ NL_SET_ERR_MSG(extack, "Algorithm name is too long");
goto error;
+ }
}
aead = crypto_alloc_aead(authenc_name, 0, 0);
err = PTR_ERR(aead);
- if (IS_ERR(aead))
+ if (IS_ERR(aead)) {
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
goto error;
+ }
x->data = aead;
@@ -1150,17 +1157,16 @@ static int esp_init_authenc(struct xfrm_state *x)
err = -EINVAL;
if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
crypto_aead_authsize(aead)) {
- pr_info("ESP: %s digestsize %u != %u\n",
- x->aalg->alg_name,
- crypto_aead_authsize(aead),
- aalg_desc->uinfo.auth.icv_fullbits / 8);
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
goto free_key;
}
err = crypto_aead_setauthsize(
aead, x->aalg->alg_trunc_len / 8);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
goto free_key;
+ }
}
param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
@@ -1175,7 +1181,7 @@ error:
return err;
}
-static int esp6_init_state(struct xfrm_state *x)
+static int esp6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
struct crypto_aead *aead;
u32 align;
@@ -1183,10 +1189,14 @@ static int esp6_init_state(struct xfrm_state *x)
x->data = NULL;
- if (x->aead)
- err = esp_init_aead(x);
- else
- err = esp_init_authenc(x);
+ if (x->aead) {
+ err = esp_init_aead(x, extack);
+ } else if (x->ealg) {
+ err = esp_init_authenc(x, extack);
+ } else {
+ NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
+ err = -EINVAL;
+ }
if (err)
goto error;
@@ -1214,6 +1224,7 @@ static int esp6_init_state(struct xfrm_state *x)
switch (encap->encap_type) {
default:
+ NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
err = -EINVAL;
goto error;
case UDP_ENCAP_ESPINUDP:
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 3a293838a91d..79d43548279c 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -145,7 +145,10 @@ static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
{
- return skb_eth_gso_segment(skb, features, htons(ETH_P_IPV6));
+ __be16 type = x->inner_mode.family == AF_INET ? htons(ETH_P_IP)
+ : htons(ETH_P_IPV6);
+
+ return skb_eth_gso_segment(skb, features, type);
}
static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9e97f3b4c7e8..cc5d5e75b658 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1988,39 +1988,6 @@ static void ip6_tnl_netlink_parms(struct nlattr *data[],
parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}
-static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
- struct ip_tunnel_encap *ipencap)
-{
- bool ret = false;
-
- memset(ipencap, 0, sizeof(*ipencap));
-
- if (!data)
- return ret;
-
- if (data[IFLA_IPTUN_ENCAP_TYPE]) {
- ret = true;
- ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
- }
-
- if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
- ret = true;
- ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
- }
-
- if (data[IFLA_IPTUN_ENCAP_SPORT]) {
- ret = true;
- ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
- }
-
- if (data[IFLA_IPTUN_ENCAP_DPORT]) {
- ret = true;
- ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
- }
-
- return ret;
-}
-
static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -2033,7 +2000,7 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
nt = netdev_priv(dev);
- if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
+ if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
err = ip6_tnl_encap_setup(nt, &ipencap);
if (err < 0)
return err;
@@ -2070,7 +2037,7 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
if (dev == ip6n->fb_tnl_dev)
return -EINVAL;
- if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
+ if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
int err = ip6_tnl_encap_setup(t, &ipencap);
if (err < 0)
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 15f984be3570..72d4858dec18 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -136,7 +136,8 @@ out:
return err;
}
-static int ipcomp6_init_state(struct xfrm_state *x)
+static int ipcomp6_init_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
int err = -EINVAL;
@@ -148,17 +149,20 @@ static int ipcomp6_init_state(struct xfrm_state *x)
x->props.header_len += sizeof(struct ipv6hdr);
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported XFRM mode for IPcomp");
goto out;
}
- err = ipcomp_init_state(x);
+ err = ipcomp_init_state(x, extack);
if (err)
goto out;
if (x->props.mode == XFRM_MODE_TUNNEL) {
err = ipcomp6_tunnel_attach(x);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Kernel error: failed to initialize the associated state");
goto out;
+ }
}
err = 0;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index aeb35d26e474..83d2a8be263f 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -247,15 +247,14 @@ static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb,
return err;
}
-static int mip6_destopt_init_state(struct xfrm_state *x)
+static int mip6_destopt_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
if (x->id.spi) {
- pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi);
+ NL_SET_ERR_MSG(extack, "SPI must be 0");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
- pr_info("%s: state's mode is not %u: %u\n",
- __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
+ NL_SET_ERR_MSG(extack, "XFRM mode must be XFRM_MODE_ROUTEOPTIMIZATION");
return -EINVAL;
}
@@ -333,15 +332,14 @@ static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb)
return 0;
}
-static int mip6_rthdr_init_state(struct xfrm_state *x)
+static int mip6_rthdr_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
if (x->id.spi) {
- pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi);
+ NL_SET_ERR_MSG(extack, "SPI must be 0");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
- pr_info("%s: state's mode is not %u: %u\n",
- __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
+ NL_SET_ERR_MSG(extack, "XFRM mode must be XFRM_MODE_ROUTEOPTIMIZATION");
return -EINVAL;
}
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 91b840514656..5f2ef8493714 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -20,6 +20,7 @@
#include <net/udp.h>
#include <net/transp_v6.h>
#include <linux/proc_fs.h>
+#include <linux/bpf-cgroup.h>
#include <net/ping.h>
static void ping_v6_destroy(struct sock *sk)
@@ -49,6 +50,20 @@ static int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
return 0;
}
+static int ping_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
+ int addr_len)
+{
+ /* This check is replicated from __ip6_datagram_connect() and is
+ * intended to prevent the BPF program called below from accessing
+ * bytes that are outside the bound specified by the user in addr_len.
+ */
+
+ if (addr_len < SIN6_LEN_RFC2133)
+ return -EINVAL;
+
+ return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
+}
+
static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct inet_sock *inet = inet_sk(sk);
@@ -191,6 +206,7 @@ struct proto pingv6_prot = {
.init = ping_init_sock,
.close = ping_close,
.destroy = ping_v6_destroy,
+ .pre_connect = ping_v6_pre_connect,
.connect = ip6_datagram_connect_v6_only,
.disconnect = __udp_disconnect,
.setsockopt = ipv6_setsockopt,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 98f1cf40746f..d27683e3fc97 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1503,71 +1503,12 @@ static void ipip6_netlink_parms(struct nlattr *data[],
if (!data)
return;
- if (data[IFLA_IPTUN_LINK])
- parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
-
- if (data[IFLA_IPTUN_LOCAL])
- parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]);
-
- if (data[IFLA_IPTUN_REMOTE])
- parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]);
-
- if (data[IFLA_IPTUN_TTL]) {
- parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
- if (parms->iph.ttl)
- parms->iph.frag_off = htons(IP_DF);
- }
-
- if (data[IFLA_IPTUN_TOS])
- parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);
-
- if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
- parms->iph.frag_off = htons(IP_DF);
-
- if (data[IFLA_IPTUN_FLAGS])
- parms->i_flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]);
-
- if (data[IFLA_IPTUN_PROTO])
- parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);
+ ip_tunnel_netlink_parms(data, parms);
if (data[IFLA_IPTUN_FWMARK])
*fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}
-/* This function returns true when ENCAP attributes are present in the nl msg */
-static bool ipip6_netlink_encap_parms(struct nlattr *data[],
- struct ip_tunnel_encap *ipencap)
-{
- bool ret = false;
-
- memset(ipencap, 0, sizeof(*ipencap));
-
- if (!data)
- return ret;
-
- if (data[IFLA_IPTUN_ENCAP_TYPE]) {
- ret = true;
- ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
- }
-
- if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
- ret = true;
- ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
- }
-
- if (data[IFLA_IPTUN_ENCAP_SPORT]) {
- ret = true;
- ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
- }
-
- if (data[IFLA_IPTUN_ENCAP_DPORT]) {
- ret = true;
- ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
- }
-
- return ret;
-}
-
#ifdef CONFIG_IPV6_SIT_6RD
/* This function returns true when 6RD attributes are present in the nl msg */
static bool ipip6_netlink_6rd_parms(struct nlattr *data[],
@@ -1619,7 +1560,7 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
nt = netdev_priv(dev);
- if (ipip6_netlink_encap_parms(data, &ipencap)) {
+ if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
err = ip_tunnel_encap_setup(nt, &ipencap);
if (err < 0)
return err;
@@ -1671,7 +1612,7 @@ static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[],
if (dev == sitn->fb_tunnel_dev)
return -EINVAL;
- if (ipip6_netlink_encap_parms(data, &ipencap)) {
+ if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0)
return err;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 2b31112c0856..1323f2f6928e 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -270,13 +270,17 @@ static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return 0;
}
-static int xfrm6_tunnel_init_state(struct xfrm_state *x)
+static int xfrm6_tunnel_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
- if (x->props.mode != XFRM_MODE_TUNNEL)
+ if (x->props.mode != XFRM_MODE_TUNNEL) {
+ NL_SET_ERR_MSG(extack, "IPv6 tunnel can only be used with tunnel mode");
return -EINVAL;
+ }
- if (x->encap)
+ if (x->encap) {
+ NL_SET_ERR_MSG(extack, "IPv6 tunnel is not compatible with encapsulation");
return -EINVAL;
+ }
x->props.header_len = sizeof(struct ipv6hdr);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 572254366a0f..dd9ac1f7d2ea 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -243,7 +243,7 @@ static int ieee80211_can_powered_addr_change(struct ieee80211_sub_if_data *sdata
*/
break;
default:
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
unlock:
@@ -461,7 +461,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
/*
* Stop TX on this interface first.
*/
- if (sdata->dev)
+ if (!local->ops->wake_tx_queue && sdata->dev)
netif_tx_stop_all_queues(sdata->dev);
ieee80211_roc_purge(local, sdata);
@@ -1412,8 +1412,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
sdata->vif.type != NL80211_IFTYPE_STATION);
}
- set_bit(SDATA_STATE_RUNNING, &sdata->state);
-
switch (sdata->vif.type) {
case NL80211_IFTYPE_P2P_DEVICE:
rcu_assign_pointer(local->p2p_sdata, sdata);
@@ -1472,6 +1470,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
+ set_bit(SDATA_STATE_RUNNING, &sdata->state);
+
return 0;
err_del_interface:
drv_remove_interface(local, sdata);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a651d3ffd8e6..a804e0220ed7 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4443,8 +4443,11 @@ ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
ies->data, ies->len);
+ if (!he_cap_elem)
+ return false;
+
/* invalid HE IE */
- if (!he_cap_elem || he_cap_elem->datalen < 1 + sizeof(*he_cap)) {
+ if (he_cap_elem->datalen < 1 + sizeof(*he_cap)) {
sdata_info(sdata,
"Invalid HE elem, Disable HE\n");
return false;
@@ -4710,8 +4713,6 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}
if (!elems->vht_cap_elem) {
- sdata_info(sdata,
- "bad VHT capabilities, disabling VHT\n");
*conn_flags |= IEEE80211_CONN_DISABLE_VHT;
vht_oper = NULL;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bd215fe3c796..589521717c35 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -4352,6 +4352,7 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
.vif_type = sdata->vif.type,
.control_port_protocol = sdata->control_port_protocol,
}, *old, *new = NULL;
+ u32 offload_flags;
bool set_offload = false;
bool assign = false;
bool offload;
@@ -4467,10 +4468,10 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
if (assign)
new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
- offload = assign &&
- (sdata->vif.offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED);
+ offload_flags = get_bss_sdata(sdata)->vif.offload_flags;
+ offload = offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED;
- if (offload)
+ if (assign && offload)
set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
else
set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
@@ -4708,7 +4709,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
- goto drop;
+ return false;
payload = (void *)(skb->data + snap_offs);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index d9e7a2ed5d2c..e137355bea67 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2319,6 +2319,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
u16 len_rthdr;
int hdrlen;
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ if (unlikely(!ieee80211_sdata_running(sdata)))
+ goto fail;
+
memset(info, 0, sizeof(*info));
info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_CTL_INJECTED;
@@ -2378,8 +2382,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
* This is necessary, for example, for old hostapd versions that
* don't use nl80211-based management TX/RX.
*/
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(tmp_sdata))
continue;
@@ -4169,7 +4171,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct sk_buff *next;
int len = skb->len;
- if (unlikely(skb->len < ETH_HLEN)) {
+ if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) {
kfree_skb(skb);
return;
}
@@ -4566,7 +4568,7 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
struct ieee80211_key *key;
struct sta_info *sta;
- if (unlikely(skb->len < ETH_HLEN)) {
+ if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index a4bf86f17c39..7a80843f1a0a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2238,7 +2238,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
if (he_cap) {
enum nl80211_iftype iftype =
ieee80211_vif_type_p2p(&sdata->vif);
- __le16 cap = ieee80211_get_he_6ghz_capa(sband, iftype);
+ __le16 cap = ieee80211_get_he_6ghz_capa(sband6, iftype);
pos = ieee80211_write_he_6ghz_cap(pos, cap, end);
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 760404b15cd0..f599ad44ed24 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -662,9 +662,9 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
skb = skb_peek(&ssk->sk_receive_queue);
if (!skb) {
- /* if no data is found, a racing workqueue/recvmsg
- * already processed the new data, stop here or we
- * can enter an infinite loop
+ /* With racing move_skbs_to_msk() and __mptcp_move_skbs(),
+ * a different CPU may already have processed the pending
+ * data; stop here or we can enter an infinite loop
*/
if (!moved)
done = true;
@@ -672,9 +672,9 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
}
if (__mptcp_check_fallback(msk)) {
- /* if we are running under the workqueue, TCP could have
- * collapsed skbs between dummy map creation and now
- * be sure to adjust the size
+ /* Under fallback, skbs have no MPTCP extension and TCP could
+ * collapse them between the dummy map creation and the
+ * current dequeue. Be sure to adjust the map size.
*/
map_remaining = skb->len;
subflow->map_data_len = skb->len;
@@ -1707,7 +1707,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto out;
} else if (ret) {
release_sock(ssk);
- goto out;
+ goto do_error;
}
release_sock(ssk);
}
@@ -1717,9 +1717,13 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
ret = sk_stream_wait_connect(sk, &timeo);
if (ret)
- goto out;
+ goto do_error;
}
+ ret = -EPIPE;
+ if (unlikely(sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)))
+ goto do_error;
+
pfrag = sk_page_frag(sk);
while (msg_data_left(msg)) {
@@ -1728,11 +1732,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
bool dfrag_collapsed;
size_t psize, offset;
- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
- ret = -EPIPE;
- goto out;
- }
-
/* reuse tail pfrag, if possible, or carve a new one from the
* page allocator
*/
@@ -1764,7 +1763,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (copy_page_from_iter(dfrag->page, offset, psize,
&msg->msg_iter) != psize) {
ret = -EFAULT;
- goto out;
+ goto do_error;
}
/* data successfully copied into the write queue */
@@ -1796,7 +1795,7 @@ wait_for_memory:
__mptcp_push_pending(sk, msg->msg_flags);
ret = sk_stream_wait_memory(sk, &timeo);
if (ret)
- goto out;
+ goto do_error;
}
if (copied)
@@ -1804,7 +1803,14 @@ wait_for_memory:
out:
release_sock(sk);
- return copied ? : ret;
+ return copied;
+
+do_error:
+ if (copied)
+ goto out;
+
+ copied = sk_stream_error(sk, msg->msg_flags, ret);
+ goto out;
}
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
@@ -2307,8 +2313,14 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
- if (flags & MPTCP_CF_FASTCLOSE)
+ if (flags & MPTCP_CF_FASTCLOSE) {
+ /* be sure to force the tcp_disconnect() path,
+ * to generate the egress reset
+ */
+ ssk->sk_lingertime = 0;
+ sock_set_flag(ssk, SOCK_LINGER);
subflow->send_fastclose = 1;
+ }
need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
if (!dispose_it) {
@@ -2441,12 +2453,31 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
unlock_sock_fast(tcp_sk, slow);
}
+ /* Mirror the tcp_reset() error propagation */
+ switch (sk->sk_state) {
+ case TCP_SYN_SENT:
+ sk->sk_err = ECONNREFUSED;
+ break;
+ case TCP_CLOSE_WAIT:
+ sk->sk_err = EPIPE;
+ break;
+ case TCP_CLOSE:
+ return;
+ default:
+ sk->sk_err = ECONNRESET;
+ }
+
inet_sk_state_store(sk, TCP_CLOSE);
sk->sk_shutdown = SHUTDOWN_MASK;
smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
- mptcp_close_wake_up(sk);
+ /* the calling mptcp_worker will properly destroy the socket */
+ if (sock_flag(sk, SOCK_DEAD))
+ return;
+
+ sk->sk_state_change(sk);
+ sk_error_report(sk);
}
static void __mptcp_retrans(struct sock *sk)
@@ -2552,6 +2583,16 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
mptcp_reset_timeout(msk, 0);
}
+static void mptcp_do_fastclose(struct sock *sk)
+{
+ struct mptcp_subflow_context *subflow, *tmp;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ mptcp_for_each_subflow_safe(msk, subflow, tmp)
+ __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
+ subflow, MPTCP_CF_FASTCLOSE);
+}
+
static void mptcp_worker(struct work_struct *work)
{
struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
@@ -2580,11 +2621,15 @@ static void mptcp_worker(struct work_struct *work)
* closed, but we need the msk around to reply to incoming DATA_FIN,
* even if it is orphaned and in FIN_WAIT2 state
*/
- if (sock_flag(sk, SOCK_DEAD) &&
- (mptcp_check_close_timeout(sk) || sk->sk_state == TCP_CLOSE)) {
- inet_sk_state_store(sk, TCP_CLOSE);
- __mptcp_destroy_sock(sk);
- goto unlock;
+ if (sock_flag(sk, SOCK_DEAD)) {
+ if (mptcp_check_close_timeout(sk)) {
+ inet_sk_state_store(sk, TCP_CLOSE);
+ mptcp_do_fastclose(sk);
+ }
+ if (sk->sk_state == TCP_CLOSE) {
+ __mptcp_destroy_sock(sk);
+ goto unlock;
+ }
}
if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
@@ -2825,6 +2870,18 @@ static void __mptcp_destroy_sock(struct sock *sk)
sock_put(sk);
}
+static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
+{
+ /* Concurrent splices from sk_receive_queue into receive_queue will
+ * always show at least one non-empty queue when checked in this order.
+ */
+ if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) &&
+ skb_queue_empty_lockless(&msk->receive_queue))
+ return 0;
+
+ return EPOLLIN | EPOLLRDNORM;
+}
+
bool __mptcp_close(struct sock *sk, long timeout)
{
struct mptcp_subflow_context *subflow;
@@ -2838,8 +2895,13 @@ bool __mptcp_close(struct sock *sk, long timeout)
goto cleanup;
}
- if (mptcp_close_state(sk))
+ if (mptcp_check_readable(msk)) {
+ /* the msk still has readable data; do the MPTCP equivalent of a TCP reset */
+ inet_sk_state_store(sk, TCP_CLOSE);
+ mptcp_do_fastclose(sk);
+ } else if (mptcp_close_state(sk)) {
__mptcp_wr_shutdown(sk);
+ }
sk_stream_wait_close(sk, timeout);
@@ -3656,18 +3718,6 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
return err;
}
-static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
-{
- /* Concurrent splices from sk_receive_queue into receive_queue will
- * always show at least one non-empty queue when checked in this order.
- */
- if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) &&
- skb_queue_empty_lockless(&msk->receive_queue))
- return 0;
-
- return EPOLLIN | EPOLLRDNORM;
-}
-
static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
@@ -3718,7 +3768,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
- /* This barrier is coupled with smp_wmb() in tcp_reset() */
+ /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
smp_rmb();
if (sk->sk_err)
mask |= EPOLLERR;
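The new do_error label in mptcp_sendmsg() above implements the usual stream-sendmsg convention, sketched here in isolation: partial progress is reported as a byte count and the error is left for the next call, while a zero-progress failure is converted via sk_stream_error() (which may also raise SIGPIPE for -EPIPE).

/* sketch: result selection at the tail of a stream sendmsg path */
static long sendmsg_result(long copied, int err)
{
	if (copied)		/* partial success outranks the error */
		return copied;
	return err;		/* e.g. -EPIPE after SEND_SHUTDOWN */
}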
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 06df49ea6329..0f060d100880 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -60,6 +60,12 @@ obj-$(CONFIG_NF_NAT) += nf_nat.o
nf_nat-$(CONFIG_NF_NAT_REDIRECT) += nf_nat_redirect.o
nf_nat-$(CONFIG_NF_NAT_MASQUERADE) += nf_nat_masquerade.o
+ifeq ($(CONFIG_NF_NAT),m)
+nf_nat-$(CONFIG_DEBUG_INFO_BTF_MODULES) += nf_nat_bpf.o
+else ifeq ($(CONFIG_NF_NAT),y)
+nf_nat-$(CONFIG_DEBUG_INFO_BTF) += nf_nat_bpf.o
+endif
+
# NAT helpers
obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_amanda.o
obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index 1cd87b28c9b0..8639e7efd0e2 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -6,12 +6,14 @@
* are exposed through to BPF programs is explicitly unstable.
*/
+#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
+#include <linux/filter.h>
+#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/btf_ids.h>
#include <linux/net_namespace.h>
-#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netfilter/nf_conntrack_core.h>
@@ -134,7 +136,6 @@ __bpf_nf_ct_alloc_entry(struct net *net, struct bpf_sock_tuple *bpf_tuple,
memset(&ct->proto, 0, sizeof(ct->proto));
__nf_ct_set_timeout(ct, timeout * HZ);
- ct->status |= IPS_CONFIRMED;
out:
if (opts->netns_id >= 0)
@@ -184,14 +185,58 @@ static struct nf_conn *__bpf_nf_ct_lookup(struct net *net,
return ct;
}
+BTF_ID_LIST(btf_nf_conn_ids)
+BTF_ID(struct, nf_conn)
+BTF_ID(struct, nf_conn___init)
+
+/* Check writes into `struct nf_conn` */
+static int _nf_conntrack_btf_struct_access(struct bpf_verifier_log *log,
+ const struct btf *btf,
+ const struct btf_type *t, int off,
+ int size, enum bpf_access_type atype,
+ u32 *next_btf_id,
+ enum bpf_type_flag *flag)
+{
+ const struct btf_type *ncit;
+ const struct btf_type *nct;
+ size_t end;
+
+ ncit = btf_type_by_id(btf, btf_nf_conn_ids[1]);
+ nct = btf_type_by_id(btf, btf_nf_conn_ids[0]);
+
+ if (t != nct && t != ncit) {
+ bpf_log(log, "only read is supported\n");
+ return -EACCES;
+ }
+
+ /* `struct nf_conn` and `struct nf_conn___init` have the same layout
+ * so we are safe to simply merge offset checks here
+ */
+ switch (off) {
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+ case offsetof(struct nf_conn, mark):
+ end = offsetofend(struct nf_conn, mark);
+ break;
+#endif
+ default:
+ bpf_log(log, "no write support to nf_conn at off %d\n", off);
+ return -EACCES;
+ }
+
+ if (off + size > end) {
+ bpf_log(log,
+ "write access at off %d with size %d beyond the member of nf_conn ended at %zu\n",
+ off, size, end);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global functions as their definitions will be in nf_conntrack BTF");
-struct nf_conn___init {
- struct nf_conn ct;
-};
-
/* bpf_xdp_ct_alloc - Allocate a new CT entry
*
* Parameters:
@@ -339,6 +384,7 @@ struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
struct nf_conn *nfct = (struct nf_conn *)nfct_i;
int err;
+ nfct->status |= IPS_CONFIRMED;
err = nf_conntrack_hash_check_insert(nfct);
if (err < 0) {
nf_conntrack_free(nfct);
@@ -449,5 +495,19 @@ int register_nf_conntrack_bpf(void)
int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &nf_conntrack_kfunc_set);
- return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &nf_conntrack_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &nf_conntrack_kfunc_set);
+ if (!ret) {
+ mutex_lock(&nf_conn_btf_access_lock);
+ nfct_btf_struct_access = _nf_conntrack_btf_struct_access;
+ mutex_unlock(&nf_conn_btf_access_lock);
+ }
+
+ return ret;
+}
+
+void cleanup_nf_conntrack_bpf(void)
+{
+ mutex_lock(&nf_conn_btf_access_lock);
+ nfct_btf_struct_access = NULL;
+ mutex_unlock(&nf_conn_btf_access_lock);
}
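A hypothetical BPF-side view of what the new struct_access hook permits; this is a selftest-style sketch, and the bpf_ct_opts layout plus the kfunc externs (bpf_skb_ct_lookup(), bpf_ct_release()) are assumptions modeled on the conntrack selftests. A whole-field write to nf_conn.mark passes the bounds check; any other write is rejected with -EACCES.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct bpf_ct_opts___local {
	s32 netns_id;
	s32 error;
	u8 l4proto;
	u8 dir;
	u8 reserved[2];
};

extern struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *skb_ctx,
					 struct bpf_sock_tuple *bpf_tuple,
					 u32 tuple__sz,
					 struct bpf_ct_opts___local *opts,
					 u32 opts__sz) __ksym;
extern void bpf_ct_release(struct nf_conn *ct) __ksym;

SEC("tc")
int write_ct_mark(struct __sk_buff *ctx)
{
	struct bpf_ct_opts___local opts = { .netns_id = -1, .l4proto = 6 };
	struct bpf_sock_tuple tup = {};
	struct nf_conn *ct;

	ct = bpf_skb_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
	if (!ct)
		return 0;

	ct->mark = 42;		/* allowed: lands inside nf_conn.mark */
	bpf_ct_release(ct);
	return 0;
}

char _license[] SEC("license") = "GPL";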
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 8208a28ea342..f97bda06d2a9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2516,6 +2516,7 @@ static int kill_all(struct nf_conn *i, void *data)
void nf_conntrack_cleanup_start(void)
{
+ cleanup_nf_conntrack_bpf();
conntrack_gc_work.exiting = true;
}
diff --git a/net/netfilter/nf_nat_bpf.c b/net/netfilter/nf_nat_bpf.c
new file mode 100644
index 000000000000..0fa5a0bbb0ff
--- /dev/null
+++ b/net/netfilter/nf_nat_bpf.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Unstable NAT helpers for the XDP and TC-BPF hooks
+ *
+ * These are called from the XDP and SCHED_CLS BPF programs. Note that it is
+ * allowed to break compatibility for these functions since the interface they
+ * are exposed through to BPF programs is explicitly unstable.
+ */
+
+#include <linux/bpf.h>
+#include <linux/btf_ids.h>
+#include <net/netfilter/nf_conntrack_bpf.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_nat.h>
+
+__diag_push();
+__diag_ignore_all("-Wmissing-prototypes",
+ "Global functions as their definitions will be in nf_nat BTF");
+
+/* bpf_ct_set_nat_info - Set source or destination NAT address
+ *
+ * Set the source or destination NAT address of the newly allocated
+ * nf_conn before insertion. This must be invoked on a referenced
+ * PTR_TO_BTF_ID to nf_conn___init.
+ *
+ * Parameters:
+ * @nfct - Pointer to referenced nf_conn object, obtained using
+ * bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
+ * @addr - NAT source/destination address
+ * @port - NAT source/destination port. Non-positive values are
+ * interpreted as a request to select a random port.
+ * @manip - NF_NAT_MANIP_SRC or NF_NAT_MANIP_DST
+ */
+int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
+ union nf_inet_addr *addr, int port,
+ enum nf_nat_manip_type manip)
+{
+ struct nf_conn *ct = (struct nf_conn *)nfct;
+ u16 proto = nf_ct_l3num(ct);
+ struct nf_nat_range2 range;
+
+ if (proto != NFPROTO_IPV4 && proto != NFPROTO_IPV6)
+ return -EINVAL;
+
+ memset(&range, 0, sizeof(struct nf_nat_range2));
+ range.flags = NF_NAT_RANGE_MAP_IPS;
+ range.min_addr = *addr;
+ range.max_addr = range.min_addr;
+ if (port > 0) {
+ range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+ range.min_proto.all = cpu_to_be16(port);
+ range.max_proto.all = range.min_proto.all;
+ }
+
+ return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
+}
+
+__diag_pop()
+
+BTF_SET8_START(nf_nat_kfunc_set)
+BTF_ID_FLAGS(func, bpf_ct_set_nat_info, KF_TRUSTED_ARGS)
+BTF_SET8_END(nf_nat_kfunc_set)
+
+static const struct btf_kfunc_id_set nf_bpf_nat_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &nf_nat_kfunc_set,
+};
+
+int register_nf_nat_bpf(void)
+{
+ int ret;
+
+ ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP,
+ &nf_bpf_nat_kfunc_set);
+ if (ret)
+ return ret;
+
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
+ &nf_bpf_nat_kfunc_set);
+}
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 7981be526f26..d8e6380f6337 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -16,7 +16,7 @@
#include <linux/siphash.h>
#include <linux/rtnetlink.h>
-#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
@@ -1152,7 +1152,7 @@ static int __init nf_nat_init(void)
WARN_ON(nf_nat_hook != NULL);
RCU_INIT_POINTER(nf_nat_hook, &nat_hook);
- return 0;
+ return register_nf_nat_bpf();
}
static void __exit nf_nat_cleanup(void)
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 7c136de117eb..39b7c00e4cef 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -739,6 +739,36 @@ out:
return err;
}
+static int genl_header_check(const struct genl_family *family,
+ struct nlmsghdr *nlh, struct genlmsghdr *hdr,
+ struct netlink_ext_ack *extack)
+{
+ u16 flags;
+
+ /* Only for commands added after we started validating */
+ if (hdr->cmd < family->resv_start_op)
+ return 0;
+
+ if (hdr->reserved) {
+ NL_SET_ERR_MSG(extack, "genlmsghdr.reserved field is not 0");
+ return -EINVAL;
+ }
+
+ /* Old netlink flags have pretty loose semantics; allow only the flags
+ * consumed by the core, where we can enforce the meaning.
+ */
+ flags = nlh->nlmsg_flags;
+ if ((flags & NLM_F_DUMP) == NLM_F_DUMP) /* DUMP is 2 bits */
+ flags &= ~NLM_F_DUMP;
+ if (flags & ~(NLM_F_REQUEST | NLM_F_ACK | NLM_F_ECHO)) {
+ NL_SET_ERR_MSG(extack,
+ "ambiguous or reserved bits set in nlmsg_flags");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int genl_family_rcv_msg(const struct genl_family *family,
struct sk_buff *skb,
struct nlmsghdr *nlh,
@@ -757,7 +787,7 @@ static int genl_family_rcv_msg(const struct genl_family *family,
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
return -EINVAL;
- if (hdr->cmd >= family->resv_start_op && hdr->reserved)
+ if (genl_header_check(family, nlh, hdr, extack))
return -EINVAL;
if (genl_get_cmd(hdr->cmd, family, &op))
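A worked example of the flag rule above (a sketch using the userspace netlink header): NLM_F_DUMP is NLM_F_ROOT | NLM_F_MATCH, i.e. two bits, so it is only stripped when both are present; a lone NLM_F_ROOT falls through to the reject test as ambiguous.

#include <linux/netlink.h>
#include <stdbool.h>
#include <stdint.h>

static bool genl_flags_ok(uint16_t flags)
{
	if ((flags & NLM_F_DUMP) == NLM_F_DUMP)	/* both ROOT and MATCH set */
		flags &= ~NLM_F_DUMP;
	return !(flags & ~(NLM_F_REQUEST | NLM_F_ACK | NLM_F_ECHO));
}

/* genl_flags_ok(NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP) -> true
 * genl_flags_ok(NLM_F_REQUEST | NLM_F_ROOT)             -> false (ambiguous)
 */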
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index d8754366506a..4444fd82b66d 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -166,10 +166,10 @@ void rds_tcp_reset_callbacks(struct socket *sock,
*/
atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
- lock_sock(osock->sk);
/* reset receive side state for rds_tcp_data_recv() for osock */
cancel_delayed_work_sync(&cp->cp_send_w);
cancel_delayed_work_sync(&cp->cp_recv_w);
+ lock_sock(osock->sk);
if (tc->t_tinc) {
rds_inc_put(&tc->t_tinc->ti_inc);
tc->t_tinc = NULL;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index d9fbaa0fbe8b..d229ce99e554 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -261,12 +261,7 @@ static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct basic_filter *f = fh;
- if (f && f->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &f->res, base);
- else
- __tcf_unbind_filter(q, &f->res);
- }
+ tc_cls_bind_class(classid, cl, q, &f->res, base);
}
static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh,
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 938be14cfa3f..bc317b3eac12 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -635,12 +635,7 @@ static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
{
struct cls_bpf_prog *prog = fh;
- if (prog && prog->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &prog->res, base);
- else
- __tcf_unbind_filter(q, &prog->res);
- }
+ tc_cls_bind_class(classid, cl, q, &prog->res, base);
}
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 22d32b82bc09..25bc57ee6ea1 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -3405,12 +3405,7 @@ static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct cls_fl_filter *f = fh;
- if (f && f->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &f->res, base);
- else
- __tcf_unbind_filter(q, &f->res);
- }
+ tc_cls_bind_class(classid, cl, q, &f->res, base);
}
static bool fl_delete_empty(struct tcf_proto *tp)
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index fa66191574a4..a32351da968c 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -416,12 +416,7 @@ static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct fw_filter *f = fh;
- if (f && f->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &f->res, base);
- else
- __tcf_unbind_filter(q, &f->res);
- }
+ tc_cls_bind_class(classid, cl, q, &f->res, base);
}
static struct tcf_proto_ops cls_fw_ops __read_mostly = {
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 63b99ffb7dbc..39a5d9c170de 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -394,12 +394,7 @@ static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct cls_mall_head *head = fh;
- if (head && head->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &head->res, base);
- else
- __tcf_unbind_filter(q, &head->res);
- }
+ tc_cls_bind_class(classid, cl, q, &head->res, base);
}
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 17bb04af2fa8..9e43b929d4ca 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -649,12 +649,7 @@ static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct route4_filter *f = fh;
- if (f && f->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &f->res, base);
- else
- __tcf_unbind_filter(q, &f->res);
- }
+ tc_cls_bind_class(classid, cl, q, &f->res, base);
}
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index fb60f2c2c325..b00a7dbd0587 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -733,12 +733,7 @@ static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct rsvp_filter *f = fh;
- if (f && f->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &f->res, base);
- else
- __tcf_unbind_filter(q, &f->res);
- }
+ tc_cls_bind_class(classid, cl, q, &f->res, base);
}
static struct tcf_proto_ops RSVP_OPS __read_mostly = {
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index a33076033462..1c9eeb98d826 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -691,12 +691,7 @@ static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
{
struct tcindex_filter_result *r = fh;
- if (r && r->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &r->res, base);
- else
- __tcf_unbind_filter(q, &r->res);
- }
+ tc_cls_bind_class(classid, cl, q, &r->res, base);
}
static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 0b3d909214b8..34d25f7a0687 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -1250,12 +1250,7 @@ static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
{
struct tc_u_knode *n = fh;
- if (n && n->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &n->res, base);
- else
- __tcf_unbind_filter(q, &n->res);
- }
+ tc_cls_bind_class(classid, cl, q, &n->res, base);
}
static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
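The ten identical bind_class bodies collapse into one helper, presumably in include/net/pkt_cls.h; a sketch reconstructed from the removed code. Note that the filter NULL check the old bodies carried moves into tcf_node_bind() in the sch_api.c hunk below.

static inline void tc_cls_bind_class(u32 classid, unsigned long cl,
				     void *q, struct tcf_result *res,
				     unsigned long base)
{
	if (res->classid == classid) {
		if (cl)
			__tcf_bind_filter(q, res, base);
		else
			__tcf_unbind_filter(q, res);
	}
}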
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 7c15f1f3da17..c98af0ada706 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1915,7 +1915,7 @@ static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
struct tcf_bind_args *a = (void *)arg;
- if (tp->ops->bind_class) {
+ if (n && tp->ops->bind_class) {
struct Qdisc *q = tcf_block_q(tp->chain->block);
sch_tree_lock(q);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index db6b7373d16c..34964145514e 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -863,12 +863,17 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
}
list_del_init(&shkey->key_list);
- sctp_auth_shkey_release(shkey);
list_add(&cur_key->key_list, sh_keys);
- if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
- sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+ if (asoc && asoc->active_key_id == auth_key->sca_keynumber &&
+ sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) {
+ list_del_init(&cur_key->key_list);
+ sctp_auth_shkey_release(cur_key);
+ list_add(&shkey->key_list, sh_keys);
+ return -ENOMEM;
+ }
+ sctp_auth_shkey_release(shkey);
return 0;
}
@@ -902,8 +907,13 @@ int sctp_auth_set_active_key(struct sctp_endpoint *ep,
return -EINVAL;
if (asoc) {
+ __u16 active_key_id = asoc->active_key_id;
+
asoc->active_key_id = key_id;
- sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+ if (sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) {
+ asoc->active_key_id = active_key_id;
+ return -ENOMEM;
+ }
} else
ep->active_key_id = key_id;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 7c9a0d0b1230..149171774bc6 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1205,7 +1205,7 @@ svc_generic_init_request(struct svc_rqst *rqstp,
goto err_bad_proc;
/* Initialize storage for argp and resp */
- memset(rqstp->rq_argp, 0, procp->pc_argsize);
+ memset(rqstp->rq_argp, 0, procp->pc_argzero);
memset(rqstp->rq_resp, 0, procp->pc_ressize);
/* Bump per-procedure stats counter */
@@ -1434,8 +1434,7 @@ svc_process(struct svc_rqst *rqstp)
{
struct kvec *argv = &rqstp->rq_arg.head[0];
struct kvec *resv = &rqstp->rq_res.head[0];
- struct svc_serv *serv = rqstp->rq_server;
- u32 dir;
+ __be32 dir;
#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
if (!fail_sunrpc.ignore_server_disconnect &&
@@ -1450,7 +1449,7 @@ svc_process(struct svc_rqst *rqstp)
rqstp->rq_next_page = &rqstp->rq_respages[1];
resv->iov_base = page_address(rqstp->rq_respages[0]);
resv->iov_len = 0;
- rqstp->rq_res.pages = rqstp->rq_respages + 1;
+ rqstp->rq_res.pages = rqstp->rq_next_page;
rqstp->rq_res.len = 0;
rqstp->rq_res.page_base = 0;
rqstp->rq_res.page_len = 0;
@@ -1458,18 +1457,17 @@ svc_process(struct svc_rqst *rqstp)
rqstp->rq_res.tail[0].iov_base = NULL;
rqstp->rq_res.tail[0].iov_len = 0;
- dir = svc_getnl(argv);
- if (dir != 0) {
- /* direction != CALL */
- svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
- serv->sv_stats->rpcbadfmt++;
+ dir = svc_getu32(argv);
+ if (dir != rpc_call)
+ goto out_baddir;
+ if (!svc_process_common(rqstp, argv, resv))
goto out_drop;
- }
-
- /* Returns 1 for send, 0 for drop */
- if (likely(svc_process_common(rqstp, argv, resv)))
- return svc_send(rqstp);
+ return svc_send(rqstp);
+out_baddir:
+ svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
+ be32_to_cpu(dir));
+ rqstp->rq_server->sv_stats->rpcbadfmt++;
out_drop:
svc_drop(rqstp);
return 0;
@@ -1556,8 +1554,12 @@ out:
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
-/*
- * Return (transport-specific) limit on the rpc payload.
+/**
+ * svc_max_payload - Return transport-specific limit on the RPC payload
+ * @rqstp: RPC transaction context
+ *
+ * Returns the maximum number of payload bytes the current transport
+ * allows.
*/
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 482586c23fdd..336a7c7833e4 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -947,6 +947,28 @@ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
EXPORT_SYMBOL_GPL(xdr_init_encode);
/**
+ * xdr_init_encode_pages - Initialize an xdr_stream for encoding into pages
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to XDR buffer into which to encode data
+ * @pages: list of pages to encode into
+ * @rqst: pointer to controlling rpc_rqst, for debugging
+ *
+ */
+void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
+ struct page **pages, struct rpc_rqst *rqst)
+{
+ xdr_reset_scratch_buffer(xdr);
+
+ xdr->buf = buf;
+ xdr->page_ptr = pages;
+ xdr->iov = NULL;
+ xdr->p = page_address(*pages);
+ xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
+ xdr->rqst = rqst;
+}
+EXPORT_SYMBOL_GPL(xdr_init_encode_pages);
+
+/**
* __xdr_commit_encode - Ensure all data is written to buffer
* @xdr: pointer to xdr_stream
*
@@ -1575,7 +1597,7 @@ EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
*
* @buf and @subbuf may be pointers to the same struct xdr_buf.
*
- * Returns -1 if base of length are out of bounds.
+ * Returns -1 if base or length are out of bounds.
*/
int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf,
unsigned int base, unsigned int len)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c955c7253d4b..0f08c3177872 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -569,12 +569,6 @@ static void unix_sock_destructor(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
-#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
- if (u->oob_skb) {
- kfree_skb(u->oob_skb);
- u->oob_skb = NULL;
- }
-#endif
DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
@@ -620,6 +614,13 @@ static void unix_release_sock(struct sock *sk, int embrion)
unix_state_unlock(sk);
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ if (u->oob_skb) {
+ kfree_skb(u->oob_skb);
+ u->oob_skb = NULL;
+ }
+#endif
+
wake_up_interruptible_all(&u->peer_wait);
if (skpair != NULL) {
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 35863132f4f1..a9980e9b9304 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -1339,7 +1339,7 @@ EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
{
- kfree(pkt->buf);
+ kvfree(pkt->buf);
kfree(pkt);
}
EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index fe368af39554..148f66edb015 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13266,7 +13266,9 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
wake_mask_size);
if (tok) {
cfg->tokens_size = tokens_size;
- memcpy(&cfg->payload_tok, tok, sizeof(*tok) + tokens_size);
+ cfg->payload_tok = *tok;
+ memcpy(cfg->payload_tok.token_stream, tok->token_stream,
+ tokens_size);
}
trig->tcp = cfg;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index f09d528e5199..eb277105a878 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -559,7 +559,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
return -1;
hdrlen = ieee80211_hdrlen(hdr->frame_control) + data_offset;
- if (skb->len < hdrlen + 8)
+ if (skb->len < hdrlen)
return -1;
/* convert IEEE 802.11 header + possible LLC headers into Ethernet
@@ -574,8 +574,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
memcpy(tmp.h_dest, ieee80211_get_DA(hdr), ETH_ALEN);
memcpy(tmp.h_source, ieee80211_get_SA(hdr), ETH_ALEN);
- if (iftype == NL80211_IFTYPE_MESH_POINT)
- skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
+ if (iftype == NL80211_IFTYPE_MESH_POINT &&
+ skb_copy_bits(skb, hdrlen, &mesh_flags, 1) < 0)
+ return -1;
mesh_flags &= MESH_FLAGS_AE;
@@ -595,11 +596,12 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
if (iftype == NL80211_IFTYPE_MESH_POINT) {
if (mesh_flags == MESH_FLAGS_AE_A4)
return -1;
- if (mesh_flags == MESH_FLAGS_AE_A5_A6) {
- skb_copy_bits(skb, hdrlen +
- offsetof(struct ieee80211s_hdr, eaddr1),
- tmp.h_dest, 2 * ETH_ALEN);
- }
+ if (mesh_flags == MESH_FLAGS_AE_A5_A6 &&
+ skb_copy_bits(skb, hdrlen +
+ offsetof(struct ieee80211s_hdr, eaddr1),
+ tmp.h_dest, 2 * ETH_ALEN) < 0)
+ return -1;
+
hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
}
break;
@@ -613,10 +615,11 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
if (iftype == NL80211_IFTYPE_MESH_POINT) {
if (mesh_flags == MESH_FLAGS_AE_A5_A6)
return -1;
- if (mesh_flags == MESH_FLAGS_AE_A4)
- skb_copy_bits(skb, hdrlen +
- offsetof(struct ieee80211s_hdr, eaddr1),
- tmp.h_source, ETH_ALEN);
+ if (mesh_flags == MESH_FLAGS_AE_A4 &&
+ skb_copy_bits(skb, hdrlen +
+ offsetof(struct ieee80211s_hdr, eaddr1),
+ tmp.h_source, ETH_ALEN) < 0)
+ return -1;
hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
}
break;
@@ -628,16 +631,15 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
break;
}
- skb_copy_bits(skb, hdrlen, &payload, sizeof(payload));
- tmp.h_proto = payload.proto;
-
- if (likely((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
- tmp.h_proto != htons(ETH_P_AARP) &&
- tmp.h_proto != htons(ETH_P_IPX)) ||
- ether_addr_equal(payload.hdr, bridge_tunnel_header))) {
+ if (likely(skb_copy_bits(skb, hdrlen, &payload, sizeof(payload)) == 0 &&
+ ((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
+ payload.proto != htons(ETH_P_AARP) &&
+ payload.proto != htons(ETH_P_IPX)) ||
+ ether_addr_equal(payload.hdr, bridge_tunnel_header)))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType */
hdrlen += ETH_ALEN + 2;
+ tmp.h_proto = payload.proto;
skb_postpull_rcsum(skb, &payload, ETH_ALEN + 2);
} else {
tmp.h_proto = htons(skb->len - hdrlen);
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 76a80a41615b..fe8765c4075d 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -468,6 +468,7 @@ void wireless_send_event(struct net_device * dev,
struct __compat_iw_event *compat_event;
struct compat_iw_point compat_wrqu;
struct sk_buff *compskb;
+ int ptr_len;
#endif
/*
@@ -582,6 +583,9 @@ void wireless_send_event(struct net_device * dev,
nlmsg_end(skb, nlh);
#ifdef CONFIG_COMPAT
hdr_len = compat_event_type_size[descr->header_type];
+
+ /* ptr_len is the remaining size in the event header, excluding the LCP */
+ ptr_len = hdr_len - IW_EV_COMPAT_LCP_LEN;
event_len = hdr_len + extra_len;
compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
@@ -612,16 +616,15 @@ void wireless_send_event(struct net_device * dev,
if (descr->header_type == IW_HEADER_TYPE_POINT) {
compat_wrqu.length = wrqu->data.length;
compat_wrqu.flags = wrqu->data.flags;
- memcpy(&compat_event->pointer,
- ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF,
- hdr_len - IW_EV_COMPAT_LCP_LEN);
+ memcpy(compat_event->ptr_bytes,
+ ((char *)&compat_wrqu) + IW_EV_COMPAT_POINT_OFF,
+ ptr_len);
if (extra_len)
- memcpy(((char *) compat_event) + hdr_len,
- extra, extra_len);
+ memcpy(&compat_event->ptr_bytes[ptr_len],
+ extra, extra_len);
} else {
/* extra_len must be zero, so no if (extra) needed */
- memcpy(&compat_event->pointer, wrqu,
- hdr_len - IW_EV_COMPAT_LCP_LEN);
+ memcpy(compat_event->ptr_bytes, wrqu, ptr_len);
}
nlmsg_end(compskb, nlh);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 5b4ce6ba1bc7..9f0561b67c12 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -355,16 +355,15 @@ static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entr
return nb_pkts;
}
-u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
+u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
struct xdp_sock *xs;
- u32 nb_pkts;
rcu_read_lock();
if (!list_is_singular(&pool->xsk_tx_list)) {
/* Fallback to the non-batched version */
rcu_read_unlock();
- return xsk_tx_peek_release_fallback(pool, max_entries);
+ return xsk_tx_peek_release_fallback(pool, nb_pkts);
}
xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
@@ -373,12 +372,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
goto out;
}
- max_entries = xskq_cons_nb_entries(xs->tx, max_entries);
- nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, max_entries);
- if (!nb_pkts) {
- xs->tx->queue_empty_descs++;
- goto out;
- }
+ nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
/* This is the backpressure mechanism for the Tx path. Try to
* reserve space in the completion queue for all packets, but
@@ -386,12 +380,18 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
* packets. This avoids having to implement any buffering in
* the Tx path.
*/
- nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
+ nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
if (!nb_pkts)
goto out;
- xskq_cons_release_n(xs->tx, max_entries);
+ nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
+ if (!nb_pkts) {
+ xs->tx->queue_empty_descs++;
+ goto out;
+ }
+
__xskq_cons_release(xs->tx);
+ xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
xs->sk.sk_write_space(&xs->sk);
out:
@@ -954,8 +954,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
goto out_unlock;
}
- err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
- dev, qid);
+ err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
+ qid);
if (err) {
xp_destroy(xs->pool);
xs->pool = NULL;
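
The xsk.c reordering computes the completion-queue budget before any Tx descriptors are consumed, so backpressure can no longer strand descriptors that were read but never submitted. Condensed from the hunks above (error paths trimmed):

/* 1. Upper-bound by what the Tx ring currently holds. */
nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

/* 2. Upper-bound by free completion-queue slots (backpressure). */
nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);

/* 3. Only now consume at most nb_pkts descriptors... */
nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);

/* 4. ...and publish exactly that many completion addresses. */
__xskq_cons_release(xs->tx);
xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);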
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index a71a8c6edf55..ed6c71826d31 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -212,17 +212,18 @@ err_unreg_pool:
return err;
}
-int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
struct net_device *dev, u16 queue_id)
{
u16 flags;
+ struct xdp_umem *umem = umem_xs->umem;
/* One fill and completion ring required for each queue id. */
if (!pool->fq || !pool->cq)
return -EINVAL;
flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
- if (pool->uses_need_wakeup)
+ if (umem_xs->pool->uses_need_wakeup)
flags |= XDP_USE_NEED_WAKEUP;
return xp_assign_dev(pool, dev, queue_id, flags);
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index fb20bf7207cf..c6fb6b763658 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -205,6 +205,11 @@ static inline bool xskq_cons_read_desc(struct xsk_queue *q,
return false;
}
+static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
+{
+ q->cached_cons += cnt;
+}
+
static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
u32 max)
{
@@ -226,6 +231,8 @@ static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff
cached_cons++;
}
+ /* Release valid plus any invalid entries */
+ xskq_cons_release_n(q, cached_cons - q->cached_cons);
return nb_entries;
}
@@ -291,11 +298,6 @@ static inline void xskq_cons_release(struct xsk_queue *q)
q->cached_cons++;
}
-static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
-{
- q->cached_cons += cnt;
-}
-
static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
/* No barriers needed since data is not accessed */
@@ -350,21 +352,17 @@ static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
return 0;
}
-static inline u32 xskq_prod_reserve_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
- u32 max)
+static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
+ u32 nb_entries)
{
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
- u32 nb_entries, i, cached_prod;
-
- nb_entries = xskq_prod_nb_free(q, max);
+ u32 i, cached_prod;
/* A, matches D */
cached_prod = q->cached_prod;
for (i = 0; i < nb_entries; i++)
ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
q->cached_prod = cached_prod;
-
- return nb_entries;
}
static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 637ca8838436..5f5aafd418af 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -207,7 +207,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
- struct xfrm_user_offload *xuo)
+ struct xfrm_user_offload *xuo,
+ struct netlink_ext_ack *extack)
{
int err;
struct dst_entry *dst;
@@ -216,15 +217,21 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
xfrm_address_t *saddr;
xfrm_address_t *daddr;
- if (!x->type_offload)
+ if (!x->type_offload) {
+ NL_SET_ERR_MSG(extack, "Type doesn't support offload");
return -EINVAL;
+ }
/* We don't yet support UDP encapsulation and TFC padding. */
- if (x->encap || x->tfcpad)
+ if (x->encap || x->tfcpad) {
+ NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded");
return -EINVAL;
+ }
- if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND))
+ if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND)) {
+ NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
return -EINVAL;
+ }
dev = dev_get_by_index(net, xuo->ifindex);
if (!dev) {
@@ -256,6 +263,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
if (x->props.flags & XFRM_STATE_ESN &&
!dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
+ NL_SET_ERR_MSG(extack, "Device doesn't support offload with ESN");
xso->dev = NULL;
dev_put(dev);
return -EINVAL;
@@ -277,8 +285,10 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
xso->real_dev = NULL;
netdev_put(dev, &xso->dev_tracker);
- if (err != -EOPNOTSUPP)
+ if (err != -EOPNOTSUPP) {
+ NL_SET_ERR_MSG(extack, "Device failed to offload this state");
return err;
+ }
}
return 0;
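
xfrm_dev_state_add() above sets the tone for the rest of the xfrm changes in this merge: a struct netlink_ext_ack * is threaded down from the netlink handlers so each -EINVAL carries a human-readable reason back to userspace. The shape of the pattern (demo_validate() and DEMO_VALID_FLAGS are hypothetical; NL_SET_ERR_MSG() is the real macro):

#include <linux/netlink.h>

#define DEMO_VALID_FLAGS 0x3

static int demo_validate(u32 flags, struct netlink_ext_ack *extack)
{
	if (flags & ~DEMO_VALID_FLAGS) {
		/* Message is returned to userspace in the netlink ack. */
		NL_SET_ERR_MSG(extack, "Unrecognized flags in request");
		return -EINVAL;
	}
	return 0;
}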
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index b2f4ec9c537f..97074f6f2bde 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -20,11 +20,13 @@
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
+#include <net/dst_metadata.h>
#include "xfrm_inout.h"
struct xfrm_trans_tasklet {
- struct tasklet_struct tasklet;
+ struct work_struct work;
+ spinlock_t queue_lock;
struct sk_buff_head queue;
};
@@ -719,7 +721,8 @@ resume:
sp = skb_sec_path(skb);
if (sp)
sp->olen = 0;
- skb_dst_drop(skb);
+ if (skb_valid_dst(skb))
+ skb_dst_drop(skb);
gro_cells_receive(&gro_cells, skb);
return 0;
} else {
@@ -737,7 +740,8 @@ resume:
sp = skb_sec_path(skb);
if (sp)
sp->olen = 0;
- skb_dst_drop(skb);
+ if (skb_valid_dst(skb))
+ skb_dst_drop(skb);
gro_cells_receive(&gro_cells, skb);
return err;
}
@@ -760,18 +764,22 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
}
EXPORT_SYMBOL(xfrm_input_resume);
-static void xfrm_trans_reinject(struct tasklet_struct *t)
+static void xfrm_trans_reinject(struct work_struct *work)
{
- struct xfrm_trans_tasklet *trans = from_tasklet(trans, t, tasklet);
+ struct xfrm_trans_tasklet *trans = container_of(work, struct xfrm_trans_tasklet, work);
struct sk_buff_head queue;
struct sk_buff *skb;
__skb_queue_head_init(&queue);
+ spin_lock_bh(&trans->queue_lock);
skb_queue_splice_init(&trans->queue, &queue);
+ spin_unlock_bh(&trans->queue_lock);
+ local_bh_disable();
while ((skb = __skb_dequeue(&queue)))
XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
NULL, skb);
+ local_bh_enable();
}
int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
@@ -789,8 +797,10 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
XFRM_TRANS_SKB_CB(skb)->finish = finish;
XFRM_TRANS_SKB_CB(skb)->net = net;
+ spin_lock_bh(&trans->queue_lock);
__skb_queue_tail(&trans->queue, skb);
- tasklet_schedule(&trans->tasklet);
+ spin_unlock_bh(&trans->queue_lock);
+ schedule_work(&trans->work);
return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue_net);
@@ -817,7 +827,8 @@ void __init xfrm_input_init(void)
struct xfrm_trans_tasklet *trans;
trans = &per_cpu(xfrm_trans_tasklet, i);
+ spin_lock_init(&trans->queue_lock);
__skb_queue_head_init(&trans->queue);
- tasklet_setup(&trans->tasklet, xfrm_trans_reinject);
+ INIT_WORK(&trans->work, xfrm_trans_reinject);
}
}
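
The xfrm_input.c conversion replaces the per-CPU reinject tasklet with a work item. Work runs in process context, where softirq execution no longer serializes queue access, so the skb queue gains a spinlock and the handler splices the backlog out under it; the dequeue loop in the hunk also runs under local_bh_disable() because the finish callbacks expect BH context. A generic sketch of the conversion (demo_* names are illustrative):

#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_ctx {
	struct work_struct work;	/* was struct tasklet_struct */
	spinlock_t queue_lock;		/* softirqs no longer serialize us */
	struct sk_buff_head queue;
};

static void demo_work_fn(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);

	/* Grab the whole backlog under the lock, process it outside. */
	spin_lock_bh(&ctx->queue_lock);
	skb_queue_splice_init(&ctx->queue, &queue);
	spin_unlock_bh(&ctx->queue_lock);

	while ((skb = __skb_dequeue(&queue)))
		kfree_skb(skb);		/* stand-in for the real handler */
}

static void demo_init(struct demo_ctx *ctx)
{
	spin_lock_init(&ctx->queue_lock);
	__skb_queue_head_init(&ctx->queue);
	INIT_WORK(&ctx->work, demo_work_fn);	/* was tasklet_setup() */
}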
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 5113fa0fbcee..5a67b120c4db 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -41,6 +41,7 @@
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
+#include <net/dst_metadata.h>
#include <net/netns/generic.h>
#include <linux/etherdevice.h>
@@ -56,6 +57,89 @@ static const struct net_device_ops xfrmi_netdev_ops;
struct xfrmi_net {
/* lists for storing interfaces in use */
struct xfrm_if __rcu *xfrmi[XFRMI_HASH_SIZE];
+ struct xfrm_if __rcu *collect_md_xfrmi;
+};
+
+static const struct nla_policy xfrm_lwt_policy[LWT_XFRM_MAX + 1] = {
+ [LWT_XFRM_IF_ID] = NLA_POLICY_MIN(NLA_U32, 1),
+ [LWT_XFRM_LINK] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
+static void xfrmi_destroy_state(struct lwtunnel_state *lwt)
+{
+}
+
+static int xfrmi_build_state(struct net *net, struct nlattr *nla,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **ts,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[LWT_XFRM_MAX + 1];
+ struct lwtunnel_state *new_state;
+ struct xfrm_md_info *info;
+ int ret;
+
+ ret = nla_parse_nested(tb, LWT_XFRM_MAX, nla, xfrm_lwt_policy, extack);
+ if (ret < 0)
+ return ret;
+
+ if (!tb[LWT_XFRM_IF_ID]) {
+ NL_SET_ERR_MSG(extack, "if_id must be set");
+ return -EINVAL;
+ }
+
+ new_state = lwtunnel_state_alloc(sizeof(*info));
+ if (!new_state) {
+ NL_SET_ERR_MSG(extack, "failed to create encap info");
+ return -ENOMEM;
+ }
+
+ new_state->type = LWTUNNEL_ENCAP_XFRM;
+
+ info = lwt_xfrm_info(new_state);
+
+ info->if_id = nla_get_u32(tb[LWT_XFRM_IF_ID]);
+
+ if (tb[LWT_XFRM_LINK])
+ info->link = nla_get_u32(tb[LWT_XFRM_LINK]);
+
+ *ts = new_state;
+ return 0;
+}
+
+static int xfrmi_fill_encap_info(struct sk_buff *skb,
+ struct lwtunnel_state *lwt)
+{
+ struct xfrm_md_info *info = lwt_xfrm_info(lwt);
+
+ if (nla_put_u32(skb, LWT_XFRM_IF_ID, info->if_id) ||
+ (info->link && nla_put_u32(skb, LWT_XFRM_LINK, info->link)))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int xfrmi_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+ return nla_total_size(sizeof(u32)) + /* LWT_XFRM_IF_ID */
+ nla_total_size(sizeof(u32)); /* LWT_XFRM_LINK */
+}
+
+static int xfrmi_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+ struct xfrm_md_info *a_info = lwt_xfrm_info(a);
+ struct xfrm_md_info *b_info = lwt_xfrm_info(b);
+
+ return memcmp(a_info, b_info, sizeof(*a_info));
+}
+
+static const struct lwtunnel_encap_ops xfrmi_encap_ops = {
+ .build_state = xfrmi_build_state,
+ .destroy_state = xfrmi_destroy_state,
+ .fill_encap = xfrmi_fill_encap_info,
+ .get_encap_size = xfrmi_encap_nlsize,
+ .cmp_encap = xfrmi_encap_cmp,
+ .owner = THIS_MODULE,
};
#define for_each_xfrmi_rcu(start, xi) \
@@ -77,17 +161,23 @@ static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
return xi;
}
+ xi = rcu_dereference(xfrmn->collect_md_xfrmi);
+ if (xi && (xi->dev->flags & IFF_UP))
+ return xi;
+
return NULL;
}
-static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
- unsigned short family)
+static bool xfrmi_decode_session(struct sk_buff *skb,
+ unsigned short family,
+ struct xfrm_if_decode_session_result *res)
{
struct net_device *dev;
+ struct xfrm_if *xi;
int ifindex = 0;
if (!secpath_exists(skb) || !skb->dev)
- return NULL;
+ return false;
switch (family) {
case AF_INET6:
@@ -107,11 +197,18 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
}
if (!dev || !(dev->flags & IFF_UP))
- return NULL;
+ return false;
if (dev->netdev_ops != &xfrmi_netdev_ops)
- return NULL;
+ return false;
- return netdev_priv(dev);
+ xi = netdev_priv(dev);
+ res->net = xi->net;
+
+ if (xi->p.collect_md)
+ res->if_id = xfrm_input_state(skb)->if_id;
+ else
+ res->if_id = xi->p.if_id;
+ return true;
}
static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
@@ -157,7 +254,10 @@ static int xfrmi_create(struct net_device *dev)
if (err < 0)
goto out;
- xfrmi_link(xfrmn, xi);
+ if (xi->p.collect_md)
+ rcu_assign_pointer(xfrmn->collect_md_xfrmi, xi);
+ else
+ xfrmi_link(xfrmn, xi);
return 0;
@@ -185,7 +285,10 @@ static void xfrmi_dev_uninit(struct net_device *dev)
struct xfrm_if *xi = netdev_priv(dev);
struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);
- xfrmi_unlink(xfrmn, xi);
+ if (xi->p.collect_md)
+ RCU_INIT_POINTER(xfrmn->collect_md_xfrmi, NULL);
+ else
+ xfrmi_unlink(xfrmn, xi);
}
static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
@@ -214,6 +317,7 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
struct xfrm_state *x;
struct xfrm_if *xi;
bool xnet;
+ int link;
if (err && !secpath_exists(skb))
return 0;
@@ -224,6 +328,7 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
if (!xi)
return 1;
+ link = skb->dev->ifindex;
dev = xi->dev;
skb->dev = dev;
@@ -254,6 +359,17 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
}
xfrmi_scrub_packet(skb, xnet);
+ if (xi->p.collect_md) {
+ struct metadata_dst *md_dst;
+
+ md_dst = metadata_dst_alloc(0, METADATA_XFRM, GFP_ATOMIC);
+ if (!md_dst)
+ return -ENOMEM;
+
+ md_dst->u.xfrm_info.if_id = x->if_id;
+ md_dst->u.xfrm_info.link = link;
+ skb_dst_set(skb, (struct dst_entry *)md_dst);
+ }
dev_sw_netstats_rx_add(dev, skb->len);
return 0;
@@ -269,10 +385,23 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
struct net_device *tdev;
struct xfrm_state *x;
int err = -1;
+ u32 if_id;
int mtu;
+ if (xi->p.collect_md) {
+ struct xfrm_md_info *md_info = skb_xfrm_md_info(skb);
+
+ if (unlikely(!md_info))
+ return -EINVAL;
+
+ if_id = md_info->if_id;
+ fl->flowi_oif = md_info->link;
+ } else {
+ if_id = xi->p.if_id;
+ }
+
dst_hold(dst);
- dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
+ dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, if_id);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
@@ -283,7 +412,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
if (!x)
goto tx_err_link_failure;
- if (x->if_id != xi->p.if_id)
+ if (x->if_id != if_id)
goto tx_err_link_failure;
tdev = dst->dev;
@@ -633,6 +762,9 @@ static void xfrmi_netlink_parms(struct nlattr *data[],
if (data[IFLA_XFRM_IF_ID])
parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
+
+ if (data[IFLA_XFRM_COLLECT_METADATA])
+ parms->collect_md = true;
}
static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
@@ -645,14 +777,27 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
int err;
xfrmi_netlink_parms(data, &p);
- if (!p.if_id) {
- NL_SET_ERR_MSG(extack, "if_id must be non zero");
- return -EINVAL;
- }
+ if (p.collect_md) {
+ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
- xi = xfrmi_locate(net, &p);
- if (xi)
- return -EEXIST;
+ if (p.link || p.if_id) {
+ NL_SET_ERR_MSG(extack, "link and if_id must be zero");
+ return -EINVAL;
+ }
+
+ if (rtnl_dereference(xfrmn->collect_md_xfrmi))
+ return -EEXIST;
+
+ } else {
+ if (!p.if_id) {
+ NL_SET_ERR_MSG(extack, "if_id must be non zero");
+ return -EINVAL;
+ }
+
+ xi = xfrmi_locate(net, &p);
+ if (xi)
+ return -EEXIST;
+ }
xi = netdev_priv(dev);
xi->p = p;
@@ -682,12 +827,22 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
return -EINVAL;
}
+ if (p.collect_md) {
+ NL_SET_ERR_MSG(extack, "collect_md can't be changed");
+ return -EINVAL;
+ }
+
xi = xfrmi_locate(net, &p);
if (!xi) {
xi = netdev_priv(dev);
} else {
if (xi->dev != dev)
return -EEXIST;
+ if (xi->p.collect_md) {
+ NL_SET_ERR_MSG(extack,
+ "device can't be changed to collect_md");
+ return -EINVAL;
+ }
}
return xfrmi_update(xi, &p);
@@ -700,6 +855,8 @@ static size_t xfrmi_get_size(const struct net_device *dev)
nla_total_size(4) +
/* IFLA_XFRM_IF_ID */
nla_total_size(4) +
+ /* IFLA_XFRM_COLLECT_METADATA */
+ nla_total_size(0) +
0;
}
@@ -709,7 +866,8 @@ static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct xfrm_if_parms *parm = &xi->p;
if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
- nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id))
+ nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id) ||
+ (xi->p.collect_md && nla_put_flag(skb, IFLA_XFRM_COLLECT_METADATA)))
goto nla_put_failure;
return 0;
@@ -725,8 +883,10 @@ static struct net *xfrmi_get_link_net(const struct net_device *dev)
}
static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
- [IFLA_XFRM_LINK] = { .type = NLA_U32 },
- [IFLA_XFRM_IF_ID] = { .type = NLA_U32 },
+ [IFLA_XFRM_UNSPEC] = { .strict_start_type = IFLA_XFRM_COLLECT_METADATA },
+ [IFLA_XFRM_LINK] = { .type = NLA_U32 },
+ [IFLA_XFRM_IF_ID] = { .type = NLA_U32 },
+ [IFLA_XFRM_COLLECT_METADATA] = { .type = NLA_FLAG },
};
static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
@@ -762,6 +922,9 @@ static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
xip = &xi->next)
unregister_netdevice_queue(xi->dev, &list);
}
+ xi = rtnl_dereference(xfrmn->collect_md_xfrmi);
+ if (xi)
+ unregister_netdevice_queue(xi->dev, &list);
}
unregister_netdevice_many(&list);
rtnl_unlock();
@@ -999,6 +1162,8 @@ static int __init xfrmi_init(void)
if (err < 0)
goto rtnl_link_failed;
+ lwtunnel_encap_add_ops(&xfrmi_encap_ops, LWTUNNEL_ENCAP_XFRM);
+
xfrm_if_register_cb(&xfrm_if_cb);
return err;
@@ -1017,6 +1182,7 @@ pernet_dev_failed:
static void __exit xfrmi_fini(void)
{
xfrm_if_unregister_cb();
+ lwtunnel_encap_del_ops(&xfrmi_encap_ops, LWTUNNEL_ENCAP_XFRM);
rtnl_link_unregister(&xfrmi_link_ops);
xfrmi4_fini();
xfrmi6_fini();
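
On the collect_md receive path added above, each packet gets a metadata dst carrying the state's if_id and the ingress link, so a single device can represent many if_ids. Reduced to its essentials (sketch; only the allocation failure is handled):

#include <net/dst_metadata.h>

static int demo_set_xfrm_md(struct sk_buff *skb, u32 if_id, int link)
{
	struct metadata_dst *md_dst;

	md_dst = metadata_dst_alloc(0, METADATA_XFRM, GFP_ATOMIC);
	if (!md_dst)
		return -ENOMEM;

	/* Per-packet metadata replaces the per-device if_id. */
	md_dst->u.xfrm_info.if_id = if_id;
	md_dst->u.xfrm_info.link = link;
	skb_dst_set(skb, (struct dst_entry *)md_dst);
	return 0;
}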
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index cb40ff0ff28d..80143360bf09 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -203,6 +203,7 @@ static void ipcomp_free_scratches(void)
vfree(*per_cpu_ptr(scratches, i));
free_percpu(scratches);
+ ipcomp_scratches = NULL;
}
static void * __percpu *ipcomp_alloc_scratches(void)
@@ -325,18 +326,22 @@ void ipcomp_destroy(struct xfrm_state *x)
}
EXPORT_SYMBOL_GPL(ipcomp_destroy);
-int ipcomp_init_state(struct xfrm_state *x)
+int ipcomp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
int err;
struct ipcomp_data *ipcd;
struct xfrm_algo_desc *calg_desc;
err = -EINVAL;
- if (!x->calg)
+ if (!x->calg) {
+ NL_SET_ERR_MSG(extack, "Missing required compression algorithm");
goto out;
+ }
- if (x->encap)
+ if (x->encap) {
+ NL_SET_ERR_MSG(extack, "IPComp is not compatible with encapsulation");
goto out;
+ }
err = -ENOMEM;
ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cc6ab79609e2..e392d8d05e0c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1889,7 +1889,7 @@ EXPORT_SYMBOL(xfrm_policy_walk_done);
*/
static int xfrm_policy_match(const struct xfrm_policy *pol,
const struct flowi *fl,
- u8 type, u16 family, int dir, u32 if_id)
+ u8 type, u16 family, u32 if_id)
{
const struct xfrm_selector *sel = &pol->selector;
int ret = -ESRCH;
@@ -2014,7 +2014,7 @@ static struct xfrm_policy *
__xfrm_policy_eval_candidates(struct hlist_head *chain,
struct xfrm_policy *prefer,
const struct flowi *fl,
- u8 type, u16 family, int dir, u32 if_id)
+ u8 type, u16 family, u32 if_id)
{
u32 priority = prefer ? prefer->priority : ~0u;
struct xfrm_policy *pol;
@@ -2028,7 +2028,7 @@ __xfrm_policy_eval_candidates(struct hlist_head *chain,
if (pol->priority > priority)
break;
- err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
+ err = xfrm_policy_match(pol, fl, type, family, if_id);
if (err) {
if (err != -ESRCH)
return ERR_PTR(err);
@@ -2053,7 +2053,7 @@ static struct xfrm_policy *
xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
struct xfrm_policy *prefer,
const struct flowi *fl,
- u8 type, u16 family, int dir, u32 if_id)
+ u8 type, u16 family, u32 if_id)
{
struct xfrm_policy *tmp;
int i;
@@ -2061,8 +2061,7 @@ xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
tmp = __xfrm_policy_eval_candidates(cand->res[i],
prefer,
- fl, type, family, dir,
- if_id);
+ fl, type, family, if_id);
if (!tmp)
continue;
@@ -2101,7 +2100,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
ret = NULL;
hlist_for_each_entry_rcu(pol, chain, bydst) {
- err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
+ err = xfrm_policy_match(pol, fl, type, family, if_id);
if (err) {
if (err == -ESRCH)
continue;
@@ -2120,7 +2119,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
goto skip_inexact;
pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
- family, dir, if_id);
+ family, if_id);
if (pol) {
ret = pol;
if (IS_ERR(pol))
@@ -3516,17 +3515,17 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
int xerr_idx = -1;
const struct xfrm_if_cb *ifcb;
struct sec_path *sp;
- struct xfrm_if *xi;
u32 if_id = 0;
rcu_read_lock();
ifcb = xfrm_if_get_cb();
if (ifcb) {
- xi = ifcb->decode_session(skb, family);
- if (xi) {
- if_id = xi->p.if_id;
- net = xi->net;
+ struct xfrm_if_decode_session_result r;
+
+ if (ifcb->decode_session(skb, family, &r)) {
+ if_id = r.if_id;
+ net = r.net;
}
}
rcu_read_unlock();
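
This xfrm_policy.c hunk is the consumer side of the decode_session() refactor: instead of returning a struct xfrm_if * that must not be used outside the RCU section, the callback copies the two values the caller needs into a stack result. A sketch of the new contract (field layout inferred from the uses above):

struct xfrm_if_decode_session_result {
	struct net *net;
	u32 if_id;
};

struct xfrm_if_cb {
	/* Returns true and fills *res only while rcu_read_lock() is
	 * held; no xfrm_if pointer escapes the critical section.
	 */
	bool (*decode_session)(struct sk_buff *skb, unsigned short family,
			       struct xfrm_if_decode_session_result *res);
};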
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 9277d81b344c..9f4d42eb090f 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -766,18 +766,22 @@ int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
}
#endif
-int xfrm_init_replay(struct xfrm_state *x)
+int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
if (replay_esn) {
if (replay_esn->replay_window >
- replay_esn->bmp_len * sizeof(__u32) * 8)
+ replay_esn->bmp_len * sizeof(__u32) * 8) {
+ NL_SET_ERR_MSG(extack, "ESN replay window is too large for the chosen bitmap size");
return -EINVAL;
+ }
if (x->props.flags & XFRM_STATE_ESN) {
- if (replay_esn->replay_window == 0)
+ if (replay_esn->replay_window == 0) {
+ NL_SET_ERR_MSG(extack, "ESN replay window must be > 0");
return -EINVAL;
+ }
x->repl_mode = XFRM_REPLAY_MODE_ESN;
} else {
x->repl_mode = XFRM_REPLAY_MODE_BMP;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 91c32a3b6924..81df34b3da6e 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2611,7 +2611,8 @@ u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
}
EXPORT_SYMBOL_GPL(xfrm_state_mtu);
-int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
+int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
+ struct netlink_ext_ack *extack)
{
const struct xfrm_mode *inner_mode;
const struct xfrm_mode *outer_mode;
@@ -2626,12 +2627,16 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
if (x->sel.family != AF_UNSPEC) {
inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
- if (inner_mode == NULL)
+ if (inner_mode == NULL) {
+ NL_SET_ERR_MSG(extack, "Requested mode not found");
goto error;
+ }
if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
- family != x->sel.family)
+ family != x->sel.family) {
+ NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate a change of family");
goto error;
+ }
x->inner_mode = *inner_mode;
} else {
@@ -2639,11 +2644,15 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
int iafamily = AF_INET;
inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
- if (inner_mode == NULL)
+ if (inner_mode == NULL) {
+ NL_SET_ERR_MSG(extack, "Requested mode not found");
goto error;
+ }
- if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL))
+ if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
+ NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate an AF_UNSPEC selector");
goto error;
+ }
x->inner_mode = *inner_mode;
@@ -2658,24 +2667,27 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
}
x->type = xfrm_get_type(x->id.proto, family);
- if (x->type == NULL)
+ if (x->type == NULL) {
+ NL_SET_ERR_MSG(extack, "Requested type not found");
goto error;
+ }
x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);
- err = x->type->init_state(x);
+ err = x->type->init_state(x, extack);
if (err)
goto error;
outer_mode = xfrm_get_mode(x->props.mode, family);
if (!outer_mode) {
+ NL_SET_ERR_MSG(extack, "Requested mode not found");
err = -EPROTONOSUPPORT;
goto error;
}
x->outer_mode = *outer_mode;
if (init_replay) {
- err = xfrm_init_replay(x);
+ err = xfrm_init_replay(x, extack);
if (err)
goto error;
}
@@ -2690,7 +2702,7 @@ int xfrm_init_state(struct xfrm_state *x)
{
int err;
- err = __xfrm_init_state(x, true, false);
+ err = __xfrm_init_state(x, true, false, NULL);
if (!err)
x->km.state = XFRM_STATE_VALID;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 2ff017117730..e73f9efc54c1 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -35,7 +35,8 @@
#endif
#include <asm/unaligned.h>
-static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
+static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type,
+ struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[type];
struct xfrm_algo *algp;
@@ -44,8 +45,10 @@ static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
return 0;
algp = nla_data(rt);
- if (nla_len(rt) < (int)xfrm_alg_len(algp))
+ if (nla_len(rt) < (int)xfrm_alg_len(algp)) {
+ NL_SET_ERR_MSG(extack, "Invalid AUTH/CRYPT/COMP attribute length");
return -EINVAL;
+ }
switch (type) {
case XFRMA_ALG_AUTH:
@@ -54,6 +57,7 @@ static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
break;
default:
+ NL_SET_ERR_MSG(extack, "Invalid algorithm attribute type");
return -EINVAL;
}
@@ -61,7 +65,8 @@ static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
return 0;
}
-static int verify_auth_trunc(struct nlattr **attrs)
+static int verify_auth_trunc(struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
struct xfrm_algo_auth *algp;
@@ -70,14 +75,16 @@ static int verify_auth_trunc(struct nlattr **attrs)
return 0;
algp = nla_data(rt);
- if (nla_len(rt) < (int)xfrm_alg_auth_len(algp))
+ if (nla_len(rt) < (int)xfrm_alg_auth_len(algp)) {
+ NL_SET_ERR_MSG(extack, "Invalid AUTH_TRUNC attribute length");
return -EINVAL;
+ }
algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
return 0;
}
-static int verify_aead(struct nlattr **attrs)
+static int verify_aead(struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
struct xfrm_algo_aead *algp;
@@ -86,8 +93,10 @@ static int verify_aead(struct nlattr **attrs)
return 0;
algp = nla_data(rt);
- if (nla_len(rt) < (int)aead_len(algp))
+ if (nla_len(rt) < (int)aead_len(algp)) {
+ NL_SET_ERR_MSG(extack, "Invalid AEAD attribute length");
return -EINVAL;
+ }
algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
return 0;
@@ -102,7 +111,7 @@ static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
*addrp = nla_data(rt);
}
-static inline int verify_sec_ctx_len(struct nlattr **attrs)
+static inline int verify_sec_ctx_len(struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_user_sec_ctx *uctx;
@@ -112,42 +121,59 @@ static inline int verify_sec_ctx_len(struct nlattr **attrs)
uctx = nla_data(rt);
if (uctx->len > nla_len(rt) ||
- uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
+ uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) {
+ NL_SET_ERR_MSG(extack, "Invalid security context length");
return -EINVAL;
+ }
return 0;
}
static inline int verify_replay(struct xfrm_usersa_info *p,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
struct xfrm_replay_state_esn *rs;
- if (!rt)
- return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
+ if (!rt) {
+ if (p->flags & XFRM_STATE_ESN) {
+ NL_SET_ERR_MSG(extack, "Missing required attribute for ESN");
+ return -EINVAL;
+ }
+ return 0;
+ }
rs = nla_data(rt);
- if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
+ if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) {
+ NL_SET_ERR_MSG(extack, "ESN bitmap length must be <= 128");
return -EINVAL;
+ }
if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
- nla_len(rt) != sizeof(*rs))
+ nla_len(rt) != sizeof(*rs)) {
+ NL_SET_ERR_MSG(extack, "ESN attribute is too short to fit the full bitmap length");
return -EINVAL;
+ }
/* Only ESP and AH support the ESN feature. */
- if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
+ if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) {
+ NL_SET_ERR_MSG(extack, "ESN only supported for ESP and AH");
return -EINVAL;
+ }
- if (p->replay_window != 0)
+ if (p->replay_window != 0) {
+ NL_SET_ERR_MSG(extack, "ESN not compatible with legacy replay_window");
return -EINVAL;
+ }
return 0;
}
static int verify_newsa_info(struct xfrm_usersa_info *p,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
int err;
@@ -161,10 +187,12 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
break;
#else
err = -EAFNOSUPPORT;
+ NL_SET_ERR_MSG(extack, "IPv6 support disabled");
goto out;
#endif
default:
+ NL_SET_ERR_MSG(extack, "Invalid address family");
goto out;
}
@@ -173,65 +201,98 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
break;
case AF_INET:
- if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) {
+ NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)");
goto out;
+ }
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
- if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+ if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) {
+ NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)");
goto out;
+ }
break;
#else
+ NL_SET_ERR_MSG(extack, "IPv6 support disabled");
err = -EAFNOSUPPORT;
goto out;
#endif
default:
+ NL_SET_ERR_MSG(extack, "Invalid address family in selector");
goto out;
}
err = -EINVAL;
switch (p->id.proto) {
case IPPROTO_AH:
- if ((!attrs[XFRMA_ALG_AUTH] &&
- !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
- attrs[XFRMA_ALG_AEAD] ||
+ if (!attrs[XFRMA_ALG_AUTH] &&
+ !attrs[XFRMA_ALG_AUTH_TRUNC]) {
+ NL_SET_ERR_MSG(extack, "Missing required attribute for AH: AUTH_TRUNC or AUTH");
+ goto out;
+ }
+
+ if (attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_ALG_COMP] ||
- attrs[XFRMA_TFCPAD])
+ attrs[XFRMA_TFCPAD]) {
+ NL_SET_ERR_MSG(extack, "Invalid attributes for AH: AEAD, CRYPT, COMP, TFCPAD");
goto out;
+ }
break;
case IPPROTO_ESP:
- if (attrs[XFRMA_ALG_COMP])
+ if (attrs[XFRMA_ALG_COMP]) {
+ NL_SET_ERR_MSG(extack, "Invalid attribute for ESP: COMP");
goto out;
+ }
+
if (!attrs[XFRMA_ALG_AUTH] &&
!attrs[XFRMA_ALG_AUTH_TRUNC] &&
!attrs[XFRMA_ALG_CRYPT] &&
- !attrs[XFRMA_ALG_AEAD])
+ !attrs[XFRMA_ALG_AEAD]) {
+ NL_SET_ERR_MSG(extack, "Missing required attribute for ESP: at least one of AUTH, AUTH_TRUNC, CRYPT, AEAD");
goto out;
+ }
+
if ((attrs[XFRMA_ALG_AUTH] ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_CRYPT]) &&
- attrs[XFRMA_ALG_AEAD])
+ attrs[XFRMA_ALG_AEAD]) {
+ NL_SET_ERR_MSG(extack, "Invalid attribute combination for ESP: AEAD can't be used with AUTH, AUTH_TRUNC, CRYPT");
goto out;
+ }
+
if (attrs[XFRMA_TFCPAD] &&
- p->mode != XFRM_MODE_TUNNEL)
+ p->mode != XFRM_MODE_TUNNEL) {
+ NL_SET_ERR_MSG(extack, "TFC padding can only be used in tunnel mode");
goto out;
+ }
break;
case IPPROTO_COMP:
- if (!attrs[XFRMA_ALG_COMP] ||
- attrs[XFRMA_ALG_AEAD] ||
+ if (!attrs[XFRMA_ALG_COMP]) {
+ NL_SET_ERR_MSG(extack, "Missing required attribute for COMP: COMP");
+ goto out;
+ }
+
+ if (attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_AUTH] ||
attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_CRYPT] ||
- attrs[XFRMA_TFCPAD] ||
- (ntohl(p->id.spi) >= 0x10000))
+ attrs[XFRMA_TFCPAD]) {
+ NL_SET_ERR_MSG(extack, "Invalid attributes for COMP: AEAD, AUTH, AUTH_TRUNC, CRYPT, TFCPAD");
goto out;
+ }
+
+ if (ntohl(p->id.spi) >= 0x10000) {
+ NL_SET_ERR_MSG(extack, "SPI is too large for COMP (must be < 0x10000)");
+ goto out;
+ }
break;
#if IS_ENABLED(CONFIG_IPV6)
@@ -244,29 +305,36 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_ENCAP] ||
attrs[XFRMA_SEC_CTX] ||
- attrs[XFRMA_TFCPAD] ||
- !attrs[XFRMA_COADDR])
+ attrs[XFRMA_TFCPAD]) {
+ NL_SET_ERR_MSG(extack, "Invalid attributes for DSTOPTS/ROUTING");
goto out;
+ }
+
+ if (!attrs[XFRMA_COADDR]) {
+ NL_SET_ERR_MSG(extack, "Missing required COADDR attribute for DSTOPTS/ROUTING");
+ goto out;
+ }
break;
#endif
default:
+ NL_SET_ERR_MSG(extack, "Unsupported protocol");
goto out;
}
- if ((err = verify_aead(attrs)))
+ if ((err = verify_aead(attrs, extack)))
goto out;
- if ((err = verify_auth_trunc(attrs)))
+ if ((err = verify_auth_trunc(attrs, extack)))
goto out;
- if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
+ if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH, extack)))
goto out;
- if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
+ if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT, extack)))
goto out;
- if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
+ if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP, extack)))
goto out;
- if ((err = verify_sec_ctx_len(attrs)))
+ if ((err = verify_sec_ctx_len(attrs, extack)))
goto out;
- if ((err = verify_replay(p, attrs)))
+ if ((err = verify_replay(p, attrs, extack)))
goto out;
err = -EINVAL;
@@ -278,14 +346,19 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported mode");
goto out;
}
err = 0;
- if (attrs[XFRMA_MTIMER_THRESH])
- if (!attrs[XFRMA_ENCAP])
+ if (attrs[XFRMA_MTIMER_THRESH]) {
+ if (!attrs[XFRMA_ENCAP]) {
+ NL_SET_ERR_MSG(extack, "MTIMER_THRESH attribute can only be set on ENCAP states");
err = -EINVAL;
+ goto out;
+ }
+ }
out:
return err;
@@ -293,7 +366,7 @@ out:
static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
struct xfrm_algo_desc *(*get_byname)(const char *, int),
- struct nlattr *rta)
+ struct nlattr *rta, struct netlink_ext_ack *extack)
{
struct xfrm_algo *p, *ualg;
struct xfrm_algo_desc *algo;
@@ -304,8 +377,10 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
ualg = nla_data(rta);
algo = get_byname(ualg->alg_name, 1);
- if (!algo)
+ if (!algo) {
+ NL_SET_ERR_MSG(extack, "Requested COMP algorithm not found");
return -ENOSYS;
+ }
*props = algo->desc.sadb_alg_id;
p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
@@ -317,7 +392,8 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
return 0;
}
-static int attach_crypt(struct xfrm_state *x, struct nlattr *rta)
+static int attach_crypt(struct xfrm_state *x, struct nlattr *rta,
+ struct netlink_ext_ack *extack)
{
struct xfrm_algo *p, *ualg;
struct xfrm_algo_desc *algo;
@@ -328,8 +404,10 @@ static int attach_crypt(struct xfrm_state *x, struct nlattr *rta)
ualg = nla_data(rta);
algo = xfrm_ealg_get_byname(ualg->alg_name, 1);
- if (!algo)
+ if (!algo) {
+ NL_SET_ERR_MSG(extack, "Requested CRYPT algorithm not found");
return -ENOSYS;
+ }
x->props.ealgo = algo->desc.sadb_alg_id;
p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
@@ -343,7 +421,7 @@ static int attach_crypt(struct xfrm_state *x, struct nlattr *rta)
}
static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
- struct nlattr *rta)
+ struct nlattr *rta, struct netlink_ext_ack *extack)
{
struct xfrm_algo *ualg;
struct xfrm_algo_auth *p;
@@ -355,8 +433,10 @@ static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
ualg = nla_data(rta);
algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
- if (!algo)
+ if (!algo) {
+ NL_SET_ERR_MSG(extack, "Requested AUTH algorithm not found");
return -ENOSYS;
+ }
*props = algo->desc.sadb_alg_id;
p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
@@ -373,7 +453,7 @@ static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
}
static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
- struct nlattr *rta)
+ struct nlattr *rta, struct netlink_ext_ack *extack)
{
struct xfrm_algo_auth *p, *ualg;
struct xfrm_algo_desc *algo;
@@ -384,10 +464,14 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
ualg = nla_data(rta);
algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
- if (!algo)
+ if (!algo) {
+ NL_SET_ERR_MSG(extack, "Requested AUTH_TRUNC algorithm not found");
return -ENOSYS;
- if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+ }
+ if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) {
+ NL_SET_ERR_MSG(extack, "Invalid length requested for truncated ICV");
return -EINVAL;
+ }
*props = algo->desc.sadb_alg_id;
p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
@@ -402,7 +486,8 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
return 0;
}
-static int attach_aead(struct xfrm_state *x, struct nlattr *rta)
+static int attach_aead(struct xfrm_state *x, struct nlattr *rta,
+ struct netlink_ext_ack *extack)
{
struct xfrm_algo_aead *p, *ualg;
struct xfrm_algo_desc *algo;
@@ -413,8 +498,10 @@ static int attach_aead(struct xfrm_state *x, struct nlattr *rta)
ualg = nla_data(rta);
algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
- if (!algo)
+ if (!algo) {
+ NL_SET_ERR_MSG(extack, "Requested AEAD algorithm not found");
return -ENOSYS;
+ }
x->props.ealgo = algo->desc.sadb_alg_id;
p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
@@ -579,7 +666,8 @@ static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m)
static struct xfrm_state *xfrm_state_construct(struct net *net,
struct xfrm_usersa_info *p,
struct nlattr **attrs,
- int *errp)
+ int *errp,
+ struct netlink_ext_ack *extack)
{
struct xfrm_state *x = xfrm_state_alloc(net);
int err = -ENOMEM;
@@ -606,21 +694,21 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
if (attrs[XFRMA_SA_EXTRA_FLAGS])
x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);
- if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD])))
+ if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD], extack)))
goto error;
if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
- attrs[XFRMA_ALG_AUTH_TRUNC])))
+ attrs[XFRMA_ALG_AUTH_TRUNC], extack)))
goto error;
if (!x->props.aalgo) {
if ((err = attach_auth(&x->aalg, &x->props.aalgo,
- attrs[XFRMA_ALG_AUTH])))
+ attrs[XFRMA_ALG_AUTH], extack)))
goto error;
}
- if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT])))
+ if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT], extack)))
goto error;
if ((err = attach_one_algo(&x->calg, &x->props.calgo,
xfrm_calg_get_byname,
- attrs[XFRMA_ALG_COMP])))
+ attrs[XFRMA_ALG_COMP], extack)))
goto error;
if (attrs[XFRMA_TFCPAD])
@@ -633,7 +721,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
if (attrs[XFRMA_IF_ID])
x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
- err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]);
+ err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV], extack);
if (err)
goto error;
@@ -653,7 +741,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
/* sysctl_xfrm_aevent_etime is in 100ms units */
x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
- if ((err = xfrm_init_replay(x)))
+ if ((err = xfrm_init_replay(x, extack)))
goto error;
/* override default values from above */
@@ -662,7 +750,8 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
/* configure the hardware if offload is requested */
if (attrs[XFRMA_OFFLOAD_DEV]) {
err = xfrm_dev_state_add(net, x,
- nla_data(attrs[XFRMA_OFFLOAD_DEV]));
+ nla_data(attrs[XFRMA_OFFLOAD_DEV]),
+ extack);
if (err)
goto error;
}
@@ -678,7 +767,7 @@ error_no_put:
}
static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_usersa_info *p = nlmsg_data(nlh);
@@ -686,11 +775,11 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
struct km_event c;
- err = verify_newsa_info(p, attrs);
+ err = verify_newsa_info(p, attrs, extack);
if (err)
return err;
- x = xfrm_state_construct(net, p, attrs, &err);
+ x = xfrm_state_construct(net, p, attrs, &err, extack);
if (!x)
return err;
@@ -757,7 +846,7 @@ static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
}
static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
@@ -1254,7 +1343,8 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
}
static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrmu_spdhthresh *thresh4 = NULL;
@@ -1299,7 +1389,8 @@ static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
}
static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct sk_buff *r_skb;
@@ -1358,7 +1449,8 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
}
static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct sk_buff *r_skb;
@@ -1378,7 +1470,7 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
}
static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_usersa_id *p = nlmsg_data(nlh);
@@ -1402,7 +1494,8 @@ out_noput:
}
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
@@ -1477,7 +1570,7 @@ out_noput:
return err;
}
-static int verify_policy_dir(u8 dir)
+static int verify_policy_dir(u8 dir, struct netlink_ext_ack *extack)
{
switch (dir) {
case XFRM_POLICY_IN:
@@ -1486,13 +1579,14 @@ static int verify_policy_dir(u8 dir)
break;
default:
+ NL_SET_ERR_MSG(extack, "Invalid policy direction");
return -EINVAL;
}
return 0;
}
-static int verify_policy_type(u8 type)
+static int verify_policy_type(u8 type, struct netlink_ext_ack *extack)
{
switch (type) {
case XFRM_POLICY_TYPE_MAIN:
@@ -1502,13 +1596,15 @@ static int verify_policy_type(u8 type)
break;
default:
+ NL_SET_ERR_MSG(extack, "Invalid policy type");
return -EINVAL;
}
return 0;
}
-static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
+static int verify_newpolicy_info(struct xfrm_userpolicy_info *p,
+ struct netlink_ext_ack *extack)
{
int ret;
@@ -1520,6 +1616,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
break;
default:
+ NL_SET_ERR_MSG(extack, "Invalid policy share");
return -EINVAL;
}
@@ -1529,35 +1626,44 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
break;
default:
+ NL_SET_ERR_MSG(extack, "Invalid policy action");
return -EINVAL;
}
switch (p->sel.family) {
case AF_INET:
- if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) {
+ NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)");
return -EINVAL;
+ }
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
- if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+ if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) {
+ NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)");
return -EINVAL;
+ }
break;
#else
+ NL_SET_ERR_MSG(extack, "IPv6 support disabled");
return -EAFNOSUPPORT;
#endif
default:
+ NL_SET_ERR_MSG(extack, "Invalid selector family");
return -EINVAL;
}
- ret = verify_policy_dir(p->dir);
+ ret = verify_policy_dir(p->dir, extack);
if (ret)
return ret;
- if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
+ if (p->index && (xfrm_policy_id2dir(p->index) != p->dir)) {
+ NL_SET_ERR_MSG(extack, "Policy index doesn't match direction");
return -EINVAL;
+ }
return 0;
}
@@ -1599,13 +1705,16 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
}
}
-static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
+static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
+ struct netlink_ext_ack *extack)
{
u16 prev_family;
int i;
- if (nr > XFRM_MAX_DEPTH)
+ if (nr > XFRM_MAX_DEPTH) {
+ NL_SET_ERR_MSG(extack, "Template count must be <= XFRM_MAX_DEPTH (" __stringify(XFRM_MAX_DEPTH) ")");
return -EINVAL;
+ }
prev_family = family;
@@ -1625,12 +1734,16 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
case XFRM_MODE_BEET:
break;
default:
- if (ut[i].family != prev_family)
+ if (ut[i].family != prev_family) {
+ NL_SET_ERR_MSG(extack, "Mode in template doesn't support a family change");
return -EINVAL;
+ }
break;
}
- if (ut[i].mode >= XFRM_MODE_MAX)
+ if (ut[i].mode >= XFRM_MODE_MAX) {
+ NL_SET_ERR_MSG(extack, "Mode in template must be < XFRM_MODE_MAX (" __stringify(XFRM_MODE_MAX) ")");
return -EINVAL;
+ }
prev_family = ut[i].family;
@@ -1642,17 +1755,21 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
break;
#endif
default:
+ NL_SET_ERR_MSG(extack, "Invalid family in template");
return -EINVAL;
}
- if (!xfrm_id_proto_valid(ut[i].id.proto))
+ if (!xfrm_id_proto_valid(ut[i].id.proto)) {
+ NL_SET_ERR_MSG(extack, "Invalid XFRM protocol in template");
return -EINVAL;
+ }
}
return 0;
}
-static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
+static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_TMPL];
@@ -1663,7 +1780,7 @@ static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
int nr = nla_len(rt) / sizeof(*utmpl);
int err;
- err = validate_tmpl(nr, utmpl, pol->family);
+ err = validate_tmpl(nr, utmpl, pol->family, extack);
if (err)
return err;
@@ -1672,7 +1789,8 @@ static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
return 0;
}
-static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
+static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
struct xfrm_userpolicy_type *upt;
@@ -1684,7 +1802,7 @@ static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
type = upt->type;
}
- err = verify_policy_type(type);
+ err = verify_policy_type(type, extack);
if (err)
return err;
@@ -1719,7 +1837,11 @@ static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_i
p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}
-static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
+static struct xfrm_policy *xfrm_policy_construct(struct net *net,
+ struct xfrm_userpolicy_info *p,
+ struct nlattr **attrs,
+ int *errp,
+ struct netlink_ext_ack *extack)
{
struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
int err;
@@ -1731,11 +1853,11 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us
copy_from_user_policy(xp, p);
- err = copy_from_user_policy_type(&xp->type, attrs);
+ err = copy_from_user_policy_type(&xp->type, attrs, extack);
if (err)
goto error;
- if (!(err = copy_from_user_tmpl(xp, attrs)))
+ if (!(err = copy_from_user_tmpl(xp, attrs, extack)))
err = copy_from_user_sec_ctx(xp, attrs);
if (err)
goto error;
@@ -1754,7 +1876,8 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us
}
static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
@@ -1763,14 +1886,14 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
int excl;
- err = verify_newpolicy_info(p);
+ err = verify_newpolicy_info(p, extack);
if (err)
return err;
- err = verify_sec_ctx_len(attrs);
+ err = verify_sec_ctx_len(attrs, extack);
if (err)
return err;
- xp = xfrm_policy_construct(net, p, attrs, &err);
+ xp = xfrm_policy_construct(net, p, attrs, &err, extack);
if (!xp)
return err;
@@ -2015,7 +2138,7 @@ static bool xfrm_userpolicy_is_valid(__u8 policy)
}
static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_userpolicy_default *up = nlmsg_data(nlh);
@@ -2036,7 +2159,7 @@ static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
}
static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct sk_buff *r_skb;
struct nlmsghdr *r_nlh;
@@ -2066,7 +2189,8 @@ static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
}
static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy *xp;
@@ -2081,11 +2205,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
p = nlmsg_data(nlh);
delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
- err = copy_from_user_policy_type(&type, attrs);
+ err = copy_from_user_policy_type(&type, attrs, extack);
if (err)
return err;
- err = verify_policy_dir(p->dir);
+ err = verify_policy_dir(p->dir, extack);
if (err)
return err;
@@ -2101,7 +2225,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
- err = verify_sec_ctx_len(attrs);
+ err = verify_sec_ctx_len(attrs, extack);
if (err)
return err;
@@ -2149,7 +2273,8 @@ out:
}
static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct km_event c;
@@ -2249,7 +2374,7 @@ out_cancel:
}
static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
@@ -2293,7 +2418,7 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
}
static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
@@ -2344,14 +2469,15 @@ out:
}
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct km_event c;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err;
- err = copy_from_user_policy_type(&type, attrs);
+ err = copy_from_user_policy_type(&type, attrs, extack);
if (err)
return err;
@@ -2372,7 +2498,8 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
}
static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy *xp;
@@ -2383,11 +2510,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct xfrm_mark m;
u32 if_id = 0;
- err = copy_from_user_policy_type(&type, attrs);
+ err = copy_from_user_policy_type(&type, attrs, extack);
if (err)
return err;
- err = verify_policy_dir(p->dir);
+ err = verify_policy_dir(p->dir, extack);
if (err)
return err;
@@ -2403,7 +2530,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
- err = verify_sec_ctx_len(attrs);
+ err = verify_sec_ctx_len(attrs, extack);
if (err)
return err;
@@ -2438,7 +2565,8 @@ out:
}
static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
@@ -2472,7 +2600,8 @@ out:
}
static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy *xp;
@@ -2490,15 +2619,15 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
xfrm_mark_get(attrs, &mark);
- err = verify_newpolicy_info(&ua->policy);
+ err = verify_newpolicy_info(&ua->policy, extack);
if (err)
goto free_state;
- err = verify_sec_ctx_len(attrs);
+ err = verify_sec_ctx_len(attrs, extack);
if (err)
goto free_state;
/* build an XP */
- xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
+ xp = xfrm_policy_construct(net, &ua->policy, attrs, &err, extack);
if (!xp)
goto free_state;
@@ -2577,7 +2706,7 @@ static int copy_from_user_migrate(struct xfrm_migrate *ma,
}
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs, struct netlink_ext_ack *extack)
{
struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
struct xfrm_migrate m[XFRM_MAX_DEPTH];
@@ -2594,7 +2723,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
- err = copy_from_user_policy_type(&type, attrs);
+ err = copy_from_user_policy_type(&type, attrs, extack);
if (err)
return err;
@@ -2623,7 +2752,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
}
#else
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr **attrs)
+ struct nlattr **attrs, struct netlink_ext_ack *extack)
{
return -ENOPROTOOPT;
}
@@ -2819,7 +2948,8 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
};
static const struct xfrm_link {
- int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
+ int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **,
+ struct netlink_ext_ack *);
int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
@@ -2921,7 +3051,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
goto err;
}
- err = link->doit(skb, nlh, attrs);
+ err = link->doit(skb, nlh, attrs, extack);
/* We need to free skb allocated in xfrm_alloc_compat() before
* returning from this function, because consume_skb() won't take
@@ -3272,11 +3402,11 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
*dir = -EINVAL;
if (len < sizeof(*p) ||
- verify_newpolicy_info(p))
+ verify_newpolicy_info(p, NULL))
return NULL;
nr = ((len - sizeof(*p)) / sizeof(*ut));
- if (validate_tmpl(nr, ut, p->sel.family))
+ if (validate_tmpl(nr, ut, p->sel.family, NULL))
return NULL;
if (p->dir > XFRM_POLICY_OUT)