Diffstat (limited to 'net')
-rw-r--r--  net/caif/cffrml.c                  |   9
-rw-r--r--  net/can/Kconfig                    |   1
-rw-r--r--  net/can/j1939/socket.c             |   6
-rw-r--r--  net/can/j1939/transport.c          |   2
-rw-r--r--  net/ceph/osd_client.c              |   6
-rw-r--r--  net/ceph/osdmap.c                  | 120
-rw-r--r--  net/ethtool/ioctl.c                |  30
-rw-r--r--  net/handshake/request.c            |   8
-rw-r--r--  net/hsr/hsr_forward.c              |   2
-rw-r--r--  net/ipv4/inet_fragment.c           |  55
-rw-r--r--  net/ipv4/ip_fragment.c             |  22
-rw-r--r--  net/mptcp/Kconfig                  |   2
-rw-r--r--  net/mptcp/pm_netlink.c             |   3
-rw-r--r--  net/mptcp/protocol.c               |  22
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c    |   3
-rw-r--r--  net/netfilter/nf_conncount.c       |  25
-rw-r--r--  net/netfilter/nf_conntrack_core.c  |   3
-rw-r--r--  net/netfilter/nf_flow_table_path.c |   4
-rw-r--r--  net/netfilter/nf_nat_core.c        |  14
-rw-r--r--  net/netfilter/nf_tables_api.c      |  84
-rw-r--r--  net/netrom/nr_out.c                |   4
-rw-r--r--  net/openvswitch/flow_netlink.c     |  13
-rw-r--r--  net/sched/act_mirred.c             |   9
-rw-r--r--  net/sched/sch_ets.c                |   6
-rw-r--r--  net/sctp/ipv6.c                    |   2
-rw-r--r--  net/sctp/socket.c                  |   7
-rw-r--r--  net/smc/Kconfig                    |   4
-rw-r--r--  net/sunrpc/backchannel_rqst.c      |  35
-rw-r--r--  net/sunrpc/xprtrdma/backchannel.c  |   8
-rw-r--r--  net/unix/garbage.c                 |   2
30 files changed, 343 insertions, 168 deletions
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index 6651a8dc62e0..d4d63586053a 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -92,8 +92,15 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
len = le16_to_cpu(tmp);
/* Subtract for FCS on length if FCS is not used. */
- if (!this->dofcs)
+ if (!this->dofcs) {
+ if (len < 2) {
+ ++cffrml_rcv_error;
+ pr_err("Invalid frame length (%d)\n", len);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
len -= 2;
+ }
if (cfpkt_setlen(pkt, len) < 0) {
++cffrml_rcv_error;
diff --git a/net/can/Kconfig b/net/can/Kconfig
index e4ccf731a24c..af64a6f76458 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -5,7 +5,6 @@
menuconfig CAN
tristate "CAN bus subsystem support"
- select CAN_DEV
help
Controller Area Network (CAN) is a slow (up to 1Mbit/s) serial
communications protocol. Development of the CAN bus started in
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 6272326dd614..ff9c4fd7b433 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -482,6 +482,12 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr_unsized *uaddr, in
goto out_release_sock;
}
+ if (ndev->reg_state != NETREG_REGISTERED) {
+ dev_put(ndev);
+ ret = -ENODEV;
+ goto out_release_sock;
+ }
+
can_ml = can_get_ml_priv(ndev);
if (!can_ml) {
dev_put(ndev);
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index fbf5c8001c9d..613a911dda10 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1567,6 +1567,8 @@ int j1939_session_activate(struct j1939_session *session)
if (active) {
j1939_session_put(active);
ret = -EAGAIN;
+ } else if (priv->ndev->reg_state != NETREG_REGISTERED) {
+ ret = -ENODEV;
} else {
WARN_ON_ONCE(session->state != J1939_SESSION_NEW);
list_add_tail(&session->active_session_list_entry,
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6664ea73ccf8..3667319b949d 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1280,8 +1280,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
if (refcount_inc_not_zero(&osd->o_ref)) {
- dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
- refcount_read(&osd->o_ref));
+ dout("get_osd %p -> %d\n", osd, refcount_read(&osd->o_ref));
return osd;
} else {
dout("get_osd %p FAIL\n", osd);
@@ -1291,8 +1290,7 @@ static struct ceph_osd *get_osd(struct ceph_osd *osd)
static void put_osd(struct ceph_osd *osd)
{
- dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
- refcount_read(&osd->o_ref) - 1);
+ dout("put_osd %p -> %d\n", osd, refcount_read(&osd->o_ref) - 1);
if (refcount_dec_and_test(&osd->o_ref)) {
osd_cleanup(osd);
kfree(osd);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d245fa508e1c..34b3ab59602f 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -806,51 +806,49 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
ceph_decode_need(p, end, len, bad);
pool_end = *p + len;
+ ceph_decode_need(p, end, 4 + 4 + 4, bad);
pi->type = ceph_decode_8(p);
pi->size = ceph_decode_8(p);
pi->crush_ruleset = ceph_decode_8(p);
pi->object_hash = ceph_decode_8(p);
-
pi->pg_num = ceph_decode_32(p);
pi->pgp_num = ceph_decode_32(p);
- *p += 4 + 4; /* skip lpg* */
- *p += 4; /* skip last_change */
- *p += 8 + 4; /* skip snap_seq, snap_epoch */
+ /* lpg*, last_change, snap_seq, snap_epoch */
+ ceph_decode_skip_n(p, end, 8 + 4 + 8 + 4, bad);
/* skip snaps */
- num = ceph_decode_32(p);
+ ceph_decode_32_safe(p, end, num, bad);
while (num--) {
- *p += 8; /* snapid key */
- *p += 1 + 1; /* versions */
- len = ceph_decode_32(p);
- *p += len;
+ /* snapid key, pool snap (with versions) */
+ ceph_decode_skip_n(p, end, 8 + 2, bad);
+ ceph_decode_skip_string(p, end, bad);
}
- /* skip removed_snaps */
- num = ceph_decode_32(p);
- *p += num * (8 + 8);
+ /* removed_snaps */
+ ceph_decode_skip_map(p, end, 64, 64, bad);
+ ceph_decode_need(p, end, 8 + 8 + 4, bad);
*p += 8; /* skip auid */
pi->flags = ceph_decode_64(p);
*p += 4; /* skip crash_replay_interval */
if (ev >= 7)
- pi->min_size = ceph_decode_8(p);
+ ceph_decode_8_safe(p, end, pi->min_size, bad);
else
pi->min_size = pi->size - pi->size / 2;
if (ev >= 8)
- *p += 8 + 8; /* skip quota_max_* */
+ /* quota_max_* */
+ ceph_decode_skip_n(p, end, 8 + 8, bad);
if (ev >= 9) {
- /* skip tiers */
- num = ceph_decode_32(p);
- *p += num * 8;
+ /* tiers */
+ ceph_decode_skip_set(p, end, 64, bad);
+ ceph_decode_need(p, end, 8 + 1 + 8 + 8, bad);
*p += 8; /* skip tier_of */
*p += 1; /* skip cache_mode */
-
pi->read_tier = ceph_decode_64(p);
pi->write_tier = ceph_decode_64(p);
} else {
@@ -858,86 +856,76 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
pi->write_tier = -1;
}
- if (ev >= 10) {
- /* skip properties */
- num = ceph_decode_32(p);
- while (num--) {
- len = ceph_decode_32(p);
- *p += len; /* key */
- len = ceph_decode_32(p);
- *p += len; /* val */
- }
- }
+ if (ev >= 10)
+ /* properties */
+ ceph_decode_skip_map(p, end, string, string, bad);
if (ev >= 11) {
- /* skip hit_set_params */
- *p += 1 + 1; /* versions */
- len = ceph_decode_32(p);
- *p += len;
+ /* hit_set_params (with versions) */
+ ceph_decode_skip_n(p, end, 2, bad);
+ ceph_decode_skip_string(p, end, bad);
- *p += 4; /* skip hit_set_period */
- *p += 4; /* skip hit_set_count */
+ /* hit_set_period, hit_set_count */
+ ceph_decode_skip_n(p, end, 4 + 4, bad);
}
if (ev >= 12)
- *p += 4; /* skip stripe_width */
+ /* stripe_width */
+ ceph_decode_skip_32(p, end, bad);
- if (ev >= 13) {
- *p += 8; /* skip target_max_bytes */
- *p += 8; /* skip target_max_objects */
- *p += 4; /* skip cache_target_dirty_ratio_micro */
- *p += 4; /* skip cache_target_full_ratio_micro */
- *p += 4; /* skip cache_min_flush_age */
- *p += 4; /* skip cache_min_evict_age */
- }
+ if (ev >= 13)
+ /* target_max_*, cache_target_*, cache_min_* */
+ ceph_decode_skip_n(p, end, 16 + 8 + 8, bad);
- if (ev >= 14) {
- /* skip erasure_code_profile */
- len = ceph_decode_32(p);
- *p += len;
- }
+ if (ev >= 14)
+ /* erasure_code_profile */
+ ceph_decode_skip_string(p, end, bad);
/*
* last_force_op_resend_preluminous, will be overridden if the
* map was encoded with RESEND_ON_SPLIT
*/
if (ev >= 15)
- pi->last_force_request_resend = ceph_decode_32(p);
+ ceph_decode_32_safe(p, end, pi->last_force_request_resend, bad);
else
pi->last_force_request_resend = 0;
if (ev >= 16)
- *p += 4; /* skip min_read_recency_for_promote */
+ /* min_read_recency_for_promote */
+ ceph_decode_skip_32(p, end, bad);
if (ev >= 17)
- *p += 8; /* skip expected_num_objects */
+ /* expected_num_objects */
+ ceph_decode_skip_64(p, end, bad);
if (ev >= 19)
- *p += 4; /* skip cache_target_dirty_high_ratio_micro */
+ /* cache_target_dirty_high_ratio_micro */
+ ceph_decode_skip_32(p, end, bad);
if (ev >= 20)
- *p += 4; /* skip min_write_recency_for_promote */
+ /* min_write_recency_for_promote */
+ ceph_decode_skip_32(p, end, bad);
if (ev >= 21)
- *p += 1; /* skip use_gmt_hitset */
+ /* use_gmt_hitset */
+ ceph_decode_skip_8(p, end, bad);
if (ev >= 22)
- *p += 1; /* skip fast_read */
+ /* fast_read */
+ ceph_decode_skip_8(p, end, bad);
- if (ev >= 23) {
- *p += 4; /* skip hit_set_grade_decay_rate */
- *p += 4; /* skip hit_set_search_last_n */
- }
+ if (ev >= 23)
+ /* hit_set_grade_decay_rate, hit_set_search_last_n */
+ ceph_decode_skip_n(p, end, 4 + 4, bad);
if (ev >= 24) {
- /* skip opts */
- *p += 1 + 1; /* versions */
- len = ceph_decode_32(p);
- *p += len;
+ /* opts (with versions) */
+ ceph_decode_skip_n(p, end, 2, bad);
+ ceph_decode_skip_string(p, end, bad);
}
if (ev >= 25)
- pi->last_force_request_resend = ceph_decode_32(p);
+ ceph_decode_32_safe(p, end, pi->last_force_request_resend, bad);
/* ignore the rest */
@@ -1438,7 +1426,7 @@ static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
ceph_decode_32_safe(p, end, len, e_inval);
if (len == 0 && incremental)
return NULL; /* new_pg_temp: [] to remove */
- if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
+ if ((size_t)len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
return ERR_PTR(-EINVAL);
ceph_decode_need(p, end, len * sizeof(u32), e_inval);
@@ -1619,7 +1607,7 @@ static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
u32 len, i;
ceph_decode_32_safe(p, end, len, e_inval);
- if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
+ if ((size_t)len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
return ERR_PTR(-EINVAL);
ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index fa83ddade4f8..9431e305b233 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -2383,7 +2383,10 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
return -ENOMEM;
WARN_ON_ONCE(!ret);
- gstrings.len = ret;
+ if (gstrings.len && gstrings.len != ret)
+ gstrings.len = 0;
+ else
+ gstrings.len = ret;
if (gstrings.len) {
data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
@@ -2509,10 +2512,13 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&stats, useraddr, sizeof(stats)))
return -EFAULT;
- stats.n_stats = n_stats;
+ if (stats.n_stats && stats.n_stats != n_stats)
+ stats.n_stats = 0;
+ else
+ stats.n_stats = n_stats;
- if (n_stats) {
- data = vzalloc(array_size(n_stats, sizeof(u64)));
+ if (stats.n_stats) {
+ data = vzalloc(array_size(stats.n_stats, sizeof(u64)));
if (!data)
return -ENOMEM;
ops->get_ethtool_stats(dev, &stats, data);
@@ -2524,7 +2530,9 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
if (copy_to_user(useraddr, &stats, sizeof(stats)))
goto out;
useraddr += sizeof(stats);
- if (n_stats && copy_to_user(useraddr, data, array_size(n_stats, sizeof(u64))))
+ if (stats.n_stats &&
+ copy_to_user(useraddr, data,
+ array_size(stats.n_stats, sizeof(u64))))
goto out;
ret = 0;
@@ -2560,6 +2568,10 @@ static int ethtool_get_phy_stats_phydev(struct phy_device *phydev,
return -EOPNOTSUPP;
n_stats = phy_ops->get_sset_count(phydev);
+ if (stats->n_stats && stats->n_stats != n_stats) {
+ stats->n_stats = 0;
+ return 0;
+ }
ret = ethtool_vzalloc_stats_array(n_stats, data);
if (ret)
@@ -2580,6 +2592,10 @@ static int ethtool_get_phy_stats_ethtool(struct net_device *dev,
return -EOPNOTSUPP;
n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
+ if (stats->n_stats && stats->n_stats != n_stats) {
+ stats->n_stats = 0;
+ return 0;
+ }
ret = ethtool_vzalloc_stats_array(n_stats, data);
if (ret)
@@ -2616,7 +2632,9 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
}
useraddr += sizeof(stats);
- if (copy_to_user(useraddr, data, array_size(stats.n_stats, sizeof(u64))))
+ if (stats.n_stats &&
+ copy_to_user(useraddr, data,
+ array_size(stats.n_stats, sizeof(u64))))
ret = -EFAULT;
out:
diff --git a/net/handshake/request.c b/net/handshake/request.c
index 274d2c89b6b2..6b7e3e0bf399 100644
--- a/net/handshake/request.c
+++ b/net/handshake/request.c
@@ -276,6 +276,8 @@ int handshake_req_submit(struct socket *sock, struct handshake_req *req,
out_unlock:
spin_unlock(&hn->hn_lock);
out_err:
+ /* Restore original destructor so socket teardown still runs on failure */
+ req->hr_sk->sk_destruct = req->hr_odestruct;
trace_handshake_submit_err(net, req, req->hr_sk, ret);
handshake_req_destroy(req);
return ret;
@@ -324,7 +326,11 @@ bool handshake_req_cancel(struct sock *sk)
hn = handshake_pernet(net);
if (hn && remove_pending(hn, req)) {
- /* Request hadn't been accepted */
+ /* Request hadn't been accepted - mark cancelled */
+ if (test_and_set_bit(HANDSHAKE_F_REQ_COMPLETED, &req->hr_flags)) {
+ trace_handshake_cancel_busy(net, req, sk);
+ return false;
+ }
goto out_true;
}
if (test_and_set_bit(HANDSHAKE_F_REQ_COMPLETED, &req->hr_flags)) {
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 339f0d220212..aefc9b6936ba 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -205,6 +205,8 @@ struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
__pskb_copy(frame->skb_prp,
skb_headroom(frame->skb_prp),
GFP_ATOMIC);
+ if (!frame->skb_std)
+ return NULL;
} else {
/* Unexpected */
WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 025895eb6ec5..001ee5c4d962 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -218,6 +218,41 @@ static int __init inet_frag_wq_init(void)
pure_initcall(inet_frag_wq_init);
+void fqdir_pre_exit(struct fqdir *fqdir)
+{
+ struct inet_frag_queue *fq;
+ struct rhashtable_iter hti;
+
+ /* Prevent creation of new frags.
+ * Pairs with READ_ONCE() in inet_frag_find().
+ */
+ WRITE_ONCE(fqdir->high_thresh, 0);
+
+ /* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
+ * and ip6frag_expire_frag_queue().
+ */
+ WRITE_ONCE(fqdir->dead, true);
+
+ rhashtable_walk_enter(&fqdir->rhashtable, &hti);
+ rhashtable_walk_start(&hti);
+
+ while ((fq = rhashtable_walk_next(&hti))) {
+ if (IS_ERR(fq)) {
+ if (PTR_ERR(fq) != -EAGAIN)
+ break;
+ continue;
+ }
+ spin_lock_bh(&fq->lock);
+ if (!(fq->flags & INET_FRAG_COMPLETE))
+ inet_frag_queue_flush(fq, 0);
+ spin_unlock_bh(&fq->lock);
+ }
+
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+}
+EXPORT_SYMBOL(fqdir_pre_exit);
+
void fqdir_exit(struct fqdir *fqdir)
{
INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
@@ -263,8 +298,8 @@ static void inet_frag_destroy_rcu(struct rcu_head *head)
kmem_cache_free(f->frags_cachep, q);
}
-unsigned int inet_frag_rbtree_purge(struct rb_root *root,
- enum skb_drop_reason reason)
+static unsigned int
+inet_frag_rbtree_purge(struct rb_root *root, enum skb_drop_reason reason)
{
struct rb_node *p = rb_first(root);
unsigned int sum = 0;
@@ -284,7 +319,17 @@ unsigned int inet_frag_rbtree_purge(struct rb_root *root,
}
return sum;
}
-EXPORT_SYMBOL(inet_frag_rbtree_purge);
+
+void inet_frag_queue_flush(struct inet_frag_queue *q,
+ enum skb_drop_reason reason)
+{
+ unsigned int sum;
+
+ reason = reason ?: SKB_DROP_REASON_FRAG_REASM_TIMEOUT;
+ sum = inet_frag_rbtree_purge(&q->rb_fragments, reason);
+ sub_frag_mem_limit(q->fqdir, sum);
+}
+EXPORT_SYMBOL(inet_frag_queue_flush);
void inet_frag_destroy(struct inet_frag_queue *q)
{
@@ -327,7 +372,9 @@ static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
timer_setup(&q->timer, f->frag_expire, 0);
spin_lock_init(&q->lock);
- /* One reference for the timer, one for the hash table. */
+ /* One reference for the timer, one for the hash table.
+ * We never take any extra references, only decrement this field.
+ */
refcount_set(&q->refcnt, 2);
return q;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index f7012479713b..56b0f738d2f2 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -134,11 +134,6 @@ static void ip_expire(struct timer_list *t)
net = qp->q.fqdir->net;
rcu_read_lock();
-
- /* Paired with WRITE_ONCE() in fqdir_pre_exit(). */
- if (READ_ONCE(qp->q.fqdir->dead))
- goto out_rcu_unlock;
-
spin_lock(&qp->q.lock);
if (qp->q.flags & INET_FRAG_COMPLETE)
@@ -146,6 +141,13 @@ static void ip_expire(struct timer_list *t)
qp->q.flags |= INET_FRAG_DROP;
inet_frag_kill(&qp->q, &refs);
+
+ /* Paired with WRITE_ONCE() in fqdir_pre_exit(). */
+ if (READ_ONCE(qp->q.fqdir->dead)) {
+ inet_frag_queue_flush(&qp->q, 0);
+ goto out;
+ }
+
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
@@ -240,16 +242,10 @@ static int ip_frag_too_far(struct ipq *qp)
static int ip_frag_reinit(struct ipq *qp)
{
- unsigned int sum_truesize = 0;
-
- if (!mod_timer(&qp->q.timer, jiffies + qp->q.fqdir->timeout)) {
- refcount_inc(&qp->q.refcnt);
+ if (!mod_timer_pending(&qp->q.timer, jiffies + qp->q.fqdir->timeout))
return -ETIMEDOUT;
- }
- sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments,
- SKB_DROP_REASON_FRAG_TOO_FAR);
- sub_frag_mem_limit(qp->q.fqdir, sum_truesize);
+ inet_frag_queue_flush(&qp->q, SKB_DROP_REASON_FRAG_TOO_FAR);
qp->q.flags = 0;
qp->q.len = 0;
diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig
index 20328920f6ed..be71fc9b4638 100644
--- a/net/mptcp/Kconfig
+++ b/net/mptcp/Kconfig
@@ -4,7 +4,7 @@ config MPTCP
depends on INET
select SKB_EXTENSIONS
select CRYPTO_LIB_SHA256
- select CRYPTO
+ select CRYPTO_LIB_UTILS
help
Multipath TCP (MPTCP) connections send and receive data over multiple
subflows in order to utilize multiple network paths. Each subflow
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index d5b383870f79..7aa42de9c47b 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -119,7 +119,8 @@ int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info,
}
if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
- entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
+ entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]) &
+ MPTCP_PM_ADDR_FLAGS_MASK;
if (tb[MPTCP_PM_ADDR_ATTR_PORT])
entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index e212c1374bd0..9b1fafd87cb9 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1623,7 +1623,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
struct mptcp_sendmsg_info info = {
.flags = flags,
};
- bool do_check_data_fin = false;
+ bool copied = false;
int push_count = 1;
while (mptcp_send_head(sk) && (push_count > 0)) {
@@ -1665,7 +1665,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
push_count--;
continue;
}
- do_check_data_fin = true;
+ copied = true;
}
}
}
@@ -1674,11 +1674,14 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
if (ssk)
mptcp_push_release(ssk, &info);
- /* ensure the rtx timer is running */
- if (!mptcp_rtx_timer_pending(sk))
- mptcp_reset_rtx_timer(sk);
- if (do_check_data_fin)
+ /* Avoid scheduling the rtx timer if no data has been pushed; the timer
+ * will be updated on positive acks by __mptcp_cleanup_una().
+ */
+ if (copied) {
+ if (!mptcp_rtx_timer_pending(sk))
+ mptcp_reset_rtx_timer(sk);
mptcp_check_send_data_fin(sk);
+ }
}
static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first)
@@ -2766,10 +2769,13 @@ static void __mptcp_retrans(struct sock *sk)
/*
* make the whole retrans decision, xmit, disallow
- * fallback atomic
+ * fallback atomic, note that we can't retrans even
+ * when an infinite fallback is in progress, i.e. new
+ * subflows are disallowed.
*/
spin_lock_bh(&msk->fallback_lock);
- if (__mptcp_check_fallback(msk)) {
+ if (__mptcp_check_fallback(msk) ||
+ !msk->allow_subflows) {
spin_unlock_bh(&msk->fallback_lock);
release_sock(ssk);
goto clear_scheduled;
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 3162ce3c2640..64c697212578 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -408,6 +408,9 @@ err_put:
return -1;
err_unreach:
+ if (!skb->dev)
+ skb->dev = skb_dst(skb)->dev;
+
dst_link_failure(skb);
return -1;
}
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index f1be4dd5cf85..3654f1e8976c 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -172,14 +172,14 @@ static int __nf_conncount_add(struct net *net,
struct nf_conn *found_ct;
unsigned int collect = 0;
bool refcounted = false;
+ int err = 0;
if (!get_ct_or_tuple_from_skb(net, skb, l3num, &ct, &tuple, &zone, &refcounted))
return -ENOENT;
if (ct && nf_ct_is_confirmed(ct)) {
- if (refcounted)
- nf_ct_put(ct);
- return -EEXIST;
+ err = -EEXIST;
+ goto out_put;
}
if ((u32)jiffies == list->last_gc)
@@ -231,12 +231,16 @@ static int __nf_conncount_add(struct net *net,
}
add_new_node:
- if (WARN_ON_ONCE(list->count > INT_MAX))
- return -EOVERFLOW;
+ if (WARN_ON_ONCE(list->count > INT_MAX)) {
+ err = -EOVERFLOW;
+ goto out_put;
+ }
conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
- if (conn == NULL)
- return -ENOMEM;
+ if (conn == NULL) {
+ err = -ENOMEM;
+ goto out_put;
+ }
conn->tuple = tuple;
conn->zone = *zone;
@@ -249,7 +253,7 @@ add_new_node:
out_put:
if (refcounted)
nf_ct_put(ct);
- return 0;
+ return err;
}
int nf_conncount_add_skb(struct net *net,
@@ -456,11 +460,10 @@ restart:
rb_link_node_rcu(&rbconn->node, parent, rbnode);
rb_insert_color(&rbconn->node, root);
-
- if (refcounted)
- nf_ct_put(ct);
}
out_unlock:
+ if (refcounted)
+ nf_ct_put(ct);
spin_unlock_bh(&nf_conncount_locks[hash]);
return count;
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0b95f226f211..d1f8eb725d42 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2487,6 +2487,7 @@ void nf_conntrack_cleanup_net(struct net *net)
void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
{
struct nf_ct_iter_data iter_data = {};
+ unsigned long start = jiffies;
struct net *net;
int busy;
@@ -2507,6 +2508,8 @@ i_see_dead_people:
busy = 1;
}
if (busy) {
+ DEBUG_NET_WARN_ONCE(time_after(jiffies, start + 60 * HZ),
+ "conntrack cleanup blocked for 60s");
schedule();
goto i_see_dead_people;
}
diff --git a/net/netfilter/nf_flow_table_path.c b/net/netfilter/nf_flow_table_path.c
index f0984cf69a09..eb24fe2715dc 100644
--- a/net/netfilter/nf_flow_table_path.c
+++ b/net/netfilter/nf_flow_table_path.c
@@ -250,6 +250,9 @@ static void nft_dev_forward_path(const struct nft_pktinfo *pkt,
if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
nft_dev_path_info(&stack, &info, ha, &ft->data);
+ if (info.outdev)
+ route->tuple[dir].out.ifindex = info.outdev->ifindex;
+
if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
return;
@@ -269,7 +272,6 @@ static void nft_dev_forward_path(const struct nft_pktinfo *pkt,
route->tuple[!dir].in.num_encaps = info.num_encaps;
route->tuple[!dir].in.ingress_vlans = info.ingress_vlans;
- route->tuple[dir].out.ifindex = info.outdev->ifindex;
if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 78a61dac4ade..e6b24586d2fe 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -294,25 +294,13 @@ nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple,
ct = nf_ct_tuplehash_to_ctrack(thash);
- /* NB: IP_CT_DIR_ORIGINAL should be impossible because
- * nf_nat_used_tuple() handles origin collisions.
- *
- * Handle remote chance other CPU confirmed its ct right after.
- */
- if (thash->tuple.dst.dir != IP_CT_DIR_REPLY)
- goto out;
-
/* clashing connection subject to NAT? Retry with new tuple. */
if (READ_ONCE(ct->status) & uses_nat)
goto out;
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
- &ignored_ct->tuplehash[IP_CT_DIR_REPLY].tuple) &&
- nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
- &ignored_ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) {
+ &ignored_ct->tuplehash[IP_CT_DIR_REPLY].tuple))
taken = false;
- goto out;
- }
out:
nf_ct_put(ct);
return taken;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index f3de2f9bbebf..618af6e90773 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -123,6 +123,29 @@ static void nft_validate_state_update(struct nft_table *table, u8 new_validate_s
table->validate_state = new_validate_state;
}
+
+static bool nft_chain_vstate_valid(const struct nft_ctx *ctx,
+ const struct nft_chain *chain)
+{
+ const struct nft_base_chain *base_chain;
+ enum nft_chain_types type;
+ u8 hooknum;
+
+ if (WARN_ON_ONCE(!nft_is_base_chain(ctx->chain)))
+ return false;
+
+ base_chain = nft_base_chain(ctx->chain);
+ hooknum = base_chain->ops.hooknum;
+ type = base_chain->type->type;
+
+ /* chain is already validated for this call depth */
+ if (chain->vstate.depth >= ctx->level &&
+ chain->vstate.hook_mask[type] & BIT(hooknum))
+ return true;
+
+ return false;
+}
+
static void nf_tables_trans_destroy_work(struct work_struct *w);
static void nft_trans_gc_work(struct work_struct *work);
@@ -4079,6 +4102,29 @@ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *r
nf_tables_rule_destroy(ctx, rule);
}
+static void nft_chain_vstate_update(const struct nft_ctx *ctx, struct nft_chain *chain)
+{
+ const struct nft_base_chain *base_chain;
+ enum nft_chain_types type;
+ u8 hooknum;
+
+ /* ctx->chain must hold the calling base chain. */
+ if (WARN_ON_ONCE(!nft_is_base_chain(ctx->chain))) {
+ memset(&chain->vstate, 0, sizeof(chain->vstate));
+ return;
+ }
+
+ base_chain = nft_base_chain(ctx->chain);
+ hooknum = base_chain->ops.hooknum;
+ type = base_chain->type->type;
+
+ BUILD_BUG_ON(BIT(NF_INET_NUMHOOKS) > U8_MAX);
+
+ chain->vstate.hook_mask[type] |= BIT(hooknum);
+ if (chain->vstate.depth < ctx->level)
+ chain->vstate.depth = ctx->level;
+}
+
/** nft_chain_validate - loop detection and hook validation
*
* @ctx: context containing call depth and base chain
@@ -4088,15 +4134,25 @@ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *r
* and set lookups until either the jump limit is hit or all reachable
* chains have been validated.
*/
-int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
+int nft_chain_validate(const struct nft_ctx *ctx, struct nft_chain *chain)
{
struct nft_expr *expr, *last;
struct nft_rule *rule;
int err;
+ BUILD_BUG_ON(NFT_JUMP_STACK_SIZE > 255);
if (ctx->level == NFT_JUMP_STACK_SIZE)
return -EMLINK;
+ if (ctx->level > 0) {
+ /* jumps to base chains are not allowed. */
+ if (nft_is_base_chain(chain))
+ return -ELOOP;
+
+ if (nft_chain_vstate_valid(ctx, chain))
+ return 0;
+ }
+
list_for_each_entry(rule, &chain->rules, list) {
if (fatal_signal_pending(current))
return -EINTR;
@@ -4115,8 +4171,11 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
if (err < 0)
return err;
}
+
+ cond_resched();
}
+ nft_chain_vstate_update(ctx, chain);
return 0;
}
EXPORT_SYMBOL_GPL(nft_chain_validate);
@@ -4128,7 +4187,7 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
.net = net,
.family = table->family,
};
- int err;
+ int err = 0;
list_for_each_entry(chain, &table->chains, list) {
if (!nft_is_base_chain(chain))
@@ -4137,12 +4196,14 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
ctx.chain = chain;
err = nft_chain_validate(&ctx, chain);
if (err < 0)
- return err;
-
- cond_resched();
+ goto err;
}
- return 0;
+err:
+ list_for_each_entry(chain, &table->chains, list)
+ memset(&chain->vstate, 0, sizeof(chain->vstate));
+
+ return err;
}
int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
@@ -11676,21 +11737,10 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
enum nft_data_types type,
unsigned int len)
{
- int err;
-
switch (reg) {
case NFT_REG_VERDICT:
if (type != NFT_DATA_VERDICT)
return -EINVAL;
-
- if (data != NULL &&
- (data->verdict.code == NFT_GOTO ||
- data->verdict.code == NFT_JUMP)) {
- err = nft_chain_validate(ctx, data->verdict.chain);
- if (err < 0)
- return err;
- }
-
break;
default:
if (type != NFT_DATA_VALUE)
diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c
index 5e531394a724..2b3cbceb0b52 100644
--- a/net/netrom/nr_out.c
+++ b/net/netrom/nr_out.c
@@ -43,8 +43,10 @@ void nr_output(struct sock *sk, struct sk_buff *skb)
frontlen = skb_headroom(skb);
while (skb->len > 0) {
- if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL)
+ if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL) {
+ kfree_skb(skb);
return;
+ }
skb_reserve(skbn, frontlen);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 1cb4f97335d8..2d536901309e 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2802,13 +2802,20 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
return err;
}
-static bool validate_push_nsh(const struct nlattr *attr, bool log)
+static bool validate_push_nsh(const struct nlattr *a, bool log)
{
+ struct nlattr *nsh_key = nla_data(a);
struct sw_flow_match match;
struct sw_flow_key key;
+ /* There must be one and only one NSH header. */
+ if (!nla_ok(nsh_key, nla_len(a)) ||
+ nla_total_size(nla_len(nsh_key)) != nla_len(a) ||
+ nla_type(nsh_key) != OVS_KEY_ATTR_NSH)
+ return false;
+
ovs_match_init(&match, &key, true, NULL);
- return !nsh_key_put_from_nlattr(attr, &match, false, true, log);
+ return !nsh_key_put_from_nlattr(nsh_key, &match, false, true, log);
}
/* Return false if there are any non-masked bits set.
@@ -3389,7 +3396,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
return -EINVAL;
}
mac_proto = MAC_PROTO_NONE;
- if (!validate_push_nsh(nla_data(a), log))
+ if (!validate_push_nsh(a, log))
return -EINVAL;
break;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index f27b583def78..91c96cc625bd 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -281,6 +281,15 @@ static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
+ if (dev == skb->dev && want_ingress == at_ingress) {
+ pr_notice_once("tc mirred: Loop (%s:%s --> %s:%s)\n",
+ netdev_name(skb->dev),
+ at_ingress ? "ingress" : "egress",
+ netdev_name(dev),
+ want_ingress ? "ingress" : "egress");
+ goto err_cant_do;
+ }
+
/* All mirred/redirected skbs should clear previous ct info */
nf_reset_ct(skb_to_send);
if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index 82635dd2cfa5..306e046276d4 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -652,7 +652,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_lock(sch);
for (i = nbands; i < oldbands; i++) {
- if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
+ if (cl_is_active(&q->classes[i]))
list_del_init(&q->classes[i].alist);
qdisc_purge_queue(q->classes[i].qdisc);
}
@@ -664,6 +664,10 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
q->classes[i].deficit = quanta[i];
}
}
+ for (i = q->nstrict; i < nstrict; i++) {
+ if (cl_is_active(&q->classes[i]))
+ list_del_init(&q->classes[i].alist);
+ }
WRITE_ONCE(q->nstrict, nstrict);
memcpy(q->prio2band, priomap, sizeof(priomap));
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 069b7e45d8bd..531cb0690007 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -492,6 +492,8 @@ static void sctp_v6_copy_ip_options(struct sock *sk, struct sock *newsk)
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct ipv6_txoptions *opt;
+ inet_sk(newsk)->inet_opt = NULL;
+
newnp = inet6_sk(newsk);
rcu_read_lock();
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d808096f5ab1..2493a5b1fa3c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4863,8 +4863,6 @@ static struct sock *sctp_clone_sock(struct sock *sk,
newsp->pf->to_sk_daddr(&asoc->peer.primary_addr, newsk);
newinet->inet_dport = htons(asoc->peer.port);
-
- newsp->pf->copy_ip_options(sk, newsk);
atomic_set(&newinet->inet_id, get_random_u16());
inet_set_bit(MC_LOOP, newsk);
@@ -4874,17 +4872,20 @@ static struct sock *sctp_clone_sock(struct sock *sk,
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6) {
- struct ipv6_pinfo *newnp = inet6_sk(newsk);
+ struct ipv6_pinfo *newnp;
newinet->pinet6 = &((struct sctp6_sock *)newsk)->inet6;
newinet->ipv6_fl_list = NULL;
+ newnp = inet6_sk(newsk);
memcpy(newnp, inet6_sk(sk), sizeof(struct ipv6_pinfo));
newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
}
#endif
+ newsp->pf->copy_ip_options(sk, newsk);
+
newsp->do_auto_asconf = 0;
skb_queue_head_init(&newsp->pd_lobby);
diff --git a/net/smc/Kconfig b/net/smc/Kconfig
index 325addf83cc6..277ef504bc26 100644
--- a/net/smc/Kconfig
+++ b/net/smc/Kconfig
@@ -22,10 +22,10 @@ config SMC_DIAG
config SMC_HS_CTRL_BPF
bool "Generic eBPF hook for SMC handshake flow"
- depends on SMC && BPF_SYSCALL
+ depends on SMC && BPF_JIT && BPF_SYSCALL
default y
help
SMC_HS_CTRL_BPF enables support to register generic eBPF hook for SMC
handshake flow, which offer much greater flexibility in modifying the behavior
of the SMC protocol stack compared to a complete kernel-based approach. Select
- this option if you want filtring the handshake process via eBPF programs.
\ No newline at end of file
+ this option if you want filtring the handshake process via eBPF programs.
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index caa94cf57123..68b1fcdea8f0 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -25,6 +25,22 @@ unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt)
}
/*
+ * Helper function to nullify backchannel server pointer in transport.
+ * We need to synchronize setting the pointer to NULL (done so after
+ * the backchannel server is shutdown) with the usage of that pointer
+ * by the backchannel request processing routines
+ * xprt_complete_bc_request() and rpcrdma_bc_receive_call().
+ */
+void xprt_svc_destroy_nullify_bc(struct rpc_xprt *xprt, struct svc_serv **serv)
+{
+ spin_lock(&xprt->bc_pa_lock);
+ svc_destroy(serv);
+ xprt->bc_serv = NULL;
+ spin_unlock(&xprt->bc_pa_lock);
+}
+EXPORT_SYMBOL_GPL(xprt_svc_destroy_nullify_bc);
+
+/*
* Helper routines that track the number of preallocation elements
* on the transport.
*/
@@ -354,7 +370,6 @@ found:
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
struct rpc_xprt *xprt = req->rq_xprt;
- struct svc_serv *bc_serv = xprt->bc_serv;
spin_lock(&xprt->bc_pa_lock);
list_del(&req->rq_bc_pa_list);
@@ -365,7 +380,21 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
dprintk("RPC: add callback request to list\n");
+ xprt_enqueue_bc_request(req);
+}
+
+void xprt_enqueue_bc_request(struct rpc_rqst *req)
+{
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct svc_serv *bc_serv;
+
xprt_get(xprt);
- lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list);
- svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
+ spin_lock(&xprt->bc_pa_lock);
+ bc_serv = xprt->bc_serv;
+ if (bc_serv) {
+ lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list);
+ svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
+ }
+ spin_unlock(&xprt->bc_pa_lock);
}
+EXPORT_SYMBOL_GPL(xprt_enqueue_bc_request);
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 8c817e755262..2f0f9618dd05 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -9,6 +9,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>
+#include <linux/sunrpc/bc_xprt.h>
#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>
@@ -220,7 +221,6 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_rep *rep)
{
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
- struct svc_serv *bc_serv;
struct rpcrdma_req *req;
struct rpc_rqst *rqst;
struct xdr_buf *buf;
@@ -261,11 +261,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
trace_xprtrdma_cb_call(r_xprt, rqst);
/* Queue rqst for ULP's callback service */
- bc_serv = xprt->bc_serv;
- xprt_get(xprt);
- lwq_enqueue(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
-
- svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
+ xprt_enqueue_bc_request(rqst);
r_xprt->rx_stats.bcall_count++;
return;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 78323d43e63e..25f65817faab 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -199,7 +199,7 @@ static void unix_free_vertices(struct scm_fp_list *fpl)
}
}
-static DEFINE_SPINLOCK(unix_gc_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(unix_gc_lock);
void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{