author    David S. Miller <davem@davemloft.net>  2019-07-18 14:04:45 -0700
committer David S. Miller <davem@davemloft.net>  2019-07-18 14:04:45 -0700
commit    bb745231678cd92ab0c4d37343b06e4879072368
tree      34b96b1bc5310b40779b8848389868e58ce37b5d /net
parent    7369c10f81172b55b284944caa2f51f595bbdb84
parent    59fd3486c3dd5678bc2fcac75e14466775465c3e
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2019-07-18

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) verifier precision propagation fix, from Andrii.

2) BTF size fix for typedefs, from Andrii.

3) a bunch of big endian fixes, from Ilya.

4) wide load from bpf_sock_addr fixes, from Stanislav.

5) a bunch of misc fixes from a number of developers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
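The one change in this pull that alters what programs the verifier accepts is item 4: with the net/core/filter.c hunk below, a sock_addr program may read user_ip6 and msg_src_ip6 with full 8-byte loads instead of only 1-, 2- or 4-byte ones. A minimal sketch of such a program follows; it assumes a typical libbpf setup, the program name and policy are made up for illustration, and it is not part of this series.

/* Sketch only (not from this series): a cgroup/connect6 program reading
 * the destination IPv6 address with two 64-bit loads.  Before this fix
 * the verifier rejected such reads of user_ip6/msg_src_ip6 and only
 * allowed narrow 1-, 2- or 4-byte loads.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/connect6")
int deny_unspecified_dst(struct bpf_sock_addr *ctx)
{
	/* user_ip6 is __u32[4]; each load below is an 8-byte access that
	 * lies entirely inside the field, which is the kind of access the
	 * new read-path check in filter.c is meant to accept.
	 */
	__u64 hi = *(__u64 *)&ctx->user_ip6[0];
	__u64 lo = *(__u64 *)&ctx->user_ip6[2];

	/* deny connect() to the unspecified address ::, allow the rest */
	if (!hi && !lo)
		return 0;
	return 1;
}

char _license[] SEC("license") = "GPL";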
Diffstat (limited to 'net')
-rw-r--r--  net/core/filter.c   24
-rw-r--r--  net/xdp/xdp_umem.c  16
-rw-r--r--  net/xdp/xsk.c       13
3 files changed, 29 insertions, 24 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index 47f6386fb17a..0f6854ccf894 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6884,20 +6884,30 @@ static bool sock_addr_is_valid_access(int off, int size,
case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
msg_src_ip6[3]):
- /* Only narrow read access allowed for now. */
if (type == BPF_READ) {
bpf_ctx_record_field_size(info, size_default);
+
+ if (bpf_ctx_wide_access_ok(off, size,
+ struct bpf_sock_addr,
+ user_ip6))
+ return true;
+
+ if (bpf_ctx_wide_access_ok(off, size,
+ struct bpf_sock_addr,
+ msg_src_ip6))
+ return true;
+
if (!bpf_ctx_narrow_access_ok(off, size, size_default))
return false;
} else {
- if (bpf_ctx_wide_store_ok(off, size,
- struct bpf_sock_addr,
- user_ip6))
+ if (bpf_ctx_wide_access_ok(off, size,
+ struct bpf_sock_addr,
+ user_ip6))
return true;
- if (bpf_ctx_wide_store_ok(off, size,
- struct bpf_sock_addr,
- msg_src_ip6))
+ if (bpf_ctx_wide_access_ok(off, size,
+ struct bpf_sock_addr,
+ msg_src_ip6))
return true;
if (size != size_default)
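Note how the hunk above replaces bpf_ctx_wide_store_ok() with the more general bpf_ctx_wide_access_ok() and calls it on the BPF_READ path as well, before falling back to the narrow-access check. Below is a simplified model of what such a check establishes; it is not the in-tree macro verbatim, which lives in include/linux/filter.h and also constrains the offset's alignment.

#include <linux/types.h>
#include <linux/stddef.h>	/* offsetof(), offsetofend() */

/* Simplified model only: a context access of `size` bytes at offset `off`
 * counts as a permitted wide access to `field` of `type` when it is a
 * full 64-bit access that lies entirely within that field.
 */
#define wide_access_ok_model(off, size, type, field)			\
	((size) == sizeof(__u64) &&					\
	 (off) >= offsetof(type, field) &&				\
	 (off) + (size) <= offsetofend(type, field))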
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 20c91f02d3d8..83de74ca729a 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -87,21 +87,20 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
struct netdev_bpf bpf;
int err = 0;
+ ASSERT_RTNL();
+
force_zc = flags & XDP_ZEROCOPY;
force_copy = flags & XDP_COPY;
if (force_zc && force_copy)
return -EINVAL;
- rtnl_lock();
- if (xdp_get_umem_from_qid(dev, queue_id)) {
- err = -EBUSY;
- goto out_rtnl_unlock;
- }
+ if (xdp_get_umem_from_qid(dev, queue_id))
+ return -EBUSY;
err = xdp_reg_umem_at_qid(dev, umem, queue_id);
if (err)
- goto out_rtnl_unlock;
+ return err;
umem->dev = dev;
umem->queue_id = queue_id;
@@ -110,7 +109,7 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
if (force_copy)
/* For copy-mode, we are done. */
- goto out_rtnl_unlock;
+ return 0;
if (!dev->netdev_ops->ndo_bpf ||
!dev->netdev_ops->ndo_xsk_async_xmit) {
@@ -125,7 +124,6 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
err = dev->netdev_ops->ndo_bpf(dev, &bpf);
if (err)
goto err_unreg_umem;
- rtnl_unlock();
umem->zc = true;
return 0;
@@ -135,8 +133,6 @@ err_unreg_umem:
err = 0; /* fallback to copy mode */
if (err)
xdp_clear_umem_at_qid(dev, queue_id);
-out_rtnl_unlock:
- rtnl_unlock();
return err;
}
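The effect of the xdp_umem.c hunks is that xdp_umem_assign_dev() no longer takes or drops the rtnl lock itself; the new ASSERT_RTNL() documents that its caller must already hold it. A rough sketch of the caller-side pattern this implies is below (the xsk.c hunk that follows adds exactly this rtnl_lock()/rtnl_unlock() pair to xsk_bind()); the function name and trimmed body are illustrative, not the real xsk_bind().

/* Illustrative only: how a caller is expected to drive
 * xdp_umem_assign_dev() after this change.  The state and flag checks of
 * the real xsk_bind() are omitted.
 */
static int xsk_bind_sketch(struct xdp_sock *xs, struct net_device *dev,
			   u16 queue_id, u16 flags)
{
	int err;

	rtnl_lock();			/* caller now owns the rtnl section */
	mutex_lock(&xs->mutex);

	err = xdp_umem_assign_dev(xs->umem, dev, queue_id, flags);
	/* no unlock is needed on failure inside the callee any more:
	 * xdp_umem_assign_dev() only ASSERT_RTNL()s and returns
	 */

	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}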
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index d4d6f10aa936..59b57d708697 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -240,6 +240,9 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
mutex_lock(&xs->mutex);
+ if (xs->queue_id >= xs->dev->real_num_tx_queues)
+ goto out;
+
while (xskq_peek_desc(xs->tx, &desc)) {
char *buffer;
u64 addr;
@@ -250,12 +253,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
goto out;
}
- if (xskq_reserve_addr(xs->umem->cq))
- goto out;
-
- if (xs->queue_id >= xs->dev->real_num_tx_queues)
- goto out;
-
len = desc.len;
skb = sock_alloc_send_skb(sk, len, 1, &err);
if (unlikely(!skb)) {
@@ -267,7 +264,7 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
addr = desc.addr;
buffer = xdp_umem_get_data(xs->umem, addr);
err = skb_store_bits(skb, 0, buffer, len);
- if (unlikely(err)) {
+ if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
kfree_skb(skb);
goto out;
}
@@ -433,6 +430,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY))
return -EINVAL;
+ rtnl_lock();
mutex_lock(&xs->mutex);
if (xs->state != XSK_READY) {
err = -EBUSY;
@@ -518,6 +516,7 @@ out_unlock:
xs->state = XSK_BOUND;
out_release:
mutex_unlock(&xs->mutex);
+ rtnl_unlock();
return err;
}
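Taken together, the xsk.c TX-path hunks do two things: the queue_id bounds check is performed once, before any descriptor is consumed, and the completion-ring slot is reserved only after the skb has been allocated and filled, so an allocation or copy failure no longer leaves a reserved cq entry behind. A condensed sketch of the resulting flow follows; locals, skb metadata and the actual transmit step are simplified, and xmit_one() is a placeholder, not a kernel function.

/* Condensed sketch of the xsk_generic_xmit() flow after this change; the
 * real function also sets up the skb destructor, fills skb fields and
 * flushes the device queue on exit.
 */
static int xsk_generic_xmit_sketch(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* validated once, before any descriptor is consumed */
	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_peek_desc(xs->tx, &desc)) {
		skb = sock_alloc_send_skb(sk, desc.len, 1, &err);
		if (unlikely(!skb))
			goto out;

		err = skb_store_bits(skb, 0,
				     xdp_umem_get_data(xs->umem, desc.addr),
				     desc.len);
		/* the completion-ring slot is reserved only once the skb
		 * exists and is filled, so a failure above no longer leaks
		 * a reserved cq entry
		 */
		if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		err = xmit_one(xs, skb, desc.addr);	/* placeholder */
		xskq_discard_desc(xs->tx);
	}

out:
	mutex_unlock(&xs->mutex);
	return err;
}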