author     Jakub Kicinski <kuba@kernel.org>    2025-05-27 19:19:37 -0700
committer  Jakub Kicinski <kuba@kernel.org>    2025-05-27 19:19:38 -0700
commit     163786f71a12237841b998db39e48fa574f861fd (patch)
tree       26d93c6fede77fa0e07ad4d5b3040e477ff8fb18 /net
parent     429d410bf9eff7bc930e5850277c87ed47ceaeea (diff)
parent     affffcbb87266b76b6f83edb39ae404604ffc6b5 (diff)
Merge branch 'devmem-tcp-minor-cleanups-and-ksft-improvements'
Mina Almasry says:

====================
Devmem TCP minor cleanups and ksft improvements

v2: https://lore.kernel.org/20250519023517.4062941-1-almasrymina@google.com
====================

Link: https://patch.msgid.link/20250523230524.1107879-1-almasrymina@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net')
-rw-r--r--   net/core/devmem.c        5
-rw-r--r--   net/core/devmem.h        5
-rw-r--r--   net/core/netdev-genl.c   8
-rw-r--r--   net/core/page_pool.c     4
-rw-r--r--   net/ipv4/tcp.c          24
5 files changed, 22 insertions, 24 deletions
diff --git a/net/core/devmem.c b/net/core/devmem.c
index 0dba26baae18..b3a62ca0df65 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -178,7 +178,8 @@ err_close_rxq:
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
enum dma_data_direction direction,
- unsigned int dmabuf_fd, struct netlink_ext_ack *extack)
+ unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
+ struct netlink_ext_ack *extack)
{
struct net_devmem_dmabuf_binding *binding;
static u32 id_alloc_next;
@@ -299,6 +300,8 @@ net_devmem_bind_dmabuf(struct net_device *dev,
if (err < 0)
goto err_free_chunks;
+ list_add(&binding->list, &priv->bindings);
+
return binding;
err_free_chunks:
diff --git a/net/core/devmem.h b/net/core/devmem.h
index 58d8d3c1b945..e7ba77050b8f 100644
--- a/net/core/devmem.h
+++ b/net/core/devmem.h
@@ -11,6 +11,7 @@
#define _NET_DEVMEM_H
#include <net/netmem.h>
+#include <net/netdev_netlink.h>
struct netlink_ext_ack;
@@ -82,7 +83,8 @@ void __net_devmem_dmabuf_binding_free(struct work_struct *wq);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
enum dma_data_direction direction,
- unsigned int dmabuf_fd, struct netlink_ext_ack *extack);
+ unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
+ struct netlink_ext_ack *extack);
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
@@ -170,6 +172,7 @@ static inline void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
enum dma_data_direction direction,
+ struct netdev_nl_sock *priv,
struct netlink_ext_ack *extack)
{
return ERR_PTR(-EOPNOTSUPP);
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 762570dcda61..2afa7b2141aa 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -908,7 +908,7 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
}
binding = net_devmem_bind_dmabuf(netdev, DMA_FROM_DEVICE, dmabuf_fd,
- info->extack);
+ priv, info->extack);
if (IS_ERR(binding)) {
err = PTR_ERR(binding);
goto err_unlock;
@@ -943,8 +943,6 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
goto err_unbind;
}
- list_add(&binding->list, &priv->bindings);
-
nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
genlmsg_end(rsp, hdr);
@@ -1020,15 +1018,13 @@ int netdev_nl_bind_tx_doit(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_netdev;
}
- binding = net_devmem_bind_dmabuf(netdev, DMA_TO_DEVICE, dmabuf_fd,
+ binding = net_devmem_bind_dmabuf(netdev, DMA_TO_DEVICE, dmabuf_fd, priv,
info->extack);
if (IS_ERR(binding)) {
err = PTR_ERR(binding);
goto err_unlock_netdev;
}
- list_add(&binding->list, &priv->bindings);
-
nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
genlmsg_end(rsp, hdr);
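Taken together, the devmem.c, devmem.h and netdev-genl.c hunks move the "add the new binding to the socket's bindings list" step out of the two netlink handlers and into net_devmem_bind_dmabuf() itself, so a successful bind is always registered on priv->bindings and the rx/tx doit paths no longer duplicate the list_add(). A minimal, self-contained user-space sketch of that "constructor registers the object" pattern, with hypothetical names (not the kernel code):

#include <stdio.h>
#include <stdlib.h>

struct binding { struct binding *next; int id; };
struct nl_sock_priv { struct binding *bindings; };

/* Before this series, both netlink handlers linked the binding onto the
 * socket's list themselves after a successful bind.  The series moves that
 * step into the bind function, as sketched here: the constructor links the
 * new object onto the owner's list before returning, so every caller
 * behaves the same way.
 */
static struct binding *bind_dmabuf_sketch(struct nl_sock_priv *priv, int id)
{
	struct binding *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->id = id;
	b->next = priv->bindings;	/* register on priv->bindings here */
	priv->bindings = b;
	return b;
}

int main(void)
{
	struct nl_sock_priv priv = { .bindings = NULL };

	if (!bind_dmabuf_sketch(&priv, 1) || !bind_dmabuf_sketch(&priv, 2))
		return 1;
	for (struct binding *b = priv.bindings; b; b = b->next)
		printf("binding id %d\n", b->id);
	return 0;
}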
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 974f3eef2efa..4011eb305cee 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -867,8 +867,8 @@ void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
if (!allow_direct)
allow_direct = page_pool_napi_local(pool);
- netmem =
- __page_pool_put_page(pool, netmem, dma_sync_size, allow_direct);
+ netmem = __page_pool_put_page(pool, netmem, dma_sync_size,
+ allow_direct);
if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
/* Cache full, fallback to free pages */
recycle_stat_inc(pool, ring_full);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b7b6ab41b496..f64f8276a73c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1067,7 +1067,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
int flags, err, copied = 0;
int mss_now = 0, size_goal, copied_syn = 0;
int process_backlog = 0;
- bool sockc_valid = true;
+ int sockc_err = 0;
int zc = 0;
long timeo;
@@ -1075,13 +1075,10 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
sockc = (struct sockcm_cookie){ .tsflags = READ_ONCE(sk->sk_tsflags) };
if (msg->msg_controllen) {
- err = sock_cmsg_send(sk, msg, &sockc);
- if (unlikely(err))
- /* Don't return error until MSG_FASTOPEN has been
- * processed; that may succeed even if the cmsg is
- * invalid.
- */
- sockc_valid = false;
+ sockc_err = sock_cmsg_send(sk, msg, &sockc);
+ /* Don't return error until MSG_FASTOPEN has been processed;
+ * that may succeed even if the cmsg is invalid.
+ */
}
if ((flags & MSG_ZEROCOPY) && size) {
@@ -1092,7 +1089,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
skb = tcp_write_queue_tail(sk);
uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb),
- sockc_valid && !!sockc.dmabuf_id);
+ !sockc_err && sockc.dmabuf_id);
if (!uarg) {
err = -ENOBUFS;
goto out_err;
@@ -1102,7 +1099,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
else
uarg_to_msgzc(uarg)->zerocopy = 0;
- if (sockc_valid && sockc.dmabuf_id) {
+ if (!sockc_err && sockc.dmabuf_id) {
binding = net_devmem_get_binding(sk, sockc.dmabuf_id);
if (IS_ERR(binding)) {
err = PTR_ERR(binding);
@@ -1116,7 +1113,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
zc = MSG_SPLICE_PAGES;
}
- if (sockc_valid && sockc.dmabuf_id &&
+ if (!sockc_err && sockc.dmabuf_id &&
(!(flags & MSG_ZEROCOPY) || !sock_flag(sk, SOCK_ZEROCOPY))) {
err = -EINVAL;
goto out_err;
@@ -1160,9 +1157,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
/* 'common' sending to sendq */
}
- if (!sockc_valid) {
- if (!err)
- err = -EINVAL;
+ if (sockc_err) {
+ err = sockc_err;
goto out_err;
}