From 6d0d419914286a1b255530812a38d992c6c4e608 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Mon, 25 Sep 2023 13:03:06 +0100
Subject: iov_iter, net: Move csum_and_copy_to/from_iter() to net/

Move csum_and_copy_to/from_iter() to net code now that the iteration
framework can be #included.

Signed-off-by: David Howells
Link: https://lore.kernel.org/r/20230925120309.1731676-10-dhowells@redhat.com
cc: Alexander Viro
cc: Jens Axboe
cc: Christoph Hellwig
cc: Christian Brauner
cc: Matthew Wilcox
cc: Linus Torvalds
cc: David Laight
cc: "David S. Miller"
cc: Eric Dumazet
cc: Jakub Kicinski
cc: Paolo Abeni
cc: linux-block@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
cc: netdev@vger.kernel.org
Signed-off-by: Christian Brauner
---
 include/linux/skbuff.h | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

(limited to 'include/linux/skbuff.h')

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4174c4b82d13..d0656cc11c16 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3679,6 +3679,31 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l
 	return __skb_put_padto(skb, len, true);
 }
 
+static inline __wsum csum_and_memcpy(void *to, const void *from, size_t len,
+				     __wsum sum, size_t off)
+{
+	__wsum next = csum_partial_copy_nocheck(from, to, len);
+	return csum_block_add(sum, next, off);
+}
+
+struct csum_state {
+	__wsum csum;
+	size_t off;
+};
+
+size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+
+static __always_inline __must_check
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
+				  __wsum *csum, struct iov_iter *i)
+{
+	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
+	if (likely(copied == bytes))
+		return true;
+	iov_iter_revert(i, copied);
+	return false;
+}
+
 static inline int skb_add_data(struct sk_buff *skb,
 			       struct iov_iter *from, int copy)
 {
--
cgit
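
For context, a minimal sketch of how a caller might consume the
csum_and_copy_from_iter_full() helper declared above, in the style of the
getfrag callbacks on the datagram transmit path. copy_chunk() and its use
of struct csum_state as a running accumulator are illustrative assumptions
for this sketch, not code from the patch:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/uio.h>
#include <net/checksum.h>

/*
 * Illustrative sketch, not part of the patch: copy @bytes from a user
 * iterator into @to while keeping a running Internet checksum in
 * @csstate.  copy_chunk() is a hypothetical helper, not a kernel API.
 */
static int copy_chunk(void *to, size_t bytes, struct iov_iter *from,
		      struct csum_state *csstate)
{
	__wsum csum = 0;

	/*
	 * Copy and checksum in one pass.  On a short copy, the _full
	 * variant reverts @from, so the iterator is left untouched.
	 */
	if (!csum_and_copy_from_iter_full(to, bytes, &csum, from))
		return -EFAULT;

	/*
	 * Fold the chunk's sum into the running total at its byte
	 * offset; the 16-bit ones' complement sum is sensitive to
	 * odd/even alignment, which csum_block_add() accounts for.
	 */
	csstate->csum = csum_block_add(csstate->csum, csum, csstate->off);
	csstate->off += bytes;
	return 0;
}

This mirrors the pattern used by callers such as ip_generic_getfrag():
checksum each chunk starting from zero, then fold the result into the
running total at the chunk's offset, which is why these helpers now live
alongside the other sk_buff inlines rather than in lib/iov_iter.c.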