Diffstat (limited to 'lib/iov_iter.c')
 lib/iov_iter.c | 437 ++++++++++----------------------
 1 file changed, 136 insertions(+), 301 deletions(-)
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 27234a820eeb..de7d11cf4c63 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
@@ -10,192 +9,71 @@
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
-#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
+#include <linux/iov_iter.h>
-/* covers ubuf and kbuf alike */
-#define iterate_buf(i, n, base, len, off, __p, STEP) { \
- size_t __maybe_unused off = 0; \
- len = n; \
- base = __p + i->iov_offset; \
- len -= (STEP); \
- i->iov_offset += len; \
- n = len; \
-}
-
-/* covers iovec and kvec alike */
-#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
- size_t off = 0; \
- size_t skip = i->iov_offset; \
- do { \
- len = min(n, __p->iov_len - skip); \
- if (likely(len)) { \
- base = __p->iov_base + skip; \
- len -= (STEP); \
- off += len; \
- skip += len; \
- n -= len; \
- if (skip < __p->iov_len) \
- break; \
- } \
- __p++; \
- skip = 0; \
- } while (n); \
- i->iov_offset = skip; \
- n = off; \
-}
-
-#define iterate_bvec(i, n, base, len, off, p, STEP) { \
- size_t off = 0; \
- unsigned skip = i->iov_offset; \
- while (n) { \
- unsigned offset = p->bv_offset + skip; \
- unsigned left; \
- void *kaddr = kmap_local_page(p->bv_page + \
- offset / PAGE_SIZE); \
- base = kaddr + offset % PAGE_SIZE; \
- len = min(min(n, (size_t)(p->bv_len - skip)), \
- (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
- left = (STEP); \
- kunmap_local(kaddr); \
- len -= left; \
- off += len; \
- skip += len; \
- if (skip == p->bv_len) { \
- skip = 0; \
- p++; \
- } \
- n -= len; \
- if (left) \
- break; \
- } \
- i->iov_offset = skip; \
- n = off; \
-}
-
-#define iterate_xarray(i, n, base, len, __off, STEP) { \
- __label__ __out; \
- size_t __off = 0; \
- struct folio *folio; \
- loff_t start = i->xarray_start + i->iov_offset; \
- pgoff_t index = start / PAGE_SIZE; \
- XA_STATE(xas, i->xarray, index); \
- \
- len = PAGE_SIZE - offset_in_page(start); \
- rcu_read_lock(); \
- xas_for_each(&xas, folio, ULONG_MAX) { \
- unsigned left; \
- size_t offset; \
- if (xas_retry(&xas, folio)) \
- continue; \
- if (WARN_ON(xa_is_value(folio))) \
- break; \
- if (WARN_ON(folio_test_hugetlb(folio))) \
- break; \
- offset = offset_in_folio(folio, start + __off); \
- while (offset < folio_size(folio)) { \
- base = kmap_local_folio(folio, offset); \
- len = min(n, len); \
- left = (STEP); \
- kunmap_local(base); \
- len -= left; \
- __off += len; \
- n -= len; \
- if (left || n == 0) \
- goto __out; \
- offset += len; \
- len = PAGE_SIZE; \
- } \
- } \
-__out: \
- rcu_read_unlock(); \
- i->iov_offset += __off; \
- n = __off; \
-}
-
-#define __iterate_and_advance(i, n, base, len, off, I, K) { \
- if (unlikely(i->count < n)) \
- n = i->count; \
- if (likely(n)) { \
- if (likely(iter_is_ubuf(i))) { \
- void __user *base; \
- size_t len; \
- iterate_buf(i, n, base, len, off, \
- i->ubuf, (I)) \
- } else if (likely(iter_is_iovec(i))) { \
- const struct iovec *iov = iter_iov(i); \
- void __user *base; \
- size_t len; \
- iterate_iovec(i, n, base, len, off, \
- iov, (I)) \
- i->nr_segs -= iov - iter_iov(i); \
- i->__iov = iov; \
- } else if (iov_iter_is_bvec(i)) { \
- const struct bio_vec *bvec = i->bvec; \
- void *base; \
- size_t len; \
- iterate_bvec(i, n, base, len, off, \
- bvec, (K)) \
- i->nr_segs -= bvec - i->bvec; \
- i->bvec = bvec; \
- } else if (iov_iter_is_kvec(i)) { \
- const struct kvec *kvec = i->kvec; \
- void *base; \
- size_t len; \
- iterate_iovec(i, n, base, len, off, \
- kvec, (K)) \
- i->nr_segs -= kvec - i->kvec; \
- i->kvec = kvec; \
- } else if (iov_iter_is_xarray(i)) { \
- void *base; \
- size_t len; \
- iterate_xarray(i, n, base, len, off, \
- (K)) \
- } \
- i->count -= n; \
- } \
-}
-#define iterate_and_advance(i, n, base, len, off, I, K) \
- __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
-
-static int copyout(void __user *to, const void *from, size_t n)
+static __always_inline
+size_t copy_to_user_iter(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
if (should_fail_usercopy())
- return n;
- if (access_ok(to, n)) {
- instrument_copy_to_user(to, from, n);
- n = raw_copy_to_user(to, from, n);
+ return len;
+ if (access_ok(iter_to, len)) {
+ from += progress;
+ instrument_copy_to_user(iter_to, from, len);
+ len = raw_copy_to_user(iter_to, from, len);
}
- return n;
+ return len;
}
-static int copyout_nofault(void __user *to, const void *from, size_t n)
+static __always_inline
+size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
- long res;
+ ssize_t res;
if (should_fail_usercopy())
- return n;
-
- res = copy_to_user_nofault(to, from, n);
+ return len;
- return res < 0 ? n : res;
+ from += progress;
+ res = copy_to_user_nofault(iter_to, from, len);
+ return res < 0 ? len : res;
}
-static int copyin(void *to, const void __user *from, size_t n)
+static __always_inline
+size_t copy_from_user_iter(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
{
- size_t res = n;
+ size_t res = len;
if (should_fail_usercopy())
- return n;
- if (access_ok(from, n)) {
- instrument_copy_from_user_before(to, from, n);
- res = raw_copy_from_user(to, from, n);
- instrument_copy_from_user_after(to, from, n, res);
+ return len;
+ if (access_ok(iter_from, len)) {
+ to += progress;
+ instrument_copy_from_user_before(to, iter_from, len);
+ res = raw_copy_from_user(to, iter_from, len);
+ instrument_copy_from_user_after(to, iter_from, len, res);
}
return res;
}
+static __always_inline
+size_t memcpy_to_iter(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ memcpy(iter_to, from + progress, len);
+ return 0;
+}
+
+static __always_inline
+size_t memcpy_from_iter(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ memcpy(to + progress, iter_from, len);
+ return 0;
+}
+
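The iterate_and_advance() these step functions plug into lives in the new <linux/iov_iter.h> header, which this diff does not show. Roughly: a step callback receives the current segment base, the progress made so far, the segment length and two opaque pointers, and returns the number of bytes it could not process; the dispatcher picks a user-space step (ustep) or kernel step per iterator type and advances the iterator itself. A simplified sketch of the iovec leg of that contract (illustrative, trimmed to the shape of the real header):

	typedef size_t (*iov_ustep_f)(void __user *iter_base, size_t progress,
				      size_t len, void *priv, void *priv2);

	/* Sketch only: walk user iovecs, feeding each piece to the step.
	 * A non-zero return from the step (bytes NOT processed) stops the
	 * walk early; the iterator advances by what was processed. */
	static __always_inline
	size_t iterate_iovec_sketch(struct iov_iter *iter, size_t len,
				    void *priv, iov_ustep_f ustep)
	{
		const struct iovec *p = iter->__iov;
		size_t progress = 0, skip = iter->iov_offset;

		do {
			size_t remain, consumed;
			size_t part = min(len, p->iov_len - skip);

			if (likely(part)) {
				remain = ustep(p->iov_base + skip, progress,
					       part, priv, NULL);
				consumed = part - remain;
				progress += consumed;
				skip += consumed;
				len -= consumed;
				if (skip < p->iov_len)
					break;	/* step fell short */
			}
			p++;
			skip = 0;
		} while (len);

		iter->nr_segs -= p - iter->__iov;
		iter->__iov = p;
		iter->iov_offset = skip;
		iter->count -= progress;
		return progress;
	}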
/*
* fault_in_iov_iter_readable - fault in iov iterator for reading
* @i: iterator
@@ -290,7 +168,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
.iter_type = ITER_IOVEC,
.copy_mc = false,
.nofault = false,
- .user_backed = true,
.data_source = direction,
.__iov = iov,
.nr_segs = nr_segs,
@@ -300,36 +177,35 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
}
EXPORT_SYMBOL(iov_iter_init);
-static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
- __wsum sum, size_t off)
-{
- __wsum next = csum_partial_copy_nocheck(from, to, len);
- return csum_block_add(sum, next, off);
-}
-
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(i->data_source))
return 0;
if (user_backed_iter(i))
might_fault();
- iterate_and_advance(i, bytes, base, len, off,
- copyout(base, addr + off, len),
- memcpy(base, addr + off, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, (void *)addr,
+ copy_to_user_iter, memcpy_to_iter);
}
EXPORT_SYMBOL(_copy_to_iter);
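Callers of the exported API are unchanged by the conversion. An illustrative read-side user, not from this patch:

	/* Illustrative only: copy_to_iter() from <linux/uio.h> is the
	 * size-checked wrapper around _copy_to_iter() and works the same
	 * whether the iterator is user-backed or kernel-backed. */
	static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *to)
	{
		static const char msg[] = "hello\n";

		return copy_to_iter(msg, sizeof(msg) - 1, to);
	}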
#ifdef CONFIG_ARCH_HAS_COPY_MC
-static int copyout_mc(void __user *to, const void *from, size_t n)
-{
- if (access_ok(to, n)) {
- instrument_copy_to_user(to, from, n);
- n = copy_mc_to_user((__force void *) to, from, n);
+static __always_inline
+size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ if (access_ok(iter_to, len)) {
+ from += progress;
+ instrument_copy_to_user(iter_to, from, len);
+ len = copy_mc_to_user(iter_to, from, len);
}
- return n;
+ return len;
+}
+
+static __always_inline
+size_t memcpy_to_iter_mc(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ return copy_mc_to_kernel(iter_to, from + progress, len);
}
/**
@@ -362,22 +238,35 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
return 0;
if (user_backed_iter(i))
might_fault();
- __iterate_and_advance(i, bytes, base, len, off,
- copyout_mc(base, addr + off, len),
- copy_mc_to_kernel(base, addr + off, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, (void *)addr,
+ copy_to_user_iter_mc, memcpy_to_iter_mc);
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
-static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from,
- size_t size)
+static __always_inline
+size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return copy_mc_to_kernel(to + progress, iter_from, len);
+}
+
+static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(i->count < bytes))
+ bytes = i->count;
+ if (unlikely(!bytes))
+ return 0;
+ return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc);
+}
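Only bvec-backed iterators are expected to carry the copy_mc flag, which is why __copy_from_iter_mc() can go straight to iterate_bvec(); the new iterate_bvec() helper advances the iterator and decrements i->count itself, so no extra bookkeeping appears here. A hypothetical setup for such a source (page/off/len are placeholders; the flag setter is iov_iter_set_copy_mc() from <linux/uio.h>):

	struct bio_vec bvec;
	struct iov_iter src;

	/* Hypothetical: describe one page and mark the iterator so that
	 * _copy_from_iter() takes the machine-check-safe route above. */
	bvec_set_page(&bvec, page, len, off);
	iov_iter_bvec(&src, ITER_SOURCE, &bvec, 1, len);
	iov_iter_set_copy_mc(&src);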
+
+static __always_inline
+size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- if (iov_iter_is_copy_mc(i))
- return (void *)copy_mc_to_kernel(to, from, size);
- return memcpy(to, from, size);
+ if (unlikely(iov_iter_is_copy_mc(i)))
+ return __copy_from_iter_mc(addr, bytes, i);
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter, memcpy_from_iter);
}
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
@@ -387,30 +276,44 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
if (user_backed_iter(i))
might_fault();
- iterate_and_advance(i, bytes, base, len, off,
- copyin(addr + off, base, len),
- memcpy_from_iter(i, addr + off, base, len)
- )
-
- return bytes;
+ return __copy_from_iter(addr, bytes, i);
}
EXPORT_SYMBOL(_copy_from_iter);
+static __always_inline
+size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return __copy_from_user_inatomic_nocache(to + progress, iter_from, len);
+}
+
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(!i->data_source))
return 0;
- iterate_and_advance(i, bytes, base, len, off,
- __copy_from_user_inatomic_nocache(addr + off, base, len),
- memcpy(addr + off, base, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter_nocache,
+ memcpy_from_iter);
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+static __always_inline
+size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return __copy_from_user_flushcache(to + progress, iter_from, len);
+}
+
+static __always_inline
+size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ memcpy_flushcache(to + progress, iter_from, len);
+ return 0;
+}
+
/**
* _copy_from_iter_flushcache - write destination through cpu cache
* @addr: destination kernel address
@@ -432,12 +335,9 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
if (WARN_ON_ONCE(!i->data_source))
return 0;
- iterate_and_advance(i, bytes, base, len, off,
- __copy_from_user_flushcache(addr + off, base, len),
- memcpy_flushcache(addr + off, base, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter_flushcache,
+ memcpy_from_iter_flushcache);
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
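An illustrative consumer, not from this patch: persistent-memory write paths want data pushed past the CPU cache as it is copied, so they pick the flushcache variant when the architecture offers one.

	/* Illustrative sketch of a pmem-style write helper; pmem_addr is
	 * a placeholder for a mapped persistent-memory destination. */
	static size_t pmem_write_from_iter(void *pmem_addr, size_t bytes,
					   struct iov_iter *i)
	{
		return _copy_from_iter_flushcache(pmem_addr, bytes, i);
	}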
@@ -509,10 +409,9 @@ size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t byte
void *kaddr = kmap_local_page(page);
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
- iterate_and_advance(i, n, base, len, off,
- copyout_nofault(base, kaddr + offset + off, len),
- memcpy(base, kaddr + offset + off, len)
- )
+		n = iterate_and_advance(i, n, kaddr + offset,
+ copy_to_user_iter_nofault,
+ memcpy_to_iter);
kunmap_local(kaddr);
res += n;
bytes -= n;
@@ -555,14 +454,25 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
}
EXPORT_SYMBOL(copy_page_from_iter);
-size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+static __always_inline
+size_t zero_to_user_iter(void __user *iter_to, size_t progress,
+ size_t len, void *priv, void *priv2)
{
- iterate_and_advance(i, bytes, base, len, count,
- clear_user(base, len),
- memset(base, 0, len)
- )
+ return clear_user(iter_to, len);
+}
+
+static __always_inline
+size_t zero_to_iter(void *iter_to, size_t progress,
+ size_t len, void *priv, void *priv2)
+{
+ memset(iter_to, 0, len);
+ return 0;
+}
- return bytes;
+size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+{
+ return iterate_and_advance(i, bytes, NULL,
+ zero_to_user_iter, zero_to_iter);
}
EXPORT_SYMBOL(iov_iter_zero);
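A typical caller-side use of the zeroing helper, as an illustrative snippet rather than part of this patch: padding out a short read so the iterator still advances by the amount the caller asked for.

	/* Illustrative only: zero-fill whatever part of the request the
	 * backing store could not satisfy. */
	static size_t pad_short_read(struct iov_iter *to, size_t copied,
				     size_t requested)
	{
		if (copied < requested)
			copied += iov_iter_zero(requested - copied, to);
		return copied;
	}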
@@ -587,10 +497,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
}
p = kmap_atomic(page) + offset;
- iterate_and_advance(i, n, base, len, off,
- copyin(p + off, base, len),
- memcpy_from_iter(i, p + off, base, len)
- )
+ n = __copy_from_iter(p, n, i);
kunmap_atomic(p);
copied += n;
offset += n;
@@ -1181,78 +1088,6 @@ ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
- struct iov_iter *i)
-{
- __wsum sum, next;
- sum = *csum;
- if (WARN_ON_ONCE(!i->data_source))
- return 0;
-
- iterate_and_advance(i, bytes, base, len, off, ({
- next = csum_and_copy_from_user(base, addr + off, len);
- sum = csum_block_add(sum, next, off);
- next ? 0 : len;
- }), ({
- sum = csum_and_memcpy(addr + off, base, len, sum, off);
- })
- )
- *csum = sum;
- return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_from_iter);
-
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
- struct iov_iter *i)
-{
- struct csum_state *csstate = _csstate;
- __wsum sum, next;
-
- if (WARN_ON_ONCE(i->data_source))
- return 0;
- if (unlikely(iov_iter_is_discard(i))) {
- // can't use csum_memcpy() for that one - data is not copied
- csstate->csum = csum_block_add(csstate->csum,
- csum_partial(addr, bytes, 0),
- csstate->off);
- csstate->off += bytes;
- return bytes;
- }
-
- sum = csum_shift(csstate->csum, csstate->off);
- iterate_and_advance(i, bytes, base, len, off, ({
- next = csum_and_copy_to_user(addr + off, base, len);
- sum = csum_block_add(sum, next, off);
- next ? 0 : len;
- }), ({
- sum = csum_and_memcpy(base, addr + off, len, sum, off);
- })
- )
- csstate->csum = csum_shift(sum, csstate->off);
- csstate->off += bytes;
- return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_to_iter);
-
-size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
- struct iov_iter *i)
-{
-#ifdef CONFIG_CRYPTO_HASH
- struct ahash_request *hash = hashp;
- struct scatterlist sg;
- size_t copied;
-
- copied = copy_to_iter(addr, bytes, i);
- sg_init_one(&sg, addr, copied);
- ahash_request_set_crypt(hash, &sg, NULL, copied);
- crypto_ahash_update(hash);
- return copied;
-#else
- return 0;
-#endif
-}
-EXPORT_SYMBOL(hash_and_copy_to_iter);
-
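The checksum and hash helpers are deleted from lib/ here; elsewhere in the series they are rebuilt on the networking side as step functions over the same dispatcher, with the checksum state threaded through the second opaque pointer (the two-priv form is iterate_and_advance2() in the new header). A sketch of what such a step looks like, assuming the old csum semantics — illustrative, not the exact relocated code:

	/* Sketch of a checksumming kernel-space step: priv2 carries the
	 * __wsum accumulator across segments; progress gives the block
	 * offset needed by csum_block_add(). */
	static __always_inline
	size_t memcpy_from_iter_csum(void *iter_from, size_t progress,
				     size_t len, void *to, void *priv2)
	{
		__wsum *csum = priv2;
		__wsum next = csum_partial_copy_nocheck(iter_from,
							to + progress, len);

		*csum = csum_block_add(*csum, next, progress);
		return 0;
	}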
static int iov_npages(const struct iov_iter *i, int maxpages)
{
size_t skip = i->iov_offset, size = i->count;