author	Al Viro <viro@zeniv.linux.org.uk>	2022-06-06 18:42:59 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2022-06-07 16:18:08 -0400
commit	0e3c3b901c00364198d31482fa2552ccf2d5c899 (patch)
tree	183000d0206ce0fc5c925d1f23819988a853b63e	/include/linux/uio.h
parent	f2906aa863381afb0015a9eb7fefad885d4e5a56 (diff)
No need of likely/unlikely on calls of check_copy_size()

check_copy_size() is inline, and the unlikely() annotations inside of it
(including the implicit one in WARN_ON_ONCE()) suffice to convince the
compiler that getting false back from it is unlikely.

Spotted-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Christian Brauner (Microsoft) <brauner@kernel.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
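To illustrate the point of the commit message, here is a minimal, self-contained userspace sketch; it is not the kernel's actual check_copy_size(), and all my_* names plus do_copy() are invented for this example. An always-inline helper whose failure paths are already marked unlikely, including via a WARN_ON_ONCE()-style macro, carries those branch hints into the caller after inlining, so wrapping the call itself in unlikely() adds nothing:

/*
 * Sketch only: an always-inline helper that already annotates its own
 * cold paths, so callers can use the plain "if (helper(...))" form.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define my_unlikely(x)	__builtin_expect(!!(x), 0)

/* Simplified stand-in for WARN_ON_ONCE(): warn once, return the condition. */
#define my_warn_on_once(cond) ({					\
	bool __c = my_unlikely(cond);					\
	static bool __warned;						\
	if (__c && !__warned) {						\
		__warned = true;					\
		fprintf(stderr, "warning: %s\n", #cond);		\
	}								\
	__c;								\
})

static inline __attribute__((always_inline)) bool
my_check_copy_size(size_t bytes, size_t limit)
{
	/* The cold branch is already annotated here... */
	if (my_warn_on_once(bytes > limit))
		return false;
	return true;
}

static size_t do_copy(char *dst, const char *src, size_t bytes)
{
	/* ...so the caller needs no unlikely() of its own, as in the patch. */
	if (my_check_copy_size(bytes, 64)) {
		for (size_t i = 0; i < bytes; i++)
			dst[i] = src[i];
		return bytes;
	}
	return 0;
}

int main(void)
{
	char dst[64];
	printf("copied %zu bytes\n", do_copy(dst, "hello", 5));
	printf("copied %zu bytes\n", do_copy(dst, "hello", 1000));
	return 0;
}

With gcc -O2, the rejection path in do_copy() is expected to be laid out as the cold branch whether or not the call is additionally wrapped in my_unlikely().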
Diffstat (limited to 'include/linux/uio.h')
-rw-r--r--	include/linux/uio.h	| 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 739285fe5a2f..76d305f3d4c2 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -156,19 +156,17 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
 static __always_inline __must_check
 size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(!check_copy_size(addr, bytes, true)))
-		return 0;
-	else
+	if (check_copy_size(addr, bytes, true))
 		return _copy_to_iter(addr, bytes, i);
+	return 0;
 }
 
 static __always_inline __must_check
 size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(!check_copy_size(addr, bytes, false)))
-		return 0;
-	else
+	if (check_copy_size(addr, bytes, false))
 		return _copy_from_iter(addr, bytes, i);
+	return 0;
 }
 
 static __always_inline __must_check
@@ -184,10 +182,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 static __always_inline __must_check
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(!check_copy_size(addr, bytes, false)))
-		return 0;
-	else
+	if (check_copy_size(addr, bytes, false))
 		return _copy_from_iter_nocache(addr, bytes, i);
+	return 0;
 }
 
 static __always_inline __must_check
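As a usage note: these helpers are __must_check and return the number of bytes actually copied, with 0 also covering the case where check_copy_size() refuses the request. A hedged caller-side sketch follows; the surrounding function and its name are hypothetical, and only copy_from_iter() and its signature come from the header above:

#include <linux/errno.h>
#include <linux/uio.h>

/* Hypothetical caller: pull 'len' bytes described by the iov_iter into buf. */
static int hypothetical_fill_buffer(void *buf, size_t len, struct iov_iter *from)
{
	/* A short copy (including 0 from a failed size check) is treated as a fault. */
	if (copy_from_iter(buf, len, from) != len)
		return -EFAULT;
	return 0;
}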