Diffstat (limited to 'mm/fadvise.c')

 -rw-r--r--  mm/fadvise.c | 188
 1 file changed, 128 insertions(+), 60 deletions(-)
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 3bcfd81db45e..67028e30aa91 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * mm/fadvise.c
  *
@@ -13,7 +14,6 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/backing-dev.h>
-#include <linux/pagevec.h>
 #include <linux/fadvise.h>
 #include <linux/writeback.h>
 #include <linux/syscalls.h>
@@ -21,36 +21,34 @@
 #include <asm/unistd.h>
 
+#include "internal.h"
+
 /*
  * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
  * deactivate the pages and clear PG_Referenced.
  */
-SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
+
+int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
 {
-	struct fd f = fdget(fd);
+	struct inode *inode;
 	struct address_space *mapping;
 	struct backing_dev_info *bdi;
 	loff_t endbyte;			/* inclusive */
 	pgoff_t start_index;
 	pgoff_t end_index;
 	unsigned long nrpages;
-	int ret = 0;
 
-	if (!f.file)
-		return -EBADF;
+	inode = file_inode(file);
+	if (S_ISFIFO(inode->i_mode))
+		return -ESPIPE;
 
-	if (S_ISFIFO(file_inode(f.file)->i_mode)) {
-		ret = -ESPIPE;
-		goto out;
-	}
+	mapping = file->f_mapping;
+	if (!mapping || len < 0)
+		return -EINVAL;
 
-	mapping = f.file->f_mapping;
-	if (!mapping || len < 0) {
-		ret = -EINVAL;
-		goto out;
-	}
+	bdi = inode_to_bdi(mapping->host);
 
-	if (mapping->a_ops->get_xip_mem) {
+	if (IS_DAX(inode) || (bdi == &noop_backing_dev_info)) {
 		switch (advice) {
 		case POSIX_FADV_NORMAL:
 		case POSIX_FADV_RANDOM:
@@ -61,77 +59,110 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
 			/* no bad return value, but ignore advice */
 			break;
 		default:
-			ret = -EINVAL;
+			return -EINVAL;
 		}
-		goto out;
+		return 0;
 	}
 
-	/* Careful about overflows. Len == 0 means "as much as possible" */
-	endbyte = offset + len;
+	/*
+	 * Careful about overflows. Len == 0 means "as much as possible". Use
+	 * unsigned math because signed overflows are undefined and UBSan
+	 * complains.
+	 */
+	endbyte = (u64)offset + (u64)len;
 	if (!len || endbyte < len)
-		endbyte = -1;
+		endbyte = LLONG_MAX;
 	else
 		endbyte--;		/* inclusive */
 
-	bdi = mapping->backing_dev_info;
-
 	switch (advice) {
 	case POSIX_FADV_NORMAL:
-		f.file->f_ra.ra_pages = bdi->ra_pages;
-		spin_lock(&f.file->f_lock);
-		f.file->f_mode &= ~FMODE_RANDOM;
-		spin_unlock(&f.file->f_lock);
+		file->f_ra.ra_pages = bdi->ra_pages;
+		spin_lock(&file->f_lock);
+		file->f_mode &= ~(FMODE_RANDOM | FMODE_NOREUSE);
+		spin_unlock(&file->f_lock);
 		break;
 	case POSIX_FADV_RANDOM:
-		spin_lock(&f.file->f_lock);
-		f.file->f_mode |= FMODE_RANDOM;
-		spin_unlock(&f.file->f_lock);
+		spin_lock(&file->f_lock);
+		file->f_mode |= FMODE_RANDOM;
+		spin_unlock(&file->f_lock);
 		break;
 	case POSIX_FADV_SEQUENTIAL:
-		f.file->f_ra.ra_pages = bdi->ra_pages * 2;
-		spin_lock(&f.file->f_lock);
-		f.file->f_mode &= ~FMODE_RANDOM;
-		spin_unlock(&f.file->f_lock);
+		file->f_ra.ra_pages = bdi->ra_pages * 2;
+		spin_lock(&file->f_lock);
+		file->f_mode &= ~FMODE_RANDOM;
+		spin_unlock(&file->f_lock);
 		break;
 	case POSIX_FADV_WILLNEED:
 		/* First and last PARTIAL page! */
-		start_index = offset >> PAGE_CACHE_SHIFT;
-		end_index = endbyte >> PAGE_CACHE_SHIFT;
+		start_index = offset >> PAGE_SHIFT;
+		end_index = endbyte >> PAGE_SHIFT;
 
 		/* Careful about overflow on the "+1" */
 		nrpages = end_index - start_index + 1;
 		if (!nrpages)
 			nrpages = ~0UL;
 
-		/*
-		 * Ignore return value because fadvise() shall return
-		 * success even if filesystem can't retrieve a hint,
-		 */
-		force_page_cache_readahead(mapping, f.file, start_index,
-					   nrpages);
+		force_page_cache_readahead(mapping, file, start_index, nrpages);
 		break;
 	case POSIX_FADV_NOREUSE:
+		spin_lock(&file->f_lock);
+		file->f_mode |= FMODE_NOREUSE;
+		spin_unlock(&file->f_lock);
 		break;
 	case POSIX_FADV_DONTNEED:
-		if (!bdi_write_congested(mapping->backing_dev_info))
-			__filemap_fdatawrite_range(mapping, offset, endbyte,
-						   WB_SYNC_NONE);
+		filemap_flush_range(mapping, offset, endbyte);
 
-		/* First and last FULL page! */
-		start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
-		end_index = (endbyte >> PAGE_CACHE_SHIFT);
+		/*
+		 * First and last FULL page! Partial pages are deliberately
+		 * preserved on the expectation that it is better to preserve
+		 * needed memory than to discard unneeded memory.
+		 */
+		start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
+		end_index = (endbyte >> PAGE_SHIFT);
+		/*
+		 * The page at end_index will be inclusively discarded by
+		 * invalidate_mapping_pages(), so subtracting 1 from
+		 * end_index means we will skip the last page. But if endbyte
+		 * is page aligned or is at the end of file, we should not skip
+		 * that page - discarding the last page is safe enough.
+		 */
+		if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK &&
+				endbyte != inode->i_size - 1) {
+			/* First page is tricky as 0 - 1 = -1, but pgoff_t
+			 * is unsigned, so the end_index >= start_index
+			 * check below would be true and we'll discard the whole
+			 * file cache which is not what was asked.
+			 */
+			if (end_index == 0)
+				break;
+
+			end_index--;
+		}
 
 		if (end_index >= start_index) {
-			unsigned long count = invalidate_mapping_pages(mapping,
-						start_index, end_index);
+			unsigned long nr_failed = 0;
 
 			/*
-			 * If fewer pages were invalidated than expected then
-			 * it is possible that some of the pages were on
-			 * a per-cpu pagevec for a remote CPU. Drain all
-			 * pagevecs and try again.
+			 * It's common to FADV_DONTNEED right after
+			 * the read or write that instantiates the
+			 * pages, in which case there will be some
+			 * sitting on the local LRU cache. Try to
+			 * avoid the expensive remote drain and the
+			 * second cache tree walk below by flushing
+			 * them out right away.
 			 */
-			if (count < (end_index - start_index + 1)) {
+			lru_add_drain();
+
+			mapping_try_invalidate(mapping, start_index, end_index,
+					       &nr_failed);
+
+			/*
+			 * The failures may be due to the folio being
+			 * in the LRU cache of a remote CPU. Drain all
+			 * caches and try again.
+			 */
+			if (nr_failed) {
 				lru_add_drain_all();
 				invalidate_mapping_pages(mapping, start_index,
 						end_index);
@@ -139,18 +170,55 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
 		}
 		break;
 	default:
-		ret = -EINVAL;
+		return -EINVAL;
 	}
-out:
-	fdput(f);
-	return ret;
+	return 0;
+}
+EXPORT_SYMBOL(generic_fadvise);
+
+int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+{
+	if (file->f_op->fadvise)
+		return file->f_op->fadvise(file, offset, len, advice);
+
+	return generic_fadvise(file, offset, len, advice);
+}
+EXPORT_SYMBOL(vfs_fadvise);
+
+#ifdef CONFIG_ADVISE_SYSCALLS
+
+int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+{
+	CLASS(fd, f)(fd);
+
+	if (fd_empty(f))
+		return -EBADF;
+
+	return vfs_fadvise(fd_file(f), offset, len, advice);
+}
+
+SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
+{
+	return ksys_fadvise64_64(fd, offset, len, advice);
 }
 
 #ifdef __ARCH_WANT_SYS_FADVISE64
 
 SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
 {
-	return sys_fadvise64_64(fd, offset, len, advice);
+	return ksys_fadvise64_64(fd, offset, len, advice);
+}
+
+#endif
+
+#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FADVISE64_64)
+
+COMPAT_SYSCALL_DEFINE6(fadvise64_64, int, fd, compat_arg_u64_dual(offset),
+		       compat_arg_u64_dual(len), int, advice)
+{
+	return ksys_fadvise64_64(fd, compat_arg_u64_glue(offset),
+				 compat_arg_u64_glue(len), advice);
 }
 
 #endif
+#endif
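For reference, the inclusive-endbyte computation that the new generic_fadvise() does with unsigned math can be exercised in isolation. Below is a minimal userspace sketch, not kernel code: the helper name fadvise_endbyte and the loff_t_ typedef are made up for illustration, and only the arithmetic mirrors the hunk above.

/*
 * Sketch of the endbyte computation in generic_fadvise(). The
 * unsigned addition cannot overflow (it wraps), so UBSan stays
 * quiet; converting the sum back to a signed type is
 * implementation-defined in C but yields the wrapped value on the
 * two's-complement targets the kernel supports, which the
 * "endbyte < len" test then catches and clamps.
 */
#include <stdint.h>
#include <stdio.h>
#include <limits.h>

typedef long long loff_t_;	/* stand-in for the kernel's loff_t */

static loff_t_ fadvise_endbyte(loff_t_ offset, loff_t_ len)
{
	loff_t_ endbyte = (uint64_t)offset + (uint64_t)len;

	if (!len || endbyte < len)	/* len == 0: "to end of file"; wrapped: clamp */
		return LLONG_MAX;
	return endbyte - 1;		/* inclusive last byte */
}

int main(void)
{
	printf("%lld\n", fadvise_endbyte(4096, 8192));	    /* 12287 */
	printf("%lld\n", fadvise_endbyte(4096, 0));	    /* LLONG_MAX */
	printf("%lld\n", fadvise_endbyte(LLONG_MAX, 4096)); /* LLONG_MAX (wrapped) */
	return 0;
}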
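The POSIX_FADV_DONTNEED index math is the inverse of WILLNEED's: partial pages at either end of the byte range survive, so the start is rounded up to a full page and the trailing page is skipped unless the range ends exactly on a page boundary or at EOF. A standalone sketch of just that rounding, assuming 4 KiB pages (dontneed_range is a hypothetical helper, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static void dontneed_range(uint64_t offset, uint64_t endbyte, uint64_t i_size)
{
	/* Round the start up: a partial head page is preserved. */
	uint64_t start_index = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	uint64_t end_index = endbyte >> PAGE_SHIFT;

	/* Skip a partial tail page unless endbyte is the last byte of a
	 * page or the last byte of the file (then nothing is lost). */
	if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK && endbyte != i_size - 1) {
		if (end_index == 0)	/* 0 - 1 would wrap the unsigned index */
			return;
		end_index--;
	}

	if (end_index >= start_index)
		printf("invalidate pages %llu..%llu\n",
		       (unsigned long long)start_index,
		       (unsigned long long)end_index);
	else
		printf("nothing to invalidate\n");
}

int main(void)
{
	dontneed_range(0, 8191, 1 << 20);	/* pages 0..1: both full */
	dontneed_range(100, 8191, 1 << 20);	/* page 1 only: partial head kept */
	dontneed_range(0, 6000, 1 << 20);	/* page 0 only: partial tail kept */
	return 0;
}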
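From userspace, all of this is reached through posix_fadvise(3), which libc routes to one of the fadvise64 syscall variants defined above (which one depends on the architecture). A typical use of the DONTNEED pattern the new code optimizes for, dropping cache right after a one-pass read while the pages are still in the local LRU cache:

#define _POSIX_C_SOURCE 200112L
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[1 << 16];
	int fd, err;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	while (read(fd, buf, sizeof(buf)) > 0)
		;	/* one sequential pass over the file */

	/* len == 0 means "to the end of the file" */
	err = posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
	if (err)	/* returns the error number directly, not -1/errno */
		fprintf(stderr, "posix_fadvise: %s\n", strerror(err));

	close(fd);
	return 0;
}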
