author     John Hubbard <jhubbard@nvidia.com>              2020-04-01 21:05:25 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-04-02 09:35:27 -0700
commit     94202f126f698691f8865906ad6a68203e5dde8c
tree       38dac4f05826af466d3ade8c56cbcc06d0d71d46 /mm/gup.c
parent     3b78d8347d31a050bb4f378a5f42cf495d873796
mm/gup: require FOLL_GET for get_user_pages_fast()
Internal to mm/gup.c, require that get_user_pages_fast() and
__get_user_pages_fast() identify themselves, by setting FOLL_GET. This is
required in order to be able to make decisions based on "FOLL_PIN, or
FOLL_GET, or both or neither are set", in upcoming patches.

Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Link: http://lkml.kernel.org/r/20200211001536.1027652-6-jhubbard@nvidia.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
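For context, a minimal sketch (not part of this patch) of how an in-kernel caller typically uses get_user_pages_fast() after this change: the caller does not need to pass FOLL_GET itself, because the fast-gup entry point now sets it internally, and the pages still come back with a +1 refcount that must be dropped with put_page(). The buffer name, count, and user address below are illustrative only.

	/* Hypothetical caller; "uaddr" and the count of 16 are illustrative. */
	struct page *pages[16];
	int nr;

	nr = get_user_pages_fast(uaddr, 16, FOLL_WRITE, pages);
	if (nr > 0) {
		/* ... read/write the pinned user pages ... */
		while (nr--)
			put_page(pages[nr]);	/* drop the FOLL_GET reference */
	}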
Diffstat (limited to 'mm/gup.c')
-rw-r--r--   mm/gup.c   19
1 file changed, 17 insertions, 2 deletions
diff --git a/mm/gup.c b/mm/gup.c
index e5f75e886663..c8affbea2019 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2390,6 +2390,14 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	unsigned long len, end;
 	unsigned long flags;
 	int nr = 0;
+	/*
+	 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
+	 * because gup fast is always a "pin with a +1 page refcount" request.
+	 */
+	unsigned int gup_flags = FOLL_GET;
+
+	if (write)
+		gup_flags |= FOLL_WRITE;
 
 	start = untagged_addr(start) & PAGE_MASK;
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
@@ -2415,7 +2423,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
 	    gup_fast_permitted(start, end)) {
 		local_irq_save(flags);
-		gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
+		gup_pgd_range(start, end, gup_flags, pages, &nr);
 		local_irq_restore(flags);
 	}
 
@@ -2454,7 +2462,7 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
 	int nr = 0, ret = 0;
 
 	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
-				       FOLL_FORCE | FOLL_PIN)))
+				       FOLL_FORCE | FOLL_PIN | FOLL_GET)))
 		return -EINVAL;
 
 	start = untagged_addr(start) & PAGE_MASK;
@@ -2521,6 +2529,13 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
 	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
 		return -EINVAL;
 
+	/*
+	 * The caller may or may not have explicitly set FOLL_GET; either way is
+	 * OK. However, internally (within mm/gup.c), gup fast variants must set
+	 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
+	 * request.
+	 */
+	gup_flags |= FOLL_GET;
 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
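The point of forcing FOLL_GET internally is that later code can branch on exactly which reference-taking mode was requested. Below is a rough, hypothetical sketch of that kind of decision; the helper name is invented for illustration and is not the kernel's.

#include <linux/mm.h>

/*
 * Hypothetical sketch only, not kernel code: illustrates the
 * "FOLL_PIN, or FOLL_GET, or both or neither" decision mentioned above.
 */
static void example_grab(struct page *page, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		/* pin_user_pages*() request: a tracked pin, handled by later patches */
	} else if (flags & FOLL_GET) {
		/* get_user_pages*() request: an ordinary +1 page refcount */
		get_page(page);
	} else {
		/* neither flag set: the walker takes no reference at all */
	}
}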