author     Christoph Hellwig <hch@lst.de>  2020-06-01 21:51:11 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-02 10:59:10 -0700
commit     a29adb6209cead1f6c34a8d72481fb183bfc2d68 (patch)
tree       73c6eb93fc95a56ec3ed2408106f0671bc68c9b5 /mm/vmalloc.c
parent     b521c43f58e5234ee9b29817ed5e93523abcffa9 (diff)
mm: rename vmap_page_range to map_kernel_range
This matches the map_kernel_range_noflush API. Also change to pass a size instead of the end, similar to the noflush version.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Gao Xiang <xiang@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Kelley <mikelley@microsoft.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Wei Liu <wei.liu@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200414131348.444715-15-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
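With the rename, callers pass a start and a size instead of a start and an end, mirroring map_kernel_range_noflush(). A minimal caller-side sketch of the convention change (the local names addr, area, prot and pages are illustrative, not a specific call site from this patch):

	/* Old convention: the wrapper took an end address. */
	err = vmap_page_range(addr, addr + get_vm_area_size(area), prot, pages);

	/* New convention: pass a size, as map_kernel_range_noflush() does. */
	err = map_kernel_range(addr, get_vm_area_size(area), prot, pages);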
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e9970849a103..fe8b7aa33094 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -273,13 +273,13 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
 	return nr;
 }
 
-static int vmap_page_range(unsigned long start, unsigned long end,
+static int map_kernel_range(unsigned long start, unsigned long size,
 			   pgprot_t prot, struct page **pages)
 {
 	int ret;
 
-	ret = map_kernel_range_noflush(start, end - start, prot, pages);
-	flush_cache_vmap(start, end);
+	ret = map_kernel_range_noflush(start, size, prot, pages);
+	flush_cache_vmap(start, start + size);
 	return ret;
 }
@@ -1867,7 +1867,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 	kasan_unpoison_vmalloc(mem, size);
 
-	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
+	if (map_kernel_range(addr, size, prot, pages) < 0) {
 		vm_unmap_ram(mem, count);
 		return NULL;
 	}
@@ -2031,10 +2031,9 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
 {
 	unsigned long addr = (unsigned long)area->addr;
-	unsigned long end = addr + get_vm_area_size(area);
 	int err;
 
-	err = vmap_page_range(addr, end, prot, pages);
+	err = map_kernel_range(addr, get_vm_area_size(area), prot, pages);
 	return err > 0 ? 0 : err;
 }