Diffstat (limited to 'mm/memremap.c')
-rw-r--r--  mm/memremap.c  40
1 file changed, 33 insertions(+), 7 deletions(-)
diff --git a/mm/memremap.c b/mm/memremap.c
index 9b2c97ceb775..03e38b7a38f1 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
+#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
@@ -14,6 +15,28 @@
static DEFINE_XARRAY(pgmap_array);
+/*
+ * The memremap() and memremap_pages() interfaces are alternately used
+ * to map persistent memory namespaces. These interfaces place different
+ * constraints on the alignment and size of the mapping (namespace).
+ * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
+ * only map subsections (2MB), and on at least one architecture
+ * (PowerPC) the minimum mapping granularity of memremap_pages() is
+ * 16MB.
+ *
+ * The role of memremap_compat_align() is to communicate the minimum
+ * arch supported alignment of a namespace such that it can freely
+ * switch modes without violating the arch constraint. Namely, do not
+ * allow a namespace to be PAGE_SIZE aligned since that namespace may be
+ * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
+ */
+#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
+unsigned long memremap_compat_align(void)
+{
+ return SUBSECTION_SIZE;
+}
+EXPORT_SYMBOL_GPL(memremap_compat_align);
+#endif
+
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
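The comment block added above explains why SUBSECTION_SIZE (2MB) is the fallback
compat alignment: a namespace must stay reconfigurable between memremap() and
memremap_pages() modes. A minimal sketch of how a consumer might apply it when
validating a namespace, where validate_namespace_align() is a hypothetical
helper and not part of this patch:

/* assumes <linux/kernel.h> for IS_ALIGNED and <linux/memremap.h> */
static int validate_namespace_align(resource_size_t start,
				    resource_size_t size)
{
	unsigned long align = memremap_compat_align();

	/* base and size must both honor the arch compat alignment */
	if (!IS_ALIGNED(start, align) || !IS_ALIGNED(size, align))
		return -EINVAL;
	return 0;
}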
@@ -161,13 +184,13 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
struct resource *res = &pgmap->res;
struct dev_pagemap *conflict_pgmap;
- struct mhp_restrictions restrictions = {
+ struct mhp_params params = {
/*
* We do not want any optional features, only our own memmap
*/
.altmap = pgmap_altmap(pgmap),
+ .pgprot = PAGE_KERNEL,
};
- pgprot_t pgprot = PAGE_KERNEL;
int error, is_ram;
bool need_devmap_managed = true;
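The hunk above replaces mhp_restrictions with mhp_params and folds the local
pgprot_t into it, so the page protection travels with the other hotplug
arguments. For reference, the replacement structure from the companion
memory_hotplug change looks roughly like the following; the exact field set is
an assumption inferred from its usage in this diff:

/* consolidated memory-hotplug arguments: the altmap plus the pgprot */
struct mhp_params {
	struct vmem_altmap *altmap;	/* pre-allocated/reserved memmap */
	pgprot_t pgprot;		/* protection for the kernel mapping */
};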
@@ -194,7 +217,10 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
}
break;
case MEMORY_DEVICE_DEVDAX:
+ need_devmap_managed = false;
+ break;
case MEMORY_DEVICE_PCI_P2PDMA:
+ params.pgprot = pgprot_noncached(params.pgprot);
need_devmap_managed = false;
break;
default:
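The DEVDAX/P2PDMA split above is the behavioral core of this hunk: device-dax
memory keeps the cacheable PAGE_KERNEL protection, while PCI peer-to-peer BAR
space is mapped uncached. An illustrative helper that mirrors that selection;
pgmap_pgprot() is hypothetical and does not appear in the patch:

static pgprot_t pgmap_pgprot(enum memory_type type)
{
	pgprot_t prot = PAGE_KERNEL;

	/* CPU caching of PCI BAR space could break peer-to-peer DMA */
	if (type == MEMORY_DEVICE_PCI_P2PDMA)
		prot = pgprot_noncached(prot);
	return prot;
}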
@@ -259,8 +285,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
if (nid < 0)
nid = numa_mem_id();
- error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
- resource_size(res));
+ error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(res->start),
+ 0, resource_size(res));
if (error)
goto err_pfn_remap;
@@ -279,7 +305,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
*/
if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
error = add_pages(nid, PHYS_PFN(res->start),
- PHYS_PFN(resource_size(res)), &restrictions);
+ PHYS_PFN(resource_size(res)), &params);
} else {
error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
if (error) {
@@ -288,7 +314,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
}
error = arch_add_memory(nid, res->start, resource_size(res),
- &restrictions);
+ &params);
}
if (!error) {
@@ -296,7 +322,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
- PHYS_PFN(resource_size(res)), restrictions.altmap);
+ PHYS_PFN(resource_size(res)), params.altmap);
}
mem_hotplug_done();
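Taken together, a caller of this 5.6-era memremap_pages() fills in the
embedded struct resource and a device type before calling in. A hedged usage
sketch; the map_device_memory() wrapper and its driver context are
hypothetical:

static void *map_device_memory(struct dev_pagemap *pgmap,
			       phys_addr_t base, size_t size)
{
	pgmap->res.start = base;
	pgmap->res.end = base + size - 1;
	pgmap->type = MEMORY_DEVICE_DEVDAX;

	/* returns the __va() of the mapped range or an ERR_PTR() */
	return memremap_pages(pgmap, NUMA_NO_NODE);
}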