author		Vasily Gorbik <gor@linux.ibm.com>	2025-11-28 23:01:08 +0100
committer	Heiko Carstens <hca@linux.ibm.com>	2025-12-07 16:15:19 +0100
commit		8543ecc0e03b9367e36a93d82bdef0bf5a16dc56 (patch)
tree		3b3fa46c4782b15cecf9ac92dc306f2d0b95ce78
parent		6a35d02fec5a1e2ab6c0c94ccc5b0c57a580b098 (diff)
s390: Unmap early KASAN shadow on memory offlining
Teach the memory hotplug path to tear down KASAN shadow that was mapped
during early boot when a memory block is offlined.

Track for each sclp_mem whether its range was covered by the early KASAN
shadow via an early_shadow_mapped flag. When such a block is deconfigured
and removed via sclp_config_mem_store(), compute the corresponding shadow
range and call vmemmap_free() to unmap the boot-mapped shadow, then clear
the flag.

Using vmemmap_free() for the early shadow is safe despite the use of
large mappings in the boot-time KASAN setup. The initial shadow is mapped
with 1M and 2G pages, where possible. The minimum hotplug memory block
size is 128M and always aligned (the identity mapping is at least 2G
aligned), which corresponds to a 16M chunk of at least 1M aligned shadow.
PMD-mapped 1M shadow pages therefore never need splitting, and PUD-mapped
2G shadow pages can now be split following the preceding changes.

Relax the modify_pagetable() sanity check in vmem so that, with KASAN
enabled, it may also operate on the KASAN shadow region in addition to
the 1:1 mapping and vmemmap area. This allows the KASAN shadow unmapping
to reuse the common vmem helpers.

Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
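For reference, the shadow range computed in sclp_config_mem_store() follows
the usual KASAN generic-mode arithmetic: with KASAN_SHADOW_SCALE_SHIFT = 3,
one shadow byte covers eight bytes of memory, so a 128M block corresponds to
a 16M shadow chunk. A minimal stand-alone sketch of that arithmetic (the
shadow offset below is a made-up placeholder, not the kernel's actual
KASAN_SHADOW_OFFSET):

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT	3	/* 1 shadow byte per 8 bytes */
#define SHADOW_OFFSET	0x0018000000000000UL	/* placeholder offset */

/* Same mapping as the kernel's kasan_mem_to_shadow(), with a fake offset. */
static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	unsigned long block_size = 128UL << 20;	/* minimum hotplug block: 128M */
	unsigned long addr = 4UL << 30;		/* example block base, 2G aligned */
	unsigned long start = mem_to_shadow(addr);
	unsigned long end = start + (block_size >> KASAN_SHADOW_SCALE_SHIFT);

	/* Prints a 16M range: 128M >> 3, aligned to at least 1M. */
	printf("shadow: %#lx-%#lx (%lu MiB)\n", start, end, (end - start) >> 20);
	return 0;
}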
-rw-r--r--	arch/s390/mm/vmem.c	8
-rw-r--r--	drivers/s390/char/sclp_mem.c	16
2 files changed, 23 insertions(+), 1 deletion(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index faed09531499..eeadff45e0e1 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -437,9 +437,15 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
 	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
 		return -EINVAL;
-	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
+	/* Don't mess with any tables not fully in 1:1 mapping, vmemmap & kasan area */
+#ifdef CONFIG_KASAN
+	if (WARN_ON_ONCE(!(start >= KASAN_SHADOW_START && end <= KASAN_SHADOW_END) &&
+			 end > __abs_lowcore))
+		return -EINVAL;
+#else
 	if (WARN_ON_ONCE(end > __abs_lowcore))
 		return -EINVAL;
+#endif
 	for (addr = start; addr < end; addr = next) {
 		next = pgd_addr_end(addr, end);
 		pgd = pgd_offset_k(addr);
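Read as a predicate, the relaxed check above accepts a range that either lies
entirely within the KASAN shadow region or ends at or below __abs_lowcore
(i.e. stays within the 1:1 mapping and vmemmap area). A stand-alone
restatement with placeholder bounds (the real values are derived by the
kernel at boot):

#include <stdbool.h>

/* Placeholder bounds; the kernel derives the real values at boot. */
static const unsigned long kasan_shadow_start = 0x001c000000000000UL;
static const unsigned long kasan_shadow_end   = 0x001d000000000000UL;
static const unsigned long abs_lowcore        = 0x0000040000000000UL;

/* Mirrors the CONFIG_KASAN branch of the check in modify_pagetable(). */
static bool range_is_modifiable(unsigned long start, unsigned long end)
{
	if (start >= kasan_shadow_start && end <= kasan_shadow_end)
		return true;		/* fully inside the KASAN shadow */
	return end <= abs_lowcore;	/* 1:1 mapping & vmemmap area */
}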
diff --git a/drivers/s390/char/sclp_mem.c b/drivers/s390/char/sclp_mem.c
index 676c085b4f8a..27f0d2f12a8b 100644
--- a/drivers/s390/char/sclp_mem.c
+++ b/drivers/s390/char/sclp_mem.c
@@ -44,6 +44,9 @@ struct sclp_mem {
 	unsigned int id;
 	unsigned int memmap_on_memory;
 	unsigned int config;
+#ifdef CONFIG_KASAN
+	unsigned int early_shadow_mapped;
+#endif
 };
 
 struct sclp_mem_arg {
@@ -244,6 +247,16 @@ static ssize_t sclp_config_mem_store(struct kobject *kobj, struct kobj_attribute
 		put_device(&mem->dev);
 		sclp_mem_change_state(addr, block_size, 0);
 		__remove_memory(addr, block_size);
+#ifdef CONFIG_KASAN
+		if (sclp_mem->early_shadow_mapped) {
+			unsigned long start, end;
+
+			start = (unsigned long)kasan_mem_to_shadow(__va(addr));
+			end = start + (block_size >> KASAN_SHADOW_SCALE_SHIFT);
+			vmemmap_free(start, end, NULL);
+			sclp_mem->early_shadow_mapped = 0;
+		}
+#endif
 		WRITE_ONCE(sclp_mem->config, 0);
 	}
 out_unlock:
@@ -316,6 +329,9 @@ static int sclp_create_mem(struct sclp_mem *sclp_mem, struct kset *kset,
 	sclp_mem->memmap_on_memory = memmap_on_memory;
 	sclp_mem->config = config;
+#ifdef CONFIG_KASAN
+	sclp_mem->early_shadow_mapped = config;
+#endif
 	sclp_mem->id = id;
 	kobject_init(&sclp_mem->kobj, &ktype);
 	rc = kobject_add(&sclp_mem->kobj, &kset->kobj, "memory%d", id);
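Taken together, the sclp_mem changes give early_shadow_mapped a one-shot
lifecycle: it is set at creation time for blocks that were configured (and
therefore shadow-mapped) during early boot, and consumed on the first
offline, after which shadow for the block is managed entirely by the regular
hotplug path. A condensed, hypothetical restatement of that flow (not the
driver code):

/* Hypothetical condensed view of the flag's lifecycle. */
struct block_state {
	unsigned int config;
	unsigned int early_shadow_mapped;
};

static void block_create(struct block_state *b, unsigned int configured_at_boot)
{
	b->config = configured_at_boot;
	/* Early KASAN mapped shadow only for boot-configured blocks. */
	b->early_shadow_mapped = configured_at_boot;
}

static void block_offline(struct block_state *b)
{
	if (b->early_shadow_mapped) {
		/* One-time teardown of the boot-mapped shadow (vmemmap_free()). */
		b->early_shadow_mapped = 0;
	}
	b->config = 0;
	/* A later online maps shadow through the regular hotplug path. */
}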