author		Linus Torvalds <torvalds@linux-foundation.org>	2019-06-24 21:20:39 +0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-06-24 21:20:39 +0800
commit		26df62aaae9f85e900cd5f1a5c28e44488f60de6 (patch)
tree		f9b2a4437d00ae1ee49fc92772052b23e2b41454 /arch/powerpc
parent		9216514602ac436c116664ac9eaf18127ccf34df (diff)
parent		65565a68c582fec2f2a8f5aaa399118ab1209103 (diff)
Merge tag 'powerpc-5.2-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "One fix for a bug in our context id handling on 64-bit hash CPUs,
  which can lead to unrelated processes being able to read/write to
  each other's virtual memory. See the commit for full details.

  That is the fix for CVE-2019-12817.

  This also adds a kernel selftest for the bug"

* tag 'powerpc-5.2-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  selftests/powerpc: Add test of fork with mapping above 512TB
  powerpc/mm/64s/hash: Reallocate context ids on fork
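As a rough illustration of the scenario the selftest exercises (this is a
minimal sketch, not the actual tools/testing/selftests source; the mapping
size, fill patterns, and checks here are assumptions): on 64-bit hash, a
mapping above 512TB uses one of the extended context ids, and before the fix
a child kept the parent's ids across fork(), so writes in one process could
land in the other's memory instead of a private COW copy.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* A hint above 512TB (1UL << 49) so the mapping needs one of the
	 * extended context ids that the buggy fork path failed to refresh. */
	void *hint = (void *)(uintptr_t)(512ULL << 40);
	size_t len = 64 * 1024;

	char *p = mmap(hint, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED || (uintptr_t)p < (uintptr_t)hint) {
		perror("mmap above 512TB");
		return 1;
	}

	memset(p, 0xaa, len);

	pid_t pid = fork();
	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0) {
		/* Child: dirty the page. With the bug, this write could land
		 * in the parent's page instead of a private COW copy. */
		memset(p, 0x55, len);
		_exit(0);
	}
	waitpid(pid, NULL, 0);

	/* On a fixed kernel the parent's pattern is untouched. */
	if (p[0] != (char)0xaa) {
		fprintf(stderr, "FAIL: child write visible in parent\n");
		return 1;
	}
	puts("OK");
	return 0;
}

On kernels or architectures where the address hint cannot be satisfied, the
sketch simply bails out at the mmap check rather than reporting a result.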
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/mm/book3s64/mmu_context.c	55
1 file changed, 47 insertions, 8 deletions
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index bb70391401f7..794404d50a85 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -50,20 +50,52 @@ EXPORT_SYMBOL_GPL(hash__alloc_context_id);
 
 void slb_setup_new_exec(void);
 
+static int realloc_context_ids(mm_context_t *ctx)
+{
+	int i, id;
+
+	/*
+	 * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
+	 * there wasn't one allocated previously (which happens in the exec
+	 * case where ctx is newly allocated).
+	 *
+	 * We have to be a bit careful here. We must keep the existing ids in
+	 * the array, so that we can test if they're non-zero to decide if we
+	 * need to allocate a new one. However in case of error we must free the
+	 * ids we've allocated but *not* any of the existing ones (or risk a
+	 * UAF). That's why we decrement i at the start of the error handling
+	 * loop, to skip the id that we just tested but couldn't reallocate.
+	 */
+	for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
+		if (i == 0 || ctx->extended_id[i]) {
+			id = hash__alloc_context_id();
+			if (id < 0)
+				goto error;
+
+			ctx->extended_id[i] = id;
+		}
+	}
+
+	/* The caller expects us to return id */
+	return ctx->id;
+
+error:
+	for (i--; i >= 0; i--) {
+		if (ctx->extended_id[i])
+			ida_free(&mmu_context_ida, ctx->extended_id[i]);
+	}
+
+	return id;
+}
+
 static int hash__init_new_context(struct mm_struct *mm)
 {
 	int index;
 
-	index = hash__alloc_context_id();
-	if (index < 0)
-		return index;
-
 	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
 					   GFP_KERNEL);
-	if (!mm->context.hash_context) {
-		ida_free(&mmu_context_ida, index);
+	if (!mm->context.hash_context)
 		return -ENOMEM;
-	}
 
 	/*
 	 * The old code would re-promote on fork, we don't do that when using
@@ -91,13 +123,20 @@ static int hash__init_new_context(struct mm_struct *mm)
 			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
 								GFP_KERNEL);
 			if (!mm->context.hash_context->spt) {
-				ida_free(&mmu_context_ida, index);
 				kfree(mm->context.hash_context);
 				return -ENOMEM;
 			}
 		}
 #endif
+	}
+	index = realloc_context_ids(&mm->context);
+	if (index < 0) {
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+		kfree(mm->context.hash_context->spt);
+#endif
+		kfree(mm->context.hash_context);
+		return index;
 	}
 
 	pkey_mm_init(mm);