Diffstat (limited to 'init')
-rw-r--r--  init/Kconfig | 16
-rw-r--r--  init/main.c  | 29
2 files changed, 34 insertions(+), 11 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index 8514b25db21c..78cb2461012e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1275,12 +1275,17 @@ config BASE_FULL
config FUTEX
bool "Enable futex support" if EXPERT
default y
- select RT_MUTEXES
+ imply RT_MUTEXES
help
Disabling this option will cause the kernel to be built without
support for "fast userspace mutexes". The resulting kernel may not
run glibc-based applications correctly.

+config FUTEX_PI
+ bool
+ depends on FUTEX && RT_MUTEXES
+ default y
+
config HAVE_FUTEX_CMPXCHG
bool
depends on FUTEX
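With this split, a kernel can have FUTEX=y while the PI operations are compiled out. A minimal userspace probe for that case (a hypothetical sketch, error handling elided; on kernels without PI-futex support the call is expected to fail, typically with ENOSYS):

	#include <stdio.h>
	#include <string.h>
	#include <errno.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/futex.h>

	int main(void)
	{
		unsigned int futex_word = 0;	/* 0 == unlocked, so LOCK_PI can acquire it */

		/* futex(uaddr, FUTEX_LOCK_PI, 0, NULL, NULL, 0); on success
		 * the kernel stores the caller's TID in the futex word. */
		if (syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0) == -1)
			printf("FUTEX_LOCK_PI unsupported/failed: %s\n", strerror(errno));
		else
			printf("FUTEX_LOCK_PI acquired, word is now %#x\n", futex_word);
		return 0;
	}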
@@ -1571,6 +1576,15 @@ config SLAB_FREELIST_RANDOM
security feature reduces the predictability of the kernel slab
allocator against heap overflows.

+config SLAB_FREELIST_HARDENED
+ bool "Harden slab freelist metadata"
+ depends on SLUB
+ help
+ Many kernel heap attacks try to target slab cache metadata and
+ other infrastructure. This option makes minor performance
+ sacrifices to harden the kernel slab allocator against common
+ freelist exploit methods.
+
config SLUB_CPU_PARTIAL
default y
depends on SLUB && SMP
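What the new option buys: the next-free pointer that SLUB stores inside each free object gets obfuscated, so an attacker who overwrites it via a heap overflow cannot point it at memory of their choosing without knowing a per-cache secret. A standalone sketch of the idea (hypothetical helper names, not the mainline implementation):

	#include <stdint.h>
	#include <stdio.h>

	/* Encode/decode a stored freelist pointer with a per-cache secret
	 * and the slot address; XOR is its own inverse. */
	static uintptr_t encode_free_ptr(uintptr_t secret, uintptr_t slot, uintptr_t next)
	{
		return next ^ secret ^ slot;
	}

	static uintptr_t decode_free_ptr(uintptr_t secret, uintptr_t slot, uintptr_t stored)
	{
		return stored ^ secret ^ slot;
	}

	int main(void)
	{
		uintptr_t secret = 0xcafef00d;	/* would come from a CSPRNG in real life */
		uintptr_t slot = 0x1000, next = 0x2000;
		uintptr_t stored = encode_free_ptr(secret, slot, next);

		/* a corrupted 'stored' decodes to an unpredictable address */
		printf("stored=%#jx decoded=%#jx\n",
		       (uintmax_t)stored, (uintmax_t)decode_free_ptr(secret, slot, stored));
		return 0;
	}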
diff --git a/init/main.c b/init/main.c
index 052481fbe363..0ee9c6866ada 100644
--- a/init/main.c
+++ b/init/main.c
@@ -430,7 +430,6 @@ static noinline void __ref rest_init(void)
* The boot idle thread must execute schedule()
* at least once to get things moving:
*/
- init_idle_bootup_task(current);
schedule_preempt_disabled();
/* Call into cpu_idle with preempt disabled */
cpu_startup_entry(CPUHP_ONLINE);
@@ -488,6 +487,8 @@ void __init __weak thread_stack_cache_init(void)
}
#endif

+void __init __weak mem_encrypt_init(void) { }
+
/*
* Set up kernel memory allocators
*/
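The new stub follows the usual kernel pattern: a generic no-op definition marked __weak, which an architecture that needs it can replace by supplying a strong definition of the same symbol. A userspace sketch of the same mechanism (hypothetical names):

	#include <stdio.h>

	/* Generic no-op default; marked weak so another object file that
	 * defines the same symbol non-weak wins at link time. */
	void __attribute__((weak)) arch_mem_encrypt_init(void)
	{
		puts("weak stub: nothing to do");
	}

	int main(void)
	{
		/* With only this file linked, the weak stub runs; linking an
		 * object with a strong arch_mem_encrypt_init() overrides it. */
		arch_mem_encrypt_init();
		return 0;
	}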
@@ -500,7 +501,6 @@ static void __init mm_init(void)
page_ext_init_flatmem();
mem_init();
kmem_cache_init();
- percpu_init_late();
pgtable_init();
vmalloc_init();
ioremap_huge_init();
@@ -515,12 +515,6 @@ asmlinkage __visible void __init start_kernel(void)
smp_setup_processor_id();
debug_objects_early_init();

- /*
- * Set up the initial canary ASAP:
- */
- add_latent_entropy();
- boot_init_stack_canary();
-
cgroup_init_early();

local_irq_disable();
@@ -534,6 +528,13 @@ asmlinkage __visible void __init start_kernel(void)
page_address_init();
pr_notice("%s", linux_banner);
setup_arch(&command_line);
+ /*
+ * Set up the initial canary and entropy after arch setup
+ * and after adding latent and command-line entropy.
+ */
+ add_latent_entropy();
+ add_device_randomness(command_line, strlen(command_line));
+ boot_init_stack_canary();
mm_init_cpumask(&init_mm);
setup_command_line(command_line);
setup_nr_cpu_ids();
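Moving the canary setup after setup_arch() lets it consume the latent and command-line entropy mixed in just above. The point of that ordering, in a toy userspace model (hypothetical demo, not kernel code): a canary seeded from a predictable source is only as strong as the attacker's inability to guess it.

	#include <stdio.h>
	#include <string.h>
	#include <time.h>

	struct frame {
		char buf[16];
		unsigned long canary;	/* sits past the buffer, like a stack guard */
	};

	int main(void)
	{
		/* stand-in for entropy; the kernel now seeds its canary only
		 * after real entropy sources have been added */
		unsigned long guard = (unsigned long)time(NULL) * 2654435761UL;
		struct frame f;
		char payload[sizeof(f)];

		f.canary = guard;
		memset(payload, 'A', sizeof(payload));
		memcpy(&f, payload, sizeof(f));	/* overrun of buf clobbers the canary */

		if (f.canary != guard)
			puts("stack smashing detected (toy check)");
		return 0;
	}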
@@ -541,7 +542,7 @@ asmlinkage __visible void __init start_kernel(void)
boot_cpu_state_init();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */

- build_all_zonelists(NULL, NULL);
+ build_all_zonelists(NULL);
page_alloc_init();

pr_notice("Kernel command line: %s\n", boot_command_line);
@@ -641,6 +642,14 @@ asmlinkage __visible void __init start_kernel(void)
*/
locking_selftest();

+ /*
+ * This needs to be called before any devices perform DMA
+ * operations that might use the SWIOTLB bounce buffers. It will
+ * mark the bounce buffers as decrypted so that their plain-text
+ * contents are not passed through decryption when accessed.
+ */
+ mem_encrypt_init();
+
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start && !initrd_below_start_ok &&
page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
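The comment above is the key constraint: SWIOTLB bounce buffers are shared with devices, so under memory encryption they must be marked decrypted before any DMA touches them. A loose userspace analogy of the bouncing itself (toy code, not the SWIOTLB API):

	#include <stdio.h>
	#include <string.h>

	static char bounce[256];	/* stands in for the shared, decrypted SWIOTLB pool */

	/* Stage CPU data into the shared buffer before a device "reads" it;
	 * with encryption active the device could not see the original. */
	static void dma_to_device(const void *cpu_buf, size_t len)
	{
		memcpy(bounce, cpu_buf, len);
		/* ... device DMA would run against 'bounce' here ... */
	}

	int main(void)
	{
		const char msg[] = "plaintext for the device";

		dma_to_device(msg, sizeof(msg));
		printf("staged %zu bytes: %s\n", sizeof(msg), bounce);
		return 0;
	}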
@@ -651,8 +660,8 @@ asmlinkage __visible void __init start_kernel(void)
}
#endif
page_ext_init();
- debug_objects_mem_init();
kmemleak_init();
+ debug_objects_mem_init();
setup_per_cpu_pageset();
numa_policy_init();
if (late_time_init)