Diffstat (limited to 'init/main.c')
 -rw-r--r--   init/main.c | 396
1 file changed, 227 insertions, 169 deletions
diff --git a/init/main.c b/init/main.c
index e1c3911d7c70..b84818ad9685 100644
--- a/init/main.c
+++ b/init/main.c
@@ -13,6 +13,7 @@
 #define DEBUG        /* Enable initcall_debug */
 
 #include <linux/types.h>
+#include <linux/export.h>
 #include <linux/extable.h>
 #include <linux/module.h>
 #include <linux/proc_fs.h>
@@ -50,8 +51,8 @@
 #include <linux/writeback.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
+#include <linux/memcontrol.h>
 #include <linux/cgroup.h>
-#include <linux/efi.h>
 #include <linux/tick.h>
 #include <linux/sched/isolation.h>
 #include <linux/interrupt.h>
@@ -62,7 +63,6 @@
 #include <linux/rmap.h>
 #include <linux/mempolicy.h>
 #include <linux/key.h>
-#include <linux/page_ext.h>
 #include <linux/debug_locks.h>
 #include <linux/debugobjects.h>
 #include <linux/lockdep.h>
@@ -89,6 +89,7 @@
 #include <linux/sched/task_stack.h>
 #include <linux/context_tracking.h>
 #include <linux/random.h>
+#include <linux/moduleloader.h>
 #include <linux/list.h>
 #include <linux/integrity.h>
 #include <linux/proc_ns.h>
@@ -96,15 +97,16 @@
 #include <linux/cache.h>
 #include <linux/rodata_test.h>
 #include <linux/jump_label.h>
-#include <linux/mem_encrypt.h>
 #include <linux/kcsan.h>
 #include <linux/init_syscalls.h>
 #include <linux/stackdepot.h>
 #include <linux/randomize_kstack.h>
+#include <linux/pidfs.h>
+#include <linux/ptdump.h>
+#include <linux/time_namespace.h>
 #include <net/net_namespace.h>
 
 #include <asm/io.h>
-#include <asm/bugs.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -116,10 +118,6 @@
 static int kernel_init(void *);
 
-extern void init_IRQ(void);
-extern void radix_tree_init(void);
-extern void maple_tree_init(void);
-
 /*
  * Debug helper: via this flag we know that we are in 'early bootup code'
  * where only the boot processor is running with IRQ disabled. This means
@@ -138,7 +136,6 @@ EXPORT_SYMBOL(system_state);
 
 #define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT
 #define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT
 
-extern void time_init(void);
 /* Default late time init is NULL. archs can override this later. */
 void (*__initdata late_time_init)(void);
@@ -197,8 +194,6 @@ static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
 static const char *panic_later, *panic_param;
 
-extern const struct obs_kernel_param __setup_start[], __setup_end[];
-
 static bool __init obsolete_checksetup(char *line)
 {
         const struct obs_kernel_param *p;
@@ -334,7 +329,7 @@ static int __init xbc_snprint_cmdline(char *buf, size_t size,
 {
         struct xbc_node *knode, *vnode;
         char *end = buf + size;
-        const char *val;
+        const char *val, *q;
         int ret;
 
         xbc_node_for_each_key_value(root, knode, val) {
@@ -352,8 +347,14 @@
                         continue;
                 }
                 xbc_array_for_each_value(vnode, val) {
-                        ret = snprintf(buf, rest(buf, end), "%s=\"%s\" ",
-                                       xbc_namebuf, val);
+                        /*
+                         * For prettier and more readable /proc/cmdline, only
+                         * quote the value when necessary, i.e. when it contains
+                         * whitespace.
+                         */
+                        q = strpbrk(val, " \t\r\n") ? "\"" : "";
+                        ret = snprintf(buf, rest(buf, end), "%s=%s%s%s ",
+                                       xbc_namebuf, q, val, q);
                         if (ret < 0)
                                 return ret;
                         buf += ret;
@@ -429,7 +430,7 @@ static void __init setup_boot_config(void)
         err = parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
                          bootconfig_params);
 
-        if (IS_ERR(err) || !bootconfig_found)
+        if (IS_ERR(err) || !(bootconfig_found || IS_ENABLED(CONFIG_BOOT_CONFIG_FORCE)))
                 return;
 
         /* parse_args() stops at the next param of '--' and returns an address */
@@ -437,7 +438,11 @@
                 initargs_offs = err - tmp_cmdline;
 
         if (!data) {
-                pr_err("'bootconfig' found on command line, but no bootconfig found\n");
+                /* If user intended to use bootconfig, show an error level message */
+                if (bootconfig_found)
+                        pr_err("'bootconfig' found on command line, but no bootconfig found\n");
+                else
+                        pr_info("No bootconfig data provided, so skipping bootconfig");
                 return;
         }
 
@@ -490,6 +495,11 @@ static int __init warn_bootconfig(char *str)
 early_param("bootconfig", warn_bootconfig);
 
+bool __init cmdline_has_extra_options(void)
+{
+        return extra_command_line || extra_init_args;
+}
+
 /* Change NUL term back to "=", to make "param" the whole string. */
 static void __init repair_env_string(char *param, char *val)
 {
@@ -535,9 +545,25 @@ static int __init unknown_bootoption(char *param, char *val,
                                      const char *unused, void *arg)
 {
         size_t len = strlen(param);
+        /*
+         * Well-known bootloader identifiers:
+         * 1. LILO/Grub pass "BOOT_IMAGE=...";
+         * 2. kexec/kdump (kexec-tools) pass "kexec".
+         */
+        const char *bootloader[] = { "BOOT_IMAGE=", "kexec", NULL };
+
+        /* Handle params aliased to sysctls */
+        if (sysctl_is_alias(param))
+                return 0;
 
         repair_env_string(param, val);
 
+        /* Handle bootloader identifier */
+        for (int i = 0; bootloader[i]; i++) {
+                if (strstarts(param, bootloader[i]))
+                        return 0;
+        }
+
         /* Handle obsolete-style parameters */
         if (obsolete_checksetup(param))
                 return 0;
@@ -605,7 +631,6 @@ static int __init rdinit_setup(char *str)
 __setup("rdinit=", rdinit_setup);
 
 #ifndef CONFIG_SMP
-static const unsigned int setup_max_cpus = NR_CPUS;
 static inline void setup_nr_cpu_ids(void) { }
 static inline void smp_prepare_cpus(unsigned int maxcpus) { }
 #endif
@@ -622,18 +647,18 @@ static void __init setup_command_line(char *command_line)
 
         if (extra_command_line)
                 xlen = strlen(extra_command_line);
-        if (extra_init_args)
+        if (extra_init_args) {
+                extra_init_args = strim(extra_init_args); /* remove trailing space */
                 ilen = strlen(extra_init_args) + 4; /* for " -- " */
+        }
 
-        len = xlen + strlen(boot_command_line) + 1;
+        len = xlen + strlen(boot_command_line) + ilen + 1;
 
-        saved_command_line = memblock_alloc(len + ilen, SMP_CACHE_BYTES);
-        if (!saved_command_line)
-                panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
+        saved_command_line = memblock_alloc_or_panic(len, SMP_CACHE_BYTES);
 
-        static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
-        if (!static_command_line)
-                panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+        len = xlen + strlen(command_line) + 1;
+
+        static_command_line = memblock_alloc_or_panic(len, SMP_CACHE_BYTES);
 
         if (xlen) {
                 /*
@@ -683,7 +708,7 @@ static void __init setup_command_line(char *command_line)
 
 static __initdata DECLARE_COMPLETION(kthreadd_done);
 
-noinline void __ref rest_init(void)
+static noinline void __ref __noreturn rest_init(void)
 {
         struct task_struct *tsk;
         int pid;
@@ -707,7 +732,7 @@ noinline void __ref rest_init(void)
         rcu_read_unlock();
 
         numa_default_policy();
-        pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+        pid = kernel_thread(kthreadd, NULL, NULL, CLONE_FS | CLONE_FILES);
         rcu_read_lock();
         kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
         rcu_read_unlock();
@@ -739,10 +764,7 @@ static int __init do_early_param(char *param, char *val,
         const struct obs_kernel_param *p;
 
         for (p = __setup_start; p < __setup_end; p++) {
-                if ((p->early && parameq(param, p->str)) ||
-                    (strcmp(param, "console") == 0 &&
-                     strcmp(p->str, "earlycon") == 0)
-                ) {
+                if (p->early && parameq(param, p->str)) {
                         if (p->setup_func(val) != 0)
                                 pr_warn("Malformed early option '%s'\n", param);
                 }
@@ -778,14 +800,16 @@ void __init __weak smp_setup_processor_id(void)
 {
 }
 
+void __init __weak smp_prepare_boot_cpu(void)
+{
+}
+
 # if THREAD_SIZE >= PAGE_SIZE
 void __init __weak thread_stack_cache_init(void)
 {
 }
 #endif
 
-void __init __weak mem_encrypt_init(void) { }
-
 void __init __weak poking_init(void) { }
 
 void __init __weak pgtable_cache_init(void) { }
@@ -803,69 +827,6 @@ static inline void initcall_debug_enable(void)
 }
 #endif
 
-/* Report memory auto-initialization states for this boot. */
-static void __init report_meminit(void)
-{
-        const char *stack;
-
-        if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
-                stack = "all(pattern)";
-        else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
-                stack = "all(zero)";
-        else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
-                stack = "byref_all(zero)";
-        else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
-                stack = "byref(zero)";
-        else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
-                stack = "__user(zero)";
-        else
-                stack = "off";
-
-        pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
-                stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
-                want_init_on_free() ? "on" : "off");
-        if (want_init_on_free())
-                pr_info("mem auto-init: clearing system memory may take some time...\n");
-}
-
-/*
- * Set up kernel memory allocators
- */
-static void __init mm_init(void)
-{
-        /*
-         * page_ext requires contiguous pages,
-         * bigger than MAX_ORDER unless SPARSEMEM.
-         */
-        page_ext_init_flatmem();
-        init_mem_debugging_and_hardening();
-        kfence_alloc_pool();
-        report_meminit();
-        kmsan_init_shadow();
-        stack_depot_early_init();
-        mem_init();
-        mem_init_print_info();
-        kmem_cache_init();
-        /*
-         * page_owner must be initialized after buddy is ready, and also after
-         * slab is ready so that stack_depot_init() works properly
-         */
-        page_ext_init_flatmem_late();
-        kmemleak_init();
-        pgtable_init();
-        debug_objects_mem_init();
-        vmalloc_init();
-        /* Should be run after vmap initialization */
-        if (early_page_ext_enabled())
-                page_ext_init();
-        /* Should be run before the first non-init thread is created */
-        init_espfix_bsp();
-        /* Should be run after espfix64 is set up. */
-        pti_init();
-        kmsan_init_runtime();
-        mm_cache_init();
-}
-
 #ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
 DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
                            randomize_kstack_offset);
@@ -889,11 +850,6 @@ static int __init early_randomize_kstack_offset(char *buf)
 early_param("randomize_kstack_offset", early_randomize_kstack_offset);
 #endif
 
-void __init __weak arch_call_rest_init(void)
-{
-        rest_init();
-}
-
 static void __init print_unknown_bootoptions(void)
 {
         char *unknown_options;
@@ -937,7 +893,116 @@
         memblock_free(unknown_options, len);
 }
 
-asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
+static void __init early_numa_node_init(void)
+{
+#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+#ifndef cpu_to_node
+        int cpu;
+
+        /* The early_cpu_to_node() should be ready here. */
+        for_each_possible_cpu(cpu)
+                set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
+#endif
+#endif
+}
+
+#define KERNEL_CMDLINE_PREFIX "Kernel command line: "
+#define KERNEL_CMDLINE_PREFIX_LEN (sizeof(KERNEL_CMDLINE_PREFIX) - 1)
+#define KERNEL_CMDLINE_CONTINUATION " \\"
+#define KERNEL_CMDLINE_CONTINUATION_LEN (sizeof(KERNEL_CMDLINE_CONTINUATION) - 1)
+
+#define MIN_CMDLINE_LOG_WRAP_IDEAL_LEN (KERNEL_CMDLINE_PREFIX_LEN + \
+                                        KERNEL_CMDLINE_CONTINUATION_LEN)
+#define CMDLINE_LOG_WRAP_IDEAL_LEN (CONFIG_CMDLINE_LOG_WRAP_IDEAL_LEN > \
+                                    MIN_CMDLINE_LOG_WRAP_IDEAL_LEN ? \
+                                    CONFIG_CMDLINE_LOG_WRAP_IDEAL_LEN : \
+                                    MIN_CMDLINE_LOG_WRAP_IDEAL_LEN)
+
+#define IDEAL_CMDLINE_LEN (CMDLINE_LOG_WRAP_IDEAL_LEN - KERNEL_CMDLINE_PREFIX_LEN)
+#define IDEAL_CMDLINE_SPLIT_LEN (IDEAL_CMDLINE_LEN - KERNEL_CMDLINE_CONTINUATION_LEN)
+
+/**
+ * print_kernel_cmdline() - Print the kernel cmdline with wrapping.
+ * @cmdline: The cmdline to print.
+ *
+ * Print the kernel command line, trying to wrap based on the Kconfig knob
+ * CONFIG_CMDLINE_LOG_WRAP_IDEAL_LEN.
+ *
+ * Wrapping is based on spaces, ignoring quotes. All lines are prefixed
+ * with "Kernel command line: " and lines that are not the last line have
+ * a " \" suffix added to them. The prefix and suffix count towards the
+ * line length for wrapping purposes. The ideal length will be exceeded
+ * if no appropriate place to wrap is found.
+ *
+ * Example output if CONFIG_CMDLINE_LOG_WRAP_IDEAL_LEN is 40:
+ * Kernel command line: loglevel=7 \
+ * Kernel command line: init=/sbin/init \
+ * Kernel command line: root=PARTUUID=8c3efc1a-768b-6642-8d0c-89eb782f19f0/PARTNROFF=1 \
+ * Kernel command line: rootwait ro \
+ * Kernel command line: my_quoted_arg="The \
+ * Kernel command line: quick brown fox \
+ * Kernel command line: jumps over the \
+ * Kernel command line: lazy dog."
+ */
+static void __init print_kernel_cmdline(const char *cmdline)
+{
+        size_t len;
+
+        /* Config option of 0 or anything longer than the max disables wrapping */
+        if (CONFIG_CMDLINE_LOG_WRAP_IDEAL_LEN == 0 ||
+            IDEAL_CMDLINE_LEN >= COMMAND_LINE_SIZE - 1) {
+                pr_notice("%s%s\n", KERNEL_CMDLINE_PREFIX, cmdline);
+                return;
+        }
+
+        len = strlen(cmdline);
+        while (len > IDEAL_CMDLINE_LEN) {
+                const char *first_space;
+                const char *prev_cutoff;
+                const char *cutoff;
+                int to_print;
+                size_t used;
+
+                /* Find the last ' ' that wouldn't make the line too long */
+                prev_cutoff = NULL;
+                cutoff = cmdline;
+                while (true) {
+                        cutoff = strchr(cutoff + 1, ' ');
+                        if (!cutoff || cutoff - cmdline > IDEAL_CMDLINE_SPLIT_LEN)
+                                break;
+                        prev_cutoff = cutoff;
+                }
+                if (prev_cutoff)
+                        cutoff = prev_cutoff;
+                else if (!cutoff)
+                        break;
+
+                /* Find the beginning and end of the string of spaces */
+                first_space = cutoff;
+                while (first_space > cmdline && first_space[-1] == ' ')
+                        first_space--;
+                to_print = first_space - cmdline;
+                while (*cutoff == ' ')
+                        cutoff++;
+                used = cutoff - cmdline;
+
+                /* If the whole string is used, break and do the final printout */
+                if (len == used)
+                        break;
+
+                if (to_print)
+                        pr_notice("%s%.*s%s\n", KERNEL_CMDLINE_PREFIX,
+                                  to_print, cmdline, KERNEL_CMDLINE_CONTINUATION);
+
+                len -= used;
+                cmdline += used;
+        }
+        if (len)
+                pr_notice("%s%s\n", KERNEL_CMDLINE_PREFIX, cmdline);
+}
+
+asmlinkage __visible __init __no_sanitize_address __noreturn __no_stack_protector
+void start_kernel(void)
 {
         char *command_line;
         char *after_dashes;
@@ -959,21 +1024,21 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
         boot_cpu_init();
         page_address_init();
         pr_notice("%s", linux_banner);
-        early_security_init();
         setup_arch(&command_line);
+        /* Static keys and static calls are needed by LSMs */
+        jump_label_init();
+        static_call_init();
+        early_security_init();
         setup_boot_config();
         setup_command_line(command_line);
         setup_nr_cpu_ids();
         setup_per_cpu_areas();
         smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+        early_numa_node_init();
         boot_cpu_hotplug_init();
 
-        build_all_zonelists(NULL);
-        page_alloc_init();
-
-        pr_notice("Kernel command line: %s\n", saved_command_line);
+        print_kernel_cmdline(saved_command_line);
         /* parameters may set static keys */
-        jump_label_init();
         parse_early_param();
         after_dashes = parse_args("Booting kernel",
                                   static_command_line, __start___param,
@@ -992,13 +1057,14 @@
 
         /*
          * These use large bootmem allocations and must precede
-         * kmem_cache_init()
+         * initalization of page allocator
         */
         setup_log_buf(0);
         vfs_caches_init_early();
         sort_main_extable();
         trap_init();
-        mm_init();
+        mm_core_init();
+        maple_tree_init();
         poking_init();
         ftrace_init();
 
@@ -1016,7 +1082,6 @@
                  "Interrupts were enabled *very* early, fixing it\n"))
                 local_irq_disable();
         radix_tree_init();
-        maple_tree_init();
 
         /*
         * Set up housekeeping before setting up workqueues to allow the unbound
@@ -1032,6 +1097,7 @@
         workqueue_init_early();
 
         rcu_init();
+        kvfree_rcu_init();
 
         /* Trace events are available after this */
         trace_init();
@@ -1045,7 +1111,7 @@
         init_IRQ();
         tick_init();
         rcu_init_nohz();
-        init_timers();
+        timers_init();
         srcu_init();
         hrtimers_init();
         softirq_init();
@@ -1088,14 +1154,6 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
         */
         locking_selftest();
 
-        /*
-         * This needs to be called before any devices perform DMA
-         * operations that might use the SWIOTLB bounce buffers. It will
-         * mark the bounce buffers as decrypted so that their usage will
-         * not cause "plain-text" data to be decrypted when accessed.
-         */
-        mem_encrypt_init();
-
 #ifdef CONFIG_BLK_DEV_INITRD
         if (initrd_start && !initrd_below_start_ok &&
             page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
@@ -1112,17 +1170,17 @@
         late_time_init();
         sched_clock_init();
         calibrate_delay();
+
+        arch_cpu_finalize_init();
+
         pid_idr_init();
         anon_vma_init();
-#ifdef CONFIG_X86
-        if (efi_enabled(EFI_RUNTIME_SERVICES))
-                efi_enter_virtual_mode();
-#endif
         thread_stack_cache_init();
         cred_init();
         fork_init();
         proc_caches_init();
         uts_ns_init();
+        time_ns_init();
         key_init();
         security_init();
         dbg_late_init();
@@ -1133,21 +1191,27 @@
         seq_file_init();
         proc_root_init();
         nsfs_init();
+        pidfs_init();
         cpuset_init();
+        mem_cgroup_init();
         cgroup_init();
         taskstats_init_early();
         delayacct_init();
 
-        check_bugs();
-
         acpi_subsystem_init();
         arch_post_acpi_subsys_init();
         kcsan_init();
 
         /* Do the rest non-__init'ed, we're now alive */
-        arch_call_rest_init();
+        rest_init();
 
+        /*
         * Avoid stack canaries in callers of boot_init_stack_canary for gcc-10
+         * and older.
+         */
+#if !__has_attribute(__no_stack_protector__)
         prevent_tail_call_optimization();
+#endif
 }
 
 /* Call all constructor functions linked into the kernel. */
@@ -1185,16 +1249,10 @@ static int __init initcall_blacklist(char *str)
                 str_entry = strsep(&str, ",");
                 if (str_entry) {
                         pr_debug("blacklisting initcall %s\n", str_entry);
-                        entry = memblock_alloc(sizeof(*entry),
+                        entry = memblock_alloc_or_panic(sizeof(*entry),
                                                SMP_CACHE_BYTES);
-                        if (!entry)
-                                panic("%s: Failed to allocate %zu bytes\n",
-                                      __func__, sizeof(*entry));
-                        entry->buf = memblock_alloc(strlen(str_entry) + 1,
+                        entry->buf = memblock_alloc_or_panic(strlen(str_entry) + 1,
                                                SMP_CACHE_BYTES);
-                        if (!entry->buf)
-                                panic("%s: Failed to allocate %zu bytes\n",
-                                      __func__, strlen(str_entry) + 1);
                         strcpy(entry->buf, str_entry);
                         list_add(&entry->next, &blacklisted_initcalls);
                 }
@@ -1263,6 +1321,12 @@ trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
                  fn, ret, (unsigned long long)ktime_us_delta(rettime, *calltime));
 }
 
+static __init_or_module void
+trace_initcall_level_cb(void *data, const char *level)
+{
+        printk(KERN_DEBUG "entering initcall level: %s\n", level);
+}
+
 static ktime_t initcall_calltime;
 
 #ifdef TRACEPOINTS_ENABLED
@@ -1274,10 +1338,12 @@ static void __init initcall_debug_enable(void)
                                             &initcall_calltime);
         ret |= register_trace_initcall_finish(trace_initcall_finish_cb,
                                               &initcall_calltime);
+        ret |= register_trace_initcall_level(trace_initcall_level_cb, NULL);
         WARN(ret, "Failed to register initcall tracepoints\n");
 }
 # define do_trace_initcall_start        trace_initcall_start
 # define do_trace_initcall_finish       trace_initcall_finish
+# define do_trace_initcall_level        trace_initcall_level
 #else
 static inline void do_trace_initcall_start(initcall_t fn)
 {
@@ -1291,6 +1357,12 @@ static inline void do_trace_initcall_finish(initcall_t fn, int ret)
                 return;
         trace_initcall_finish_cb(&initcall_calltime, fn, ret);
 }
+static inline void do_trace_initcall_level(const char *level)
+{
+        if (!initcall_debug)
+                return;
+        trace_initcall_level_cb(NULL, level);
+}
 #endif /* !TRACEPOINTS_ENABLED */
 
 int __init_or_module do_one_initcall(initcall_t fn)
@@ -1323,17 +1395,6 @@
 }
 
-extern initcall_entry_t __initcall_start[];
-extern initcall_entry_t __initcall0_start[];
-extern initcall_entry_t __initcall1_start[];
-extern initcall_entry_t __initcall2_start[];
-extern initcall_entry_t __initcall3_start[];
-extern initcall_entry_t __initcall4_start[];
-extern initcall_entry_t __initcall5_start[];
-extern initcall_entry_t __initcall6_start[];
-extern initcall_entry_t __initcall7_start[];
-extern initcall_entry_t __initcall_end[];
-
 static initcall_entry_t *initcall_levels[] __initdata = {
         __initcall0_start,
         __initcall1_start,
@@ -1374,7 +1435,7 @@ static void __init do_initcall_level(int level, char *command_line)
                    level, level,
                    NULL, ignore_unknown_bootoption);
 
-        trace_initcall_level(initcall_level_names[level]);
+        do_trace_initcall_level(initcall_level_names[level]);
         for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
                 do_one_initcall(initcall_from_entry(fn));
 }
@@ -1418,7 +1479,7 @@ static void __init do_pre_smp_initcalls(void)
 {
         initcall_entry_t *fn;
 
-        trace_initcall_level("early");
+        do_trace_initcall_level("early");
         for (fn = __initcall_start; fn < __initcall0_start; fn++)
                 do_one_initcall(initcall_from_entry(fn));
 }
@@ -1477,33 +1538,28 @@ static int __init set_debug_rodata(char *str)
 early_param("rodata", set_debug_rodata);
 #endif
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
 static void mark_readonly(void)
 {
-        if (rodata_enabled) {
+        if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && rodata_enabled) {
                 /*
                  * load_module() results in W+X mappings, which are cleaned
-                 * up with call_rcu(). Let's make sure that queued work is
+                 * up with init_free_wq. Let's make sure that queued work is
                  * flushed so that we don't hit false positives looking for
                  * insecure pages which are W+X.
                  */
-                rcu_barrier();
+                flush_module_init_free_work();
+                jump_label_init_ro();
                 mark_rodata_ro();
+                debug_checkwx();
                 rodata_test();
-        } else
+        } else if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
                 pr_info("Kernel memory protection disabled.\n");
+        } else if (IS_ENABLED(CONFIG_ARCH_HAS_STRICT_KERNEL_RWX)) {
+                pr_warn("Kernel memory protection not selected by kernel config.\n");
+        } else {
+                pr_warn("This architecture does not have kernel memory protection.\n");
+        }
 }
-#elif defined(CONFIG_ARCH_HAS_STRICT_KERNEL_RWX)
-static inline void mark_readonly(void)
-{
-        pr_warn("Kernel memory protection not selected by kernel config.\n");
-}
-#else
-static inline void mark_readonly(void)
-{
-        pr_warn("This architecture does not have kernel memory protection.\n");
-}
-#endif
 
 void __weak free_initmem(void)
 {
@@ -1618,18 +1674,16 @@ static noinline void __init kernel_init_freeable(void)
 
         init_mm_internals();
 
-        rcu_init_tasks_generic();
         do_pre_smp_initcalls();
         lockup_detector_init();
 
         smp_init();
         sched_init_smp();
+        workqueue_init_topology();
+        async_init();
 
         padata_init();
         page_alloc_init_late();
-        /* Initialize page ext after all struct pages are initialized. */
-        if (!early_page_ext_enabled())
-                page_ext_init();
 
         do_basic_setup();
@@ -1642,7 +1696,11 @@ static noinline void __init kernel_init_freeable(void)
         * check if there is an early userspace init.  If yes, let it do all
         * the work
         */
-        if (init_eaccess(ramdisk_execute_command) != 0) {
+        int ramdisk_command_access;
+        ramdisk_command_access = init_eaccess(ramdisk_execute_command);
+        if (ramdisk_command_access != 0) {
+                pr_warn("check access for rdinit=%s failed: %i, ignoring\n",
+                        ramdisk_execute_command, ramdisk_command_access);
                 ramdisk_execute_command = NULL;
                 prepare_namespace();
         }
