Diffstat (limited to 'arch/arm64/kernel/setup.c')
| -rw-r--r-- | arch/arm64/kernel/setup.c | 542 |
1 file changed, 301 insertions, 241 deletions
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index add6ea616843..23c05dc7a8f2 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -1,68 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Based on arch/arm/kernel/setup.c
  *
  * Copyright (C) 1995-2001 Russell King
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/acpi.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/ioport.h>
 #include <linux/delay.h>
-#include <linux/utsname.h>
 #include <linux/initrd.h>
 #include <linux/console.h>
-#include <linux/bootmem.h>
-#include <linux/seq_file.h>
+#include <linux/cache.h>
 #include <linux/screen_info.h>
 #include <linux/init.h>
 #include <linux/kexec.h>
-#include <linux/crash_dump.h>
 #include <linux/root_dev.h>
-#include <linux/clk-provider.h>
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 #include <linux/fs.h>
+#include <linux/panic_notifier.h>
 #include <linux/proc_fs.h>
 #include <linux/memblock.h>
 #include <linux/of_fdt.h>
-#include <linux/of_platform.h>
-
+#include <linux/efi.h>
+#include <linux/psci.h>
+#include <linux/sched/task.h>
+#include <linux/scs.h>
+#include <linux/mm.h>
+
+#include <asm/acpi.h>
+#include <asm/fixmap.h>
+#include <asm/cpu.h>
 #include <asm/cputype.h>
+#include <asm/daifflags.h>
 #include <asm/elf.h>
-#include <asm/cputable.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_ops.h>
+#include <asm/kasan.h>
+#include <asm/numa.h>
+#include <asm/rsi.h>
+#include <asm/scs.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/traps.h>
-#include <asm/memblock.h>
-#include <asm/psci.h>
+#include <asm/efi.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/mmu_context.h>
 
-unsigned int processor_id;
-EXPORT_SYMBOL(processor_id);
+static int num_standard_resources;
+static struct resource *standard_resources;
 
-unsigned int elf_hwcap __read_mostly;
-EXPORT_SYMBOL_GPL(elf_hwcap);
-
-static const char *cpu_name;
-static const char *machine_name;
 phys_addr_t __fdt_pointer __initdata;
+u64 mmu_enabled_at_boot __initdata;
 
 /*
  * Standard memory resources
@@ -72,305 +69,368 @@ static struct resource mem_res[] = {
                 .name = "Kernel code",
                 .start = 0,
                 .end = 0,
-                .flags = IORESOURCE_MEM
+                .flags = IORESOURCE_SYSTEM_RAM
         },
         {
                 .name = "Kernel data",
                 .start = 0,
                 .end = 0,
-                .flags = IORESOURCE_MEM
+                .flags = IORESOURCE_SYSTEM_RAM
         }
 };
 
 #define kernel_code mem_res[0]
 #define kernel_data mem_res[1]
 
-void __init early_print(const char *str, ...)
-{
-        char buf[256];
-        va_list ap;
+/*
+ * The recorded values of x0 .. x3 upon kernel entry.
+ */
+u64 __cacheline_aligned boot_args[4];
 
-        va_start(ap, str);
-        vsnprintf(buf, sizeof(buf), str, ap);
-        va_end(ap);
+void __init smp_setup_processor_id(void)
+{
+        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+        set_cpu_logical_map(0, mpidr);
-        printk("%s", buf);
+        pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
+                (unsigned long)mpidr, read_cpuid_id());
 }
 
-static void __init setup_processor(void)
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
-        struct cpu_info *cpu_info;
+        return phys_id == cpu_logical_map(cpu);
+}
+struct mpidr_hash mpidr_hash;
+/**
+ * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
+ *                        level in order to build a linear index from an
+ *                        MPIDR value. Resulting algorithm is a collision
+ *                        free hash carried out through shifting and ORing
+ */
+static void __init smp_build_mpidr_hash(void)
+{
+        u32 i, affinity, fs[4], bits[4], ls;
+        u64 mask = 0;
+        /*
+         * Pre-scan the list of MPIDRS and filter out bits that do
+         * not contribute to affinity levels, ie they never toggle.
+         */
+        for_each_possible_cpu(i)
+                mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
+        pr_debug("mask of set bits %#llx\n", mask);
         /*
-         * locate processor in the list of supported processor
-         * types. The linker builds this table for us from the
-         * entries in arch/arm/mm/proc.S
+         * Find and stash the last and first bit set at all affinity levels to
+         * check how many bits are required to represent them.
          */
-        cpu_info = lookup_processor_type(read_cpuid_id());
-        if (!cpu_info) {
-                printk("CPU configuration botched (ID %08x), unable to continue.\n",
-                       read_cpuid_id());
-                while (1);
+        for (i = 0; i < 4; i++) {
+                affinity = MPIDR_AFFINITY_LEVEL(mask, i);
+                /*
+                 * Find the MSB bit and LSB bits position
+                 * to determine how many bits are required
+                 * to express the affinity level.
+                 */
+                ls = fls(affinity);
+                fs[i] = affinity ? ffs(affinity) - 1 : 0;
+                bits[i] = ls - fs[i];
         }
-
-        cpu_name = cpu_info->cpu_name;
-
-        printk("CPU: %s [%08x] revision %d\n",
-               cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
-
-        sprintf(init_utsname()->machine, "aarch64");
-        elf_hwcap = 0;
+        /*
+         * An index can be created from the MPIDR_EL1 by isolating the
+         * significant bits at each affinity level and by shifting
+         * them in order to compress the 32 bits values space to a
+         * compressed set of values. This is equivalent to hashing
+         * the MPIDR_EL1 through shifting and ORing. It is a collision free
+         * hash though not minimal since some levels might contain a number
+         * of CPUs that is not an exact power of 2 and their bit
+         * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
+         */
+        mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
+        mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
+        mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
+                                  (bits[1] + bits[0]);
+        mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
+                                  fs[3] - (bits[2] + bits[1] + bits[0]);
+        mpidr_hash.mask = mask;
+        mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
+        pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
+                 mpidr_hash.shift_aff[0],
+                 mpidr_hash.shift_aff[1],
+                 mpidr_hash.shift_aff[2],
+                 mpidr_hash.shift_aff[3],
+                 mpidr_hash.mask,
+                 mpidr_hash.bits);
+        /*
+         * 4x is an arbitrary value used to warn on a hash table much bigger
+         * than expected on most systems.
+         */
+        if (mpidr_hash_size() > 4 * num_possible_cpus())
+                pr_warn("Large number of MPIDR hash buckets detected\n");
 }
 
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
 {
-        struct boot_param_header *devtree;
-        unsigned long dt_root;
+        int size = 0;
+        void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+        const char *name;
 
-        /* Check we have a non-NULL DT pointer */
-        if (!dt_phys) {
-                early_print("\n"
-                        "Error: NULL or invalid device tree blob\n"
-                        "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
-                        "\nPlease check your bootloader.\n");
-
-                while (true)
-                        cpu_relax();
-
-        }
+        if (dt_virt)
+                memblock_reserve(dt_phys, size);
 
-        devtree = phys_to_virt(dt_phys);
-
-        /* Check device tree validity */
-        if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) {
-                early_print("\n"
-                        "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
-                        "Expected 0x%x, found 0x%x\n"
+        /*
+         * dt_virt is a fixmap address, hence __pa(dt_virt) can't be used.
+         * Pass dt_phys directly.
+         */
+        if (!early_init_dt_scan(dt_virt, dt_phys)) {
+                pr_crit("\n"
+                        "Error: invalid device tree blob: PA=%pa, VA=%px, size=%d bytes\n"
+                        "The dtb must be 8-byte aligned and must not exceed 2 MB in size.\n"
                        "\nPlease check your bootloader.\n",
-                        dt_phys, devtree, OF_DT_HEADER,
-                        be32_to_cpu(devtree->magic));
+                        &dt_phys, dt_virt, size);
+                /*
+                 * Note that in this _really_ early stage we cannot even BUG()
+                 * or oops, so the least terrible thing to do is cpu_relax(),
+                 * or else we could end-up printing non-initialized data, etc.
+                 */
                 while (true)
                         cpu_relax();
         }
 
-        initial_boot_params = devtree;
-        dt_root = of_get_flat_dt_root();
-
-        machine_name = of_get_flat_dt_prop(dt_root, "model", NULL);
-        if (!machine_name)
-                machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
-        if (!machine_name)
-                machine_name = "<unknown>";
-        pr_info("Machine: %s\n", machine_name);
-
-        /* Retrieve various information from the /chosen node */
-        of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
-        /* Initialize {size,address}-cells info */
-        of_scan_flat_dt(early_init_dt_scan_root, NULL);
-        /* Setup memory, calling early_init_dt_add_memory_arch */
-        of_scan_flat_dt(early_init_dt_scan_memory, NULL);
-}
+        /* Early fixups are done, map the FDT as read-only now */
+        fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
 
-void __init early_init_dt_add_memory_arch(u64 base, u64 size)
-{
-        base &= PAGE_MASK;
-        size &= PAGE_MASK;
-        if (base + size < PHYS_OFFSET) {
-                pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
-                           base, base + size);
+        name = of_flat_dt_get_machine_name();
+        if (!name)
                 return;
-        }
-        if (base < PHYS_OFFSET) {
-                pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
-                           base, PHYS_OFFSET);
-                size -= PHYS_OFFSET - base;
-                base = PHYS_OFFSET;
-        }
-        memblock_add(base, size);
+
+        pr_info("Machine model: %s\n", name);
+        dump_stack_set_arch_desc("%s (DT)", name);
 }
 
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+static void __init request_standard_resources(void)
 {
-        return __va(memblock_alloc(size, align));
+        struct memblock_region *region;
+        struct resource *res;
+        unsigned long i = 0;
+        size_t res_size;
+
+        kernel_code.start = __pa_symbol(_text);
+        kernel_code.end = __pa_symbol(__init_begin - 1);
+        kernel_data.start = __pa_symbol(_sdata);
+        kernel_data.end = __pa_symbol(_end - 1);
+        insert_resource(&iomem_resource, &kernel_code);
+        insert_resource(&iomem_resource, &kernel_data);
+
+        num_standard_resources = memblock.memory.cnt;
+        res_size = num_standard_resources * sizeof(*standard_resources);
+        standard_resources = memblock_alloc_or_panic(res_size, SMP_CACHE_BYTES);
+
+        for_each_mem_region(region) {
+                res = &standard_resources[i++];
+                if (memblock_is_nomap(region)) {
+                        res->name = "reserved";
+                        res->flags = IORESOURCE_MEM;
+                        res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
+                        res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
+                } else {
+                        res->name = "System RAM";
+                        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+                        res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+                        res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+                }
+
+                insert_resource(&iomem_resource, res);
+        }
 }
 
-/*
- * Limit the memory size that was specified via FDT.
- */
-static int __init early_mem(char *p)
+static int __init reserve_memblock_reserved_regions(void)
 {
-        phys_addr_t limit;
+        u64 i, j;
 
-        if (!p)
-                return 1;
+        for (i = 0; i < num_standard_resources; ++i) {
+                struct resource *mem = &standard_resources[i];
+                phys_addr_t r_start, r_end, mem_size = resource_size(mem);
 
-        limit = memparse(p, &p) & PAGE_MASK;
-        pr_notice("Memory limited to %lldMB\n", limit >> 20);
+                if (!memblock_is_region_reserved(mem->start, mem_size))
+                        continue;
 
-        memblock_enforce_memory_limit(limit);
+                for_each_reserved_mem_range(j, &r_start, &r_end) {
+                        resource_size_t start, end;
 
-        return 0;
-}
-early_param("mem", early_mem);
+                        start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+                        end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
 
-static void __init request_standard_resources(void)
-{
-        struct memblock_region *region;
-        struct resource *res;
+                        if (start > mem->end || end < mem->start)
+                                continue;
 
-        kernel_code.start = virt_to_phys(_text);
-        kernel_code.end = virt_to_phys(_etext - 1);
-        kernel_data.start = virt_to_phys(_sdata);
-        kernel_data.end = virt_to_phys(_end - 1);
-
-        for_each_memblock(memory, region) {
-                res = alloc_bootmem_low(sizeof(*res));
-                res->name = "System RAM";
-                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
-                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
-                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-
-                request_resource(&iomem_resource, res);
-
-                if (kernel_code.start >= res->start &&
-                    kernel_code.end <= res->end)
-                        request_resource(res, &kernel_code);
-                if (kernel_data.start >= res->start &&
-                    kernel_data.end <= res->end)
-                        request_resource(res, &kernel_data);
+                        reserve_region_with_split(mem, start, end, "reserved");
+                }
         }
+
+        return 0;
 }
+arch_initcall(reserve_memblock_reserved_regions);
 
 u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
-void __init setup_arch(char **cmdline_p)
+u64 cpu_logical_map(unsigned int cpu)
 {
-        setup_processor();
-
-        setup_machine_fdt(__fdt_pointer);
+        return __cpu_logical_map[cpu];
+}
 
-        init_mm.start_code = (unsigned long) _text;
-        init_mm.end_code = (unsigned long) _etext;
-        init_mm.end_data = (unsigned long) _edata;
-        init_mm.brk = (unsigned long) _end;
+void __init __no_sanitize_address setup_arch(char **cmdline_p)
+{
+        setup_initial_init_mm(_text, _etext, _edata, _end);
 
         *cmdline_p = boot_command_line;
 
+        kaslr_init();
+
+        early_fixmap_init();
+        early_ioremap_init();
+
+        setup_machine_fdt(__fdt_pointer);
+
+        /*
+         * Initialise the static keys early as they may be enabled by the
+         * cpufeature code and early parameters.
+         */
+        jump_label_init();
         parse_early_param();
 
-        arm64_memblock_init();
+        dynamic_scs_init();
 
-        paging_init();
-        request_standard_resources();
+        /*
+         * The primary CPU enters the kernel with all DAIF exceptions masked.
+         *
+         * We must unmask Debug and SError before preemption or scheduling is
+         * possible to ensure that these are consistently unmasked across
+         * threads, and we want to unmask SError as soon as possible after
+         * initializing earlycon so that we can report any SErrors immediately.
+         *
+         * IRQ and FIQ will be unmasked after the root irqchip has been
+         * detected and initialized.
+         */
+        local_daif_restore(DAIF_PROCCTX_NOIRQ);
 
-        unflatten_device_tree();
+        /*
+         * TTBR0 is only used for the identity mapping at this stage. Make it
+         * point to zero page to avoid speculatively fetching new entries.
+         */
+        cpu_uninstall_idmap();
 
-        psci_init();
+        xen_early_init();
+        efi_init();
 
-        cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
-#ifdef CONFIG_SMP
-        smp_init_cpus();
-#endif
+        if (!efi_enabled(EFI_BOOT)) {
+                if ((u64)_text % MIN_KIMG_ALIGN)
+                        pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");
+                WARN_TAINT(mmu_enabled_at_boot, TAINT_FIRMWARE_WORKAROUND,
+                           FW_BUG "Booted with MMU enabled!");
+        }
 
-#ifdef CONFIG_VT
-#if defined(CONFIG_VGA_CONSOLE)
-        conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
-        conswitchp = &dummy_con;
-#endif
-#endif
-}
+        arm64_memblock_init();
 
-static int __init arm64_device_init(void)
-{
-        of_clk_init(NULL);
-        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-        return 0;
-}
-arch_initcall(arm64_device_init);
+        paging_init();
 
-static DEFINE_PER_CPU(struct cpu, cpu_data);
+        acpi_table_upgrade();
 
-static int __init topology_init(void)
-{
-        int i;
+        /* Parse the ACPI tables for possible boot-time configuration */
+        acpi_boot_table_init();
 
-        for_each_possible_cpu(i) {
-                struct cpu *cpu = &per_cpu(cpu_data, i);
-                cpu->hotpluggable = 1;
-                register_cpu(cpu, i);
-        }
+        if (acpi_disabled)
+                unflatten_device_tree();
 
-        return 0;
-}
-subsys_initcall(topology_init);
+        bootmem_init();
 
-static const char *hwcap_str[] = {
-        "fp",
-        "asimd",
-        NULL
-};
+        kasan_init();
 
-static int c_show(struct seq_file *m, void *v)
-{
-        int i;
+        request_standard_resources();
 
-        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
-                   cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
+        early_ioremap_reset();
 
-        for_each_online_cpu(i) {
-                /*
-                 * glibc reads /proc/cpuinfo to determine the number of
-                 * online processors, looking for lines beginning with
-                 * "processor". Give glibc what it expects.
-                 */
-#ifdef CONFIG_SMP
-                seq_printf(m, "processor\t: %d\n", i);
-#endif
-                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
-                           loops_per_jiffy / (500000UL/HZ),
-                           loops_per_jiffy / (5000UL/HZ) % 100);
-        }
+        if (acpi_disabled)
+                psci_dt_init();
+        else
+                psci_acpi_init();
 
-        /* dump out the processor features */
-        seq_puts(m, "Features\t: ");
+        arm64_rsi_init();
 
-        for (i = 0; hwcap_str[i]; i++)
-                if (elf_hwcap & (1 << i))
-                        seq_printf(m, "%s ", hwcap_str[i]);
+        init_bootcpu_ops();
+        smp_init_cpus();
+        smp_build_mpidr_hash();
 
-        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
-        seq_printf(m, "CPU architecture: AArch64\n");
-        seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
-        seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
-        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+        /*
+         * Make sure init_thread_info.ttbr0 always generates translation
+         * faults in case uaccess_enable() is inadvertently called by the init
+         * thread.
+         */
+        init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
+#endif
 
-        seq_puts(m, "\n");
+        if (boot_args[1] || boot_args[2] || boot_args[3]) {
+                pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
+                       "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
+                       "This indicates a broken bootloader or old kernel\n",
+                       boot_args[1], boot_args[2], boot_args[3]);
+        }
+}
 
-        seq_printf(m, "Hardware\t: %s\n", machine_name);
+static inline bool cpu_can_disable(unsigned int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+        const struct cpu_operations *ops = get_cpu_ops(cpu);
 
-        return 0;
+        if (ops && ops->cpu_can_disable)
+                return ops->cpu_can_disable(cpu);
+#endif
+        return false;
 }
 
-static void *c_start(struct seq_file *m, loff_t *pos)
+bool arch_cpu_is_hotpluggable(int num)
 {
-        return *pos < 1 ? (void *)1 : NULL;
+        return cpu_can_disable(num);
 }
 
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+static void dump_kernel_offset(void)
 {
-        ++*pos;
-        return NULL;
+        const unsigned long offset = kaslr_offset();
+
+        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
+                pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
+                         offset, KIMAGE_VADDR);
+                pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET);
+        } else {
+                pr_emerg("Kernel Offset: disabled\n");
+        }
 }
 
-static void c_stop(struct seq_file *m, void *v)
+static int arm64_panic_block_dump(struct notifier_block *self,
+                                  unsigned long v, void *p)
 {
+        dump_kernel_offset();
+        dump_cpu_features();
+        dump_mem_limit();
+        return 0;
 }
 
-const struct seq_operations cpuinfo_op = {
-        .start = c_start,
-        .next = c_next,
-        .stop = c_stop,
-        .show = c_show
+static struct notifier_block arm64_panic_block = {
+        .notifier_call = arm64_panic_block_dump
 };
+
+static int __init register_arm64_panic_block(void)
+{
+        atomic_notifier_chain_register(&panic_notifier_list,
+                                       &arm64_panic_block);
+        return 0;
+}
+device_initcall(register_arm64_panic_block);
+
+static int __init check_mmu_enabled_at_boot(void)
+{
+        if (!efi_enabled(EFI_BOOT) && mmu_enabled_at_boot)
+                panic("Non-EFI boot detected with MMU and caches enabled");
+        return 0;
+}
+device_initcall_sync(check_mmu_enabled_at_boot);
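The smp_build_mpidr_hash() comments in the diff above describe how the pre-computed shifts turn an MPIDR_EL1 value into a dense, collision-free index. A minimal user-space style sketch of that lookup is shown below; mpidr_hash_index() and struct mpidr_hash_sketch are hypothetical names for illustration only (the kernel performs the equivalent computation in assembly on the cpu suspend/resume path), but the mask and shift fields mirror the ones the new code fills in.

#include <stdint.h>

/* Mirrors the fields that smp_build_mpidr_hash() computes. */
struct mpidr_hash_sketch {
        uint64_t mask;          /* MPIDR bits that actually vary across CPUs */
        uint32_t shift_aff[4];  /* right shift applied to each affinity field */
        uint32_t bits;          /* width of the resulting index */
};

/*
 * Hypothetical helper: isolate each 8-bit affinity field, shift its
 * significant bits down so they pack directly above the bits collected
 * from the previous level, then OR everything into one compact index.
 */
uint32_t mpidr_hash_index(const struct mpidr_hash_sketch *h, uint64_t mpidr)
{
        mpidr &= h->mask;       /* drop bits that never toggle */

        return ((mpidr & 0x00000000000000ffULL) >> h->shift_aff[0]) |
               ((mpidr & 0x000000000000ff00ULL) >> h->shift_aff[1]) |
               ((mpidr & 0x0000000000ff0000ULL) >> h->shift_aff[2]) |
               ((mpidr & 0x000000ff00000000ULL) >> h->shift_aff[3]);
}

For example, two clusters of four CPUs each (Aff1 = 0 or 1, Aff0 = 0..3) give mask = 0x103, bits = {2, 1}, shift_aff = {0, 6, ...}, so MPIDR 0x101 maps to index 5 and all eight CPUs land in indices 0..7; the table only grows beyond that when the affinity bit patterns contain holes, which is why the code merely warns when it exceeds 4 * num_possible_cpus().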
