Diffstat (limited to 'arch/powerpc/kernel/setup_64.c')
 arch/powerpc/kernel/setup_64.c | 994 ++++++++++++++++++++++++++-----------
 1 file changed, 607 insertions(+), 387 deletions(-)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 389fb8077cc9..8fd7cbf3bd04 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -1,17 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Common boot and setup code.
*
* Copyright (C) 2001 PPC64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
-#undef DEBUG
-
#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
@@ -31,23 +25,27 @@
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
-#include <linux/memblock.h>
-#include <linux/hugetlb.h>
-
+#include <linux/memory.h>
+#include <linux/nmi.h>
+#include <linux/pgtable.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+#include <asm/asm-prototypes.h>
+#include <asm/kvm_guest.h>
#include <asm/io.h>
#include <asm/kdump.h>
-#include <asm/prom.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
+#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
@@ -62,48 +60,72 @@
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
-#include <asm/mmu_context.h>
-#include <asm/code-patching.h>
-#include <asm/kvm_ppc.h>
-#include <asm/hugetlb.h>
+#include <asm/text-patching.h>
+#include <asm/ftrace.h>
+#include <asm/opal.h>
+#include <asm/cputhreads.h>
+#include <asm/hw_irq.h>
+#include <asm/feature-fixups.h>
+#include <asm/kup.h>
+#include <asm/early_ioremap.h>
+#include <asm/pgalloc.h>
#include "setup.h"
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-int boot_cpuid = 0;
int spinning_secondaries;
u64 ppc64_pft_size;
-/* Pick defaults since we might want to patch instructions
- * before we've read this from the device tree.
- */
struct ppc64_caches ppc64_caches = {
- .dline_size = 0x40,
- .log_dline_size = 6,
- .iline_size = 0x40,
- .log_iline_size = 6
+ .l1d = {
+ .block_size = 0x40,
+ .log_block_size = 6,
+ },
+ .l1i = {
+ .block_size = 0x40,
+ .log_block_size = 6
+ },
};
EXPORT_SYMBOL_GPL(ppc64_caches);
-/*
- * These are used in binfmt_elf.c to put aux entries on the stack
- * for each elf executable being started.
- */
-int dcache_bsize;
-int icache_bsize;
-int ucache_bsize;
+#if defined(CONFIG_PPC_BOOK3E_64) && defined(CONFIG_SMP)
+void __init setup_tlb_core_data(void)
+{
+ int cpu;
+
+ BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);
+
+ for_each_possible_cpu(cpu) {
+ int first = cpu_first_thread_sibling(cpu);
+
+ /*
+ * If we boot via kdump on a non-primary thread,
+ * make sure we point at the thread that actually
+ * set up this TLB.
+ */
+ if (cpu_first_thread_sibling(boot_cpuid) == first)
+ first = boot_cpuid;
+
+ paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;
+
+ /*
+ * If we have threads, we need either tlbsrx.
+ * or e6500 tablewalk mode, or else TLB handlers
+ * will be racy and could produce duplicate entries.
+ * Should we panic instead?
+ */
+ WARN_ONCE(smt_enabled_at_boot >= 2 &&
+ book3e_htw_mode != PPC_HTW_E6500,
+ "%s: unsupported MMU configuration\n", __func__);
+ }
+}
+#endif
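
The tcd_ptr sharing above hinges on cpu_first_thread_sibling() rounding a CPU number down to the first thread of its core. A minimal userspace sketch of that mask arithmetic, assuming a power-of-two threads_per_core as on these parts:

	#include <assert.h>

	/* Stand-in for the powerpc cputhreads.h helper, assuming
	 * threads_per_core is a power of two. */
	static int threads_per_core = 8;

	static int cpu_first_thread_sibling(int cpu)
	{
		return cpu & ~(threads_per_core - 1);
	}

	int main(void)
	{
		/* CPUs 8..15 share a core; all round down to thread 8. */
		assert(cpu_first_thread_sibling(11) == 8);
		/* A kdump boot on, say, CPU 10 then overrides "first" so
		 * tcd_ptr points at the thread that actually owns the TLB. */
		return 0;
	}
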
#ifdef CONFIG_SMP
static char *smt_enabled_cmdline;
/* Look for ibm,smt-enabled OF option */
-static void check_smt_enabled(void)
+void __init check_smt_enabled(void)
{
struct device_node *dn;
const char *smt_option;
@@ -118,13 +140,10 @@ static void check_smt_enabled(void)
else if (!strcmp(smt_enabled_cmdline, "off"))
smt_enabled_at_boot = 0;
else {
- long smt;
- int rc;
-
- rc = strict_strtol(smt_enabled_cmdline, 10, &smt);
- if (!rc)
+ int smt;
+ if (!kstrtoint(smt_enabled_cmdline, 10, &smt))
smt_enabled_at_boot =
- min(threads_per_core, (int)smt);
+ min(threads_per_core, smt);
}
} else {
dn = of_find_node_by_path("/options");
@@ -152,17 +171,148 @@ static int __init early_smt_enabled(char *p)
}
early_param("smt-enabled", early_smt_enabled);
-#else
-#define check_smt_enabled()
#endif /* CONFIG_SMP */
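
The kstrtoint() branch above clamps a numeric smt-enabled= value to threads_per_core. A standalone sketch of the same parse-and-clamp logic (kstrtoint is kernel-only, so strtol stands in here):

	#include <stdio.h>
	#include <stdlib.h>

	/* Userspace analog of the smt-enabled= parsing; the kernel uses
	 * kstrtoint() rather than strtol(). */
	static int threads_per_core = 8;

	static int parse_smt_enabled(const char *arg)
	{
		char *end;
		long smt = strtol(arg, &end, 10);

		if (end == arg || *end != '\0')
			return threads_per_core;  /* unparseable: leave SMT on */
		return smt < threads_per_core ? (int)smt : threads_per_core;
	}

	int main(void)
	{
		printf("%d\n", parse_smt_enabled("4"));   /* 4 */
		printf("%d\n", parse_smt_enabled("64"));  /* clamped to 8 */
		return 0;
	}
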
/** Fix up paca fields required for the boot cpu */
-static void fixup_boot_paca(void)
+static void __init fixup_boot_paca(struct paca_struct *boot_paca)
{
/* The boot cpu is started */
- get_paca()->cpu_start = 1;
+ boot_paca->cpu_start = 1;
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+	 * Give the early boot machine check stack somewhere to live; use
+ * half of the init stack. This is a bit hacky but there should not be
+ * deep stack usage in early init so shouldn't overflow it or overwrite
+ * things.
+ */
+ boot_paca->mc_emergency_sp = (void *)&init_thread_union +
+ (THREAD_SIZE/2);
+#endif
/* Allow percpu accesses to work until we setup percpu data */
- get_paca()->data_offset = 0;
+ boot_paca->data_offset = 0;
+ /* Mark interrupts soft and hard disabled in PACA */
+ boot_paca->irq_soft_mask = IRQS_DISABLED;
+ boot_paca->irq_happened = PACA_IRQ_HARD_DIS;
+ WARN_ON(mfmsr() & MSR_EE);
+}
+
+static void __init configure_exceptions(void)
+{
+ /*
+ * Setup the trampolines from the lowmem exception vectors
+ * to the kdump kernel when not using a relocatable kernel.
+ */
+ setup_kdump_trampoline();
+
+ /* Under a PAPR hypervisor, we need hypercalls */
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ /*
+ * - PR KVM does not support AIL mode interrupts in the host
+ * while a PR guest is running.
+ *
+ * - SCV system call interrupt vectors are only implemented for
+ * AIL mode interrupts.
+ *
+ * - On pseries, AIL mode can only be enabled and disabled
+ * system-wide so when a PR VM is created on a pseries host,
+ * all CPUs of the host are set to AIL=0 mode.
+ *
+ * - Therefore host CPUs must not execute scv while a PR VM
+ * exists.
+ *
+ * - SCV support can not be disabled dynamically because the
+ * feature is advertised to host userspace. Disabling the
+ * facility and emulating it would be possible but is not
+ * implemented.
+ *
+ * - So SCV support is blanket disabled if PR KVM could possibly
+ * run. That is, PR support compiled in, booting on pseries
+ * with hash MMU.
+ */
+ if (IS_ENABLED(CONFIG_KVM_BOOK3S_PR_POSSIBLE) && !radix_enabled()) {
+ init_task.thread.fscr &= ~FSCR_SCV;
+ cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
+ }
+
+ /* Enable AIL if possible */
+ if (!pseries_enable_reloc_on_exc()) {
+ init_task.thread.fscr &= ~FSCR_SCV;
+ cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
+ }
+
+ /*
+ * Tell the hypervisor that we want our exceptions to
+ * be taken in little endian mode.
+ *
+ * We don't call this for big endian as our calling convention
+ * makes us always enter in BE, and the call may fail under
+ * some circumstances with kdump.
+ */
+#ifdef __LITTLE_ENDIAN__
+ pseries_little_endian_exceptions();
+#endif
+ } else {
+ /* Set endian mode using OPAL */
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ opal_configure_cores();
+
+ /* AIL on native is done in cpu_ready_for_interrupts() */
+ }
+}
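
One consequence of clearing PPC_FEATURE2_SCV above is that userspace must check hwcap2 before issuing scv. A hedged example of that check via getauxval(); 0x00100000 is the PPC_FEATURE2_SCV bit from the powerpc uapi headers, and the check is only meaningful on powerpc hosts:

	#include <stdio.h>
	#include <sys/auxv.h>

	/* scv availability is advertised to userspace through AT_HWCAP2. */
	#define PPC_FEATURE2_SCV 0x00100000

	int main(void)
	{
		unsigned long hwcap2 = getauxval(AT_HWCAP2);

		if (hwcap2 & PPC_FEATURE2_SCV)
			printf("scv 0 usable for system calls\n");
		else
			printf("fall back to sc\n");
		return 0;
	}
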
+
+static void cpu_ready_for_interrupts(void)
+{
+ /*
+ * Enable AIL if supported, and we are in hypervisor mode. This
+ * is called once for every processor.
+ *
+ * If we are not in hypervisor mode the job is done once for
+ * the whole partition in configure_exceptions().
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ unsigned long lpcr = mfspr(SPRN_LPCR);
+ unsigned long new_lpcr = lpcr;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ /* P10 DD1 does not have HAIL */
+ if (pvr_version_is(PVR_POWER10) &&
+ (mfspr(SPRN_PVR) & 0xf00) == 0x100)
+ new_lpcr |= LPCR_AIL_3;
+ else
+ new_lpcr |= LPCR_HAIL;
+ } else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ new_lpcr |= LPCR_AIL_3;
+ }
+
+ if (new_lpcr != lpcr)
+ mtspr(SPRN_LPCR, new_lpcr);
+ }
+
+ /*
+ * Set HFSCR:TM based on CPU features:
+ * In the special case of TM no suspend (P9N DD2.1), Linux is
+ * told TM is off via the dt-ftrs but told to (partially) use
+ * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
+ * will be off from dt-ftrs but we need to turn it on for the
+ * no suspend case.
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (cpu_has_feature(CPU_FTR_TM_COMP))
+ mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
+ else
+ mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
+ }
+
+ /* Set IR and DR in PACA MSR */
+ get_paca()->kernel_msr = MSR_KERNEL;
+}
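
The DD1 test above decodes the PVR: the upper halfword is the processor version (0x0080 for POWER10) and the lower halfword the design revision, whose 0xf00 nibble is the major DD number. A small sketch of that decoding, using an assumed raw PVR value for illustration:

	#include <stdio.h>
	#include <stdint.h>

	/* PVR layout: bits 16-31 version, bits 0-15 revision. The sample
	 * value below is an assumed POWER10 DD1.0 part. */
	#define PVR_VER(pvr)	(((pvr) >> 16) & 0xFFFF)
	#define PVR_POWER10	0x0080

	int main(void)
	{
		uint32_t pvr = 0x00800100;	/* hypothetical P10 DD1.0 */

		if (PVR_VER(pvr) == PVR_POWER10 && (pvr & 0xf00) == 0x100)
			printf("P10 DD1: no HAIL, use LPCR[AIL]=3 instead\n");
		return 0;
	}
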
+
+unsigned long spr_default_dscr = 0;
+
+static void __init record_spr_defaults(void)
+{
+ if (early_cpu_has_feature(CPU_FTR_DSCR))
+ spr_default_dscr = mfspr(SPRN_DSCR);
}
/*
@@ -190,74 +340,179 @@ void __init early_setup(unsigned long dt_ptr)
/* -------- printk is _NOT_ safe to use here ! ------- */
- /* Identify CPU type */
- identify_cpu(0, mfspr(SPRN_PVR));
-
- /* Assume we're on cpu 0 for now. Don't write to the paca yet! */
+ /*
+ * Assume we're on cpu 0 for now.
+ *
+ * We need to load a PACA very early for a few reasons.
+ *
+ * The stack protector canary is stored in the paca, so as soon as we
+ * call any stack protected code we need r13 pointing somewhere valid.
+ *
+ * If we are using kcov it will call in_task() in its instrumentation,
+ * which relies on the current task from the PACA.
+ *
+ * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
+ * printk(), which can trigger both stack protector and kcov.
+ *
+ * percpu variables and spin locks also use the paca.
+ *
+ * So set up a temporary paca. It will be replaced below once we know
+ * what CPU we are on.
+ */
initialise_paca(&boot_paca, 0);
- setup_paca(&boot_paca);
- fixup_boot_paca();
-
- /* Initialize lockdep early or else spinlocks will blow */
- lockdep_init();
+ fixup_boot_paca(&boot_paca);
+ WARN_ON(local_paca);
+ setup_paca(&boot_paca); /* install the paca into registers */
/* -------- printk is now safe to use ------- */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && (mfmsr() & MSR_HV))
+ enable_machine_check();
+
+ /* Try new device tree based feature discovery ... */
+ if (!dt_cpu_ftrs_init(__va(dt_ptr)))
+ /* Otherwise use the old style CPU table */
+ identify_cpu(0, mfspr(SPRN_PVR));
+
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
- DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
+ udbg_printf(" -> %s(), dt_ptr: 0x%lx\n", __func__, dt_ptr);
/*
* Do early initialization using the flattened device
* tree, such as retrieving the physical memory map or
- * calculating/retrieving the hash table size.
+ * calculating/retrieving the hash table size, discover
+ * boot_cpuid and boot_cpu_hwid.
*/
early_init_devtree(__va(dt_ptr));
- /* Now we know the logical id of our boot cpu, setup the paca. */
- setup_paca(&paca[boot_cpuid]);
- fixup_boot_paca();
+ allocate_paca_ptrs();
+ allocate_paca(boot_cpuid);
+ set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);
+ fixup_boot_paca(paca_ptrs[boot_cpuid]);
+ setup_paca(paca_ptrs[boot_cpuid]); /* install the paca into registers */
+ // smp_processor_id() now reports boot_cpuid
+
+#ifdef CONFIG_SMP
+ task_thread_info(current)->cpu = boot_cpuid; // fix task_cpu(current)
+#endif
- /* Probe the machine type */
- probe_machine();
+ /*
+	 * Configure exception handlers. This includes setting up trampolines
+ * if needed, setting exception endian mode, etc...
+ */
+ configure_exceptions();
- setup_kdump_trampoline();
+ /*
+ * Configure Kernel Userspace Protection. This needs to happen before
+ * feature fixups for platforms that implement this using features.
+ */
+ setup_kup();
- DBG("Found, Initializing memory management...\n");
+ /* Apply all the dynamic patching */
+ apply_feature_fixups();
+ setup_feature_keys();
/* Initialize the hash table or TLB handling */
early_init_mmu();
+ early_ioremap_setup();
+
+ /*
+ * After firmware and early platform setup code has set things up,
+ * we note the SPR values for configurable control/performance
+ * registers, and use those as initial defaults.
+ */
+ record_spr_defaults();
+
+ /*
+ * At this point, we can let interrupts switch to virtual mode
+ * (the MMU has been setup), so adjust the MSR in the PACA to
+ * have IR and DR set and enable AIL if it exists
+ */
+ cpu_ready_for_interrupts();
+
/*
- * Reserve any gigantic pages requested on the command line.
- * memblock needs to have been initialized by the time this is
- * called since this will reserve memory.
+ * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
+ * will only actually get enabled on the boot cpu much later once
+ * ftrace itself has been initialized.
*/
- reserve_hugetlb_gpages();
+ this_cpu_enable_ftrace();
- DBG(" <- early_setup()\n");
+ udbg_printf(" <- %s()\n", __func__);
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
+ /*
+ * This needs to be done *last* (after the above udbg_printf() even)
+ *
+ * Right after we return from this function, we turn on the MMU
+ * which means the real-mode access trick that btext does will
+	 * no longer work; it needs to switch to using a real MMU
+	 * mapping. This call ensures that it does.
+ */
+ btext_map();
+#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
- /* Mark interrupts enabled in PACA */
- get_paca()->soft_enabled = 0;
+ /* Mark interrupts disabled in PACA */
+ irq_soft_mask_set(IRQS_DISABLED);
/* Initialize the hash table or TLB handling */
early_init_mmu_secondary();
+
+ /* Perform any KUP setup that is per-cpu */
+ setup_kup();
+
+ /*
+ * At this point, we can let interrupts switch to virtual mode
+ * (the MMU has been setup), so adjust the MSR in the PACA to
+ * have IR and DR set.
+ */
+ cpu_ready_for_interrupts();
}
#endif /* CONFIG_SMP */
-#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+void __noreturn panic_smp_self_stop(void)
+{
+ hard_irq_disable();
+ spin_begin();
+ while (1)
+ spin_cpu_relax();
+}
+
+#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
+static bool use_spinloop(void)
+{
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
+ /*
+ * See comments in head_64.S -- not all platforms insert
+ * secondaries at __secondary_hold and wait at the spin
+ * loop.
+ */
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ return false;
+ return true;
+ }
+
+ /*
+ * When book3e boots from kexec, the ePAPR spin table does
+ * not get used.
+ */
+ return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
+}
+
void smp_release_cpus(void)
{
unsigned long *ptr;
int i;
- DBG(" -> smp_release_cpus()\n");
+ if (!use_spinloop())
+ return;
/* All secondary cpus are spinning on a common spinloop, release them
* all now so they can start to spin on their individual paca
@@ -267,7 +522,7 @@ void smp_release_cpus(void)
ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
- PHYSICAL_START);
- *ptr = __pa(generic_secondary_smp_init);
+ *ptr = ppc_function_entry(generic_secondary_smp_init);
/* And wait a bit for them to catch up */
for (i = 0; i < 100000; i++) {
@@ -277,11 +532,9 @@ void smp_release_cpus(void)
break;
udelay(1);
}
- DBG("spinning_secondaries = %d\n", spinning_secondaries);
-
- DBG(" <- smp_release_cpus()\n");
+ pr_debug("spinning_secondaries = %d\n", spinning_secondaries);
}
-#endif /* CONFIG_SMP || CONFIG_KEXEC */
+#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
/*
* Initialize some remaining members of the ppc64_caches and systemcfg
@@ -290,250 +543,234 @@ void smp_release_cpus(void)
 * cache information about the CPU that will be used by cache flush
* routines and/or provided to userland
*/
-static void __init initialize_cache_info(void)
-{
- struct device_node *np;
- unsigned long num_cpus = 0;
-
- DBG(" -> initialize_cache_info()\n");
- for_each_node_by_type(np, "cpu") {
- num_cpus += 1;
-
- /*
- * We're assuming *all* of the CPUs have the same
- * d-cache and i-cache sizes... -Peter
- */
- if (num_cpus == 1) {
- const u32 *sizep, *lsizep;
- u32 size, lsize;
-
- size = 0;
- lsize = cur_cpu_spec->dcache_bsize;
- sizep = of_get_property(np, "d-cache-size", NULL);
- if (sizep != NULL)
- size = *sizep;
- lsizep = of_get_property(np, "d-cache-block-size",
- NULL);
- /* fallback if block size missing */
- if (lsizep == NULL)
- lsizep = of_get_property(np,
- "d-cache-line-size",
- NULL);
- if (lsizep != NULL)
- lsize = *lsizep;
- if (sizep == 0 || lsizep == 0)
- DBG("Argh, can't find dcache properties ! "
- "sizep: %p, lsizep: %p\n", sizep, lsizep);
-
- ppc64_caches.dsize = size;
- ppc64_caches.dline_size = lsize;
- ppc64_caches.log_dline_size = __ilog2(lsize);
- ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
-
- size = 0;
- lsize = cur_cpu_spec->icache_bsize;
- sizep = of_get_property(np, "i-cache-size", NULL);
- if (sizep != NULL)
- size = *sizep;
- lsizep = of_get_property(np, "i-cache-block-size",
- NULL);
- if (lsizep == NULL)
- lsizep = of_get_property(np,
- "i-cache-line-size",
- NULL);
- if (lsizep != NULL)
- lsize = *lsizep;
- if (sizep == 0 || lsizep == 0)
- DBG("Argh, can't find icache properties ! "
- "sizep: %p, lsizep: %p\n", sizep, lsizep);
-
- ppc64_caches.isize = size;
- ppc64_caches.iline_size = lsize;
- ppc64_caches.log_iline_size = __ilog2(lsize);
- ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
- }
- }
+static void __init init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
+ u32 bsize, u32 sets)
+{
+ info->size = size;
+ info->sets = sets;
+ info->line_size = lsize;
+ info->block_size = bsize;
+ info->log_block_size = __ilog2(bsize);
+ if (bsize)
+ info->blocks_per_page = PAGE_SIZE / bsize;
+ else
+ info->blocks_per_page = 0;
- DBG(" <- initialize_cache_info()\n");
+ if (sets == 0)
+ info->assoc = 0xffff;
+ else
+ info->assoc = size / (sets * lsize);
}
-
-/*
- * Do some initial setup of the system. The parameters are those which
- * were passed in from the bootloader.
- */
-void __init setup_system(void)
+static bool __init parse_cache_info(struct device_node *np,
+ bool icache,
+ struct ppc_cache_info *info)
{
- DBG(" -> setup_system()\n");
-
- /* Apply the CPUs-specific and firmware specific fixups to kernel
- * text (nop out sections not relevant to this CPU or this firmware)
- */
- do_feature_fixups(cur_cpu_spec->cpu_features,
- &__start___ftr_fixup, &__stop___ftr_fixup);
- do_feature_fixups(cur_cpu_spec->mmu_features,
- &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
- do_feature_fixups(powerpc_firmware_features,
- &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
- do_lwsync_fixups(cur_cpu_spec->cpu_features,
- &__start___lwsync_fixup, &__stop___lwsync_fixup);
- do_final_fixups();
+ static const char *ipropnames[] __initdata = {
+ "i-cache-size",
+ "i-cache-sets",
+ "i-cache-block-size",
+ "i-cache-line-size",
+ };
+ static const char *dpropnames[] __initdata = {
+ "d-cache-size",
+ "d-cache-sets",
+ "d-cache-block-size",
+ "d-cache-line-size",
+ };
+ const char **propnames = icache ? ipropnames : dpropnames;
+ const __be32 *sizep, *lsizep, *bsizep, *setsp;
+ u32 size, lsize, bsize, sets;
+ bool success = true;
+
+ size = 0;
+ sets = -1u;
+ lsize = bsize = cur_cpu_spec->dcache_bsize;
+ sizep = of_get_property(np, propnames[0], NULL);
+ if (sizep != NULL)
+ size = be32_to_cpu(*sizep);
+ setsp = of_get_property(np, propnames[1], NULL);
+ if (setsp != NULL)
+ sets = be32_to_cpu(*setsp);
+ bsizep = of_get_property(np, propnames[2], NULL);
+ lsizep = of_get_property(np, propnames[3], NULL);
+ if (bsizep == NULL)
+ bsizep = lsizep;
+ if (lsizep == NULL)
+ lsizep = bsizep;
+ if (lsizep != NULL)
+ lsize = be32_to_cpu(*lsizep);
+ if (bsizep != NULL)
+ bsize = be32_to_cpu(*bsizep);
+ if (sizep == NULL || bsizep == NULL || lsizep == NULL)
+ success = false;
/*
- * Unflatten the device-tree passed by prom_init or kexec
+ * OF is weird .. it represents fully associative caches
+ * as "1 way" which doesn't make much sense and doesn't
+ * leave room for direct mapped. We'll assume that 0
+ * in OF means direct mapped for that reason.
*/
- unflatten_device_tree();
+ if (sets == 1)
+ sets = 0;
+ else if (sets == 0)
+ sets = 1;
- /*
- * Fill the ppc64_caches & systemcfg structures with informations
- * retrieved from the device-tree.
- */
- initialize_cache_info();
+ init_cache_info(info, size, lsize, bsize, sets);
-#ifdef CONFIG_PPC_RTAS
- /*
- * Initialize RTAS if available
- */
- rtas_initialize();
-#endif /* CONFIG_PPC_RTAS */
+ return success;
+}
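
Plugging the POWER8 L1d numbers hard-coded below (size 0x10000, 128-byte lines, 64 sets) into the init_cache_info() arithmetic gives the expected 8-way associativity; a minimal check of that computation:

	#include <assert.h>

	/* Replays the associativity arithmetic from init_cache_info()
	 * with the POWER8 L1d values used later in this file. */
	int main(void)
	{
		unsigned int size = 0x10000, lsize = 128, sets = 64;
		unsigned int assoc = sets ? size / (sets * lsize) : 0xffff;

		assert(assoc == 8);	/* POWER8 L1d is 8-way set associative */

		/* The OF quirk above: "1 way" in OF means fully associative
		 * (sets becomes the 0 sentinel), while 0 sets in OF is taken
		 * to mean direct mapped (sets becomes 1). */
		return 0;
	}
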
- /*
- * Check if we have an initrd provided via the device-tree
- */
- check_for_initrd();
+void __init initialize_cache_info(void)
+{
+ struct device_node *cpu = NULL, *l2, *l3 = NULL;
+ u32 pvr;
/*
- * Do some platform specific early initializations, that includes
- * setting up the hash table pointers. It also sets up some interrupt-mapping
- * related options that will be used by finish_device_tree()
- */
- if (ppc_md.init_early)
- ppc_md.init_early();
-
- /*
- * We can discover serial ports now since the above did setup the
- * hash table management for us, thus ioremap works. We do that early
- * so that further code can be debugged
+ * All shipping POWER8 machines have a firmware bug that
+ * puts incorrect information in the device-tree. This will
+ * be (hopefully) fixed for future chips but for now hard
+ * code the values if we are running on one of these
*/
- find_legacy_serial_ports();
+ pvr = PVR_VER(mfspr(SPRN_PVR));
+ if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
+ pvr == PVR_POWER8NVL) {
+ /* size lsize blk sets */
+ init_cache_info(&ppc64_caches.l1i, 0x8000, 128, 128, 32);
+ init_cache_info(&ppc64_caches.l1d, 0x10000, 128, 128, 64);
+ init_cache_info(&ppc64_caches.l2, 0x80000, 128, 0, 512);
+ init_cache_info(&ppc64_caches.l3, 0x800000, 128, 0, 8192);
+ } else
+ cpu = of_find_node_by_type(NULL, "cpu");
/*
- * Register early console
+ * We're assuming *all* of the CPUs have the same
+ * d-cache and i-cache sizes... -Peter
*/
- register_early_udbg_console();
+ if (cpu) {
+ if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
+ pr_warn("Argh, can't find dcache properties !\n");
- /*
- * Initialize xmon
- */
- xmon_setup();
+ if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
+ pr_warn("Argh, can't find icache properties !\n");
- smp_setup_cpu_maps();
- check_smt_enabled();
+ /*
+ * Try to find the L2 and L3 if any. Assume they are
+ * unified and use the D-side properties.
+ */
+ l2 = of_find_next_cache_node(cpu);
+ of_node_put(cpu);
+ if (l2) {
+ parse_cache_info(l2, false, &ppc64_caches.l2);
+ l3 = of_find_next_cache_node(l2);
+ of_node_put(l2);
+ }
+ if (l3) {
+ parse_cache_info(l3, false, &ppc64_caches.l3);
+ of_node_put(l3);
+ }
+ }
-#ifdef CONFIG_SMP
- /* Release secondary cpus out of their spinloops at 0x60 now that
- * we can map physical -> logical CPU ids
- */
- smp_release_cpus();
-#endif
+ /* For use by binfmt_elf */
+ dcache_bsize = ppc64_caches.l1d.block_size;
+ icache_bsize = ppc64_caches.l1i.block_size;
- printk("Starting Linux PPC64 %s\n", init_utsname()->version);
-
- printk("-----------------------------------------------------\n");
- printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
- printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size());
- if (ppc64_caches.dline_size != 0x80)
- printk("ppc64_caches.dcache_line_size = 0x%x\n",
- ppc64_caches.dline_size);
- if (ppc64_caches.iline_size != 0x80)
- printk("ppc64_caches.icache_line_size = 0x%x\n",
- ppc64_caches.iline_size);
-#ifdef CONFIG_PPC_STD_MMU_64
- if (htab_address)
- printk("htab_address = 0x%p\n", htab_address);
- printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
-#endif /* CONFIG_PPC_STD_MMU_64 */
- if (PHYSICAL_START > 0)
- printk("physical_start = 0x%llx\n",
- (unsigned long long)PHYSICAL_START);
- printk("-----------------------------------------------------\n");
-
- DBG(" <- setup_system()\n");
+ cur_cpu_spec->dcache_bsize = dcache_bsize;
+ cur_cpu_spec->icache_bsize = icache_bsize;
}
-/* This returns the limit below which memory accesses to the linear
- * mapping are guarnateed not to cause a TLB or SLB miss. This is
- * used to allocate interrupt or emergency stacks for which our
- * exception entry path doesn't deal with being interrupted.
+/*
+ * This returns the limit below which memory accesses to the linear
+ * mapping are guaranteed not to cause an architectural exception (e.g.,
+ * TLB or SLB miss fault).
+ *
+ * This is used to allocate PACAs and various interrupt stacks that
+ * are accessed early in interrupt handlers that must not cause
+ * re-entrant interrupts.
*/
-static u64 safe_stack_limit(void)
+__init u64 ppc64_bolted_size(void)
{
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/* Freescale BookE bolts the entire linear mapping */
- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
- return linear_map_top;
- /* Other BookE, we assume the first GB is bolted */
- return 1ul << 30;
+ return linear_map_top;
#else
- /* BookS, the first segment is bolted */
- if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+ /* BookS radix, does not take faults on linear mapping */
+ if (early_radix_enabled())
+ return ULONG_MAX;
+
+ /* BookS hash, the first segment is bolted */
+ if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
return 1UL << SID_SHIFT_1T;
return 1UL << SID_SHIFT;
#endif
}
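
For the hash cases above, the bolted limit is one segment: 256MB ordinary segments or 1TB segments. SID_SHIFT and SID_SHIFT_1T are the hash-MMU constants 28 and 40; a quick check of those shifts:

	#include <assert.h>

	/* Segment sizes behind ppc64_bolted_size() on hash. */
	#define SID_SHIFT	28	/* 256MB segments */
	#define SID_SHIFT_1T	40	/* 1TB segments */

	int main(void)
	{
		assert((1ULL << SID_SHIFT) == 256ULL * 1024 * 1024);
		assert((1ULL << SID_SHIFT_1T) == 1024ULL * 1024 * 1024 * 1024);
		return 0;
	}
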
-static void __init irqstack_early_init(void)
+static void *__init alloc_stack(unsigned long limit, int cpu)
+{
+ void *ptr;
+
+ BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
+
+ ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_ALIGN,
+ MEMBLOCK_LOW_LIMIT, limit,
+ early_cpu_to_node(cpu));
+ if (!ptr)
+ panic("cannot allocate stacks");
+
+ return ptr;
+}
+
+void __init irqstack_early_init(void)
{
- u64 limit = safe_stack_limit();
+ u64 limit = ppc64_bolted_size();
unsigned int i;
/*
* Interrupt stacks must be in the first segment since we
- * cannot afford to take SLB misses on them.
+ * cannot afford to take SLB misses on them. They are not
+ * accessed in realmode.
*/
for_each_possible_cpu(i) {
- softirq_ctx[i] = (struct thread_info *)
- __va(memblock_alloc_base(THREAD_SIZE,
- THREAD_SIZE, limit));
- hardirq_ctx[i] = (struct thread_info *)
- __va(memblock_alloc_base(THREAD_SIZE,
- THREAD_SIZE, limit));
+ softirq_ctx[i] = alloc_stack(limit, i);
+ hardirq_ctx[i] = alloc_stack(limit, i);
}
}
-#ifdef CONFIG_PPC_BOOK3E
-static void __init exc_lvl_early_init(void)
+#ifdef CONFIG_PPC_BOOK3E_64
+void __init exc_lvl_early_init(void)
{
- extern unsigned int interrupt_base_book3e;
- extern unsigned int exc_debug_debug_book3e;
-
unsigned int i;
for_each_possible_cpu(i) {
- critirq_ctx[i] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
- dbgirq_ctx[i] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
- mcheckirq_ctx[i] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+ void *sp;
+
+ sp = alloc_stack(ULONG_MAX, i);
+ critirq_ctx[i] = sp;
+ paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;
+
+ sp = alloc_stack(ULONG_MAX, i);
+ dbgirq_ctx[i] = sp;
+ paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;
+
+ sp = alloc_stack(ULONG_MAX, i);
+ mcheckirq_ctx[i] = sp;
+ paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
}
if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
- patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1,
- (unsigned long)&exc_debug_debug_book3e, 0);
+ patch_exception(0x040, exc_debug_debug_book3e);
}
-#else
-#define exc_lvl_early_init()
#endif
/*
* Stack space used when we detect a bad kernel stack pointer, and
- * early in SMP boots before relocation is enabled.
+ * early in SMP boots before relocation is enabled. Exclusive emergency
+ * stack for machine checks.
*/
-static void __init emergency_stack_init(void)
+void __init emergency_stack_init(void)
{
- u64 limit;
+ u64 limit, mce_limit;
unsigned int i;
/*
@@ -542,132 +779,55 @@ static void __init emergency_stack_init(void)
* aligned.
*
* Since we use these as temporary stacks during secondary CPU
- * bringup, we need to get at them in real mode. This means they
- * must also be within the RMO region.
+ * bringup, machine check, system reset, and HMI, we need to get
+ * at them in real mode. This means they must also be within the RMO
+ * region.
+ *
+ * The IRQ stacks allocated elsewhere in this file are zeroed and
+ * initialized in kernel/irq.c. These are initialized here in order
+ * to have emergency stacks available as early as possible.
*/
- limit = min(safe_stack_limit(), ppc64_rma_size);
-
- for_each_possible_cpu(i) {
- unsigned long sp;
- sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
- sp += THREAD_SIZE;
- paca[i].emergency_sp = __va(sp);
- }
-}
-
-/*
- * Called into from start_kernel this initializes bootmem, which is used
- * to manage page allocation until mem_init is called.
- */
-void __init setup_arch(char **cmdline_p)
-{
- ppc64_boot_msg(0x12, "Setup Arch");
-
- *cmdline_p = cmd_line;
+ limit = mce_limit = min(ppc64_bolted_size(), ppc64_rma_size);
/*
- * Set cache line size based on type of cpu as a default.
- * Systems with OF can look in the properties on the cpu node(s)
- * for a possibly more accurate value.
+ * Machine check on pseries calls rtas, but can't use the static
+ * rtas_args due to a machine check hitting while the lock is held.
+ * rtas args have to be under 4GB, so the machine check stack is
+ * limited to 4GB so args can be put on stack.
*/
- dcache_bsize = ppc64_caches.dline_size;
- icache_bsize = ppc64_caches.iline_size;
-
- /* reboot on panic */
- panic_timeout = 180;
+ if (firmware_has_feature(FW_FEATURE_LPAR) && mce_limit > SZ_4G)
+ mce_limit = SZ_4G;
- if (ppc_md.panic)
- setup_panic();
-
- init_mm.start_code = (unsigned long)_stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = klimit;
-#ifdef CONFIG_PPC_64K_PAGES
- init_mm.context.pte_frag = NULL;
-#endif
- irqstack_early_init();
- exc_lvl_early_init();
- emergency_stack_init();
+ for_each_possible_cpu(i) {
+ paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
-#ifdef CONFIG_PPC_STD_MMU_64
- stabs_alloc();
-#endif
- /* set up the bootmem stuff with available memory */
- do_init_bootmem();
- sparse_init();
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* emergency stack for NMI exception handling. */
+ paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
+ /* emergency stack for machine check exception handling. */
+ paca_ptrs[i]->mc_emergency_sp = alloc_stack(mce_limit, i) + THREAD_SIZE;
#endif
-
- if (ppc_md.setup_arch)
- ppc_md.setup_arch();
-
- paging_init();
-
- /* Initialize the MMU context management stuff */
- mmu_context_init();
-
- kvm_linear_init();
-
- /* Interrupt code needs to be 64K-aligned */
- if ((unsigned long)_stext & 0xffff)
- panic("Kernelbase not 64K-aligned (0x%lx)!\n",
- (unsigned long)_stext);
-
- ppc64_boot_msg(0x15, "Setup Done");
-}
-
-
-/* ToDo: do something useful if ppc_md is not yet setup. */
-#define PPC64_LINUX_FUNCTION 0x0f000000
-#define PPC64_IPL_MESSAGE 0xc0000000
-#define PPC64_TERM_MESSAGE 0xb0000000
-
-static void ppc64_do_msg(unsigned int src, const char *msg)
-{
- if (ppc_md.progress) {
- char buf[128];
-
- sprintf(buf, "%08X\n", src);
- ppc_md.progress(buf, 0);
- snprintf(buf, 128, "%s", msg);
- ppc_md.progress(buf, 0);
}
}
-/* Print a boot progress message. */
-void ppc64_boot_msg(unsigned int src, const char *msg)
-{
- ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
- printk("[boot]%04x %s\n", src, msg);
-}
-
#ifdef CONFIG_SMP
-#define PCPU_DYN_SIZE ()
-
-static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
-{
- return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
- __pa(MAX_DMA_ADDRESS));
-}
-
-static void __init pcpu_fc_free(void *ptr, size_t size)
-{
- free_bootmem(__pa(ptr), size);
-}
-
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
- if (cpu_to_node(from) == cpu_to_node(to))
+ if (early_cpu_to_node(from) == early_cpu_to_node(to))
return LOCAL_DISTANCE;
else
return REMOTE_DISTANCE;
}
+static __init int pcpu_cpu_to_node(int cpu)
+{
+ return early_cpu_to_node(cpu);
+}
+
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
+DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
void __init setup_per_cpu_areas(void)
{
@@ -675,34 +835,94 @@ void __init setup_per_cpu_areas(void)
size_t atom_size;
unsigned long delta;
unsigned int cpu;
- int rc;
+ int rc = -EINVAL;
/*
- * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
- * to group units. For larger mappings, use 1M atom which
- * should be large enough to contain a number of units.
+ * BookE and BookS radix are historical values and should be revisited.
*/
- if (mmu_linear_psize == MMU_PAGE_4K)
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
+ atom_size = SZ_1M;
+ } else if (radix_enabled()) {
atom_size = PAGE_SIZE;
- else
- atom_size = 1 << 20;
+ } else if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU)) {
+ /*
+ * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
+ * to group units. For larger mappings, use 1M atom which
+ * should be large enough to contain a number of units.
+ */
+ if (mmu_linear_psize == MMU_PAGE_4K)
+ atom_size = PAGE_SIZE;
+ else
+ atom_size = SZ_1M;
+ }
+
+ if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+ rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
+ pcpu_cpu_to_node);
+ if (rc)
+ pr_warn("PERCPU: %s allocator failed (%d), "
+ "falling back to page size\n",
+ pcpu_fc_names[pcpu_chosen_fc], rc);
+ }
- rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
- pcpu_fc_alloc, pcpu_fc_free);
+ if (rc < 0)
+ rc = pcpu_page_first_chunk(0, pcpu_cpu_to_node);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
+ static_key_enable(&__percpu_first_chunk_is_paged.key);
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu) {
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
- paca[cpu].data_offset = __per_cpu_offset[cpu];
+ paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
}
}
#endif
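
The data_offset stored in each paca is the usual per-cpu scheme: a per-CPU variable's address is its static address plus that CPU's offset, computed above as delta + pcpu_unit_offsets[cpu]. A toy model of that arithmetic, with invented addresses:

	#include <stdio.h>

	/* Toy model of __per_cpu_offset[]; every address here is made up. */
	int main(void)
	{
		unsigned long long per_cpu_start = 0xc000000001000000ULL; /* __per_cpu_start */
		unsigned long long base_addr     = 0xc000000008000000ULL; /* pcpu_base_addr */
		unsigned long long unit_offsets[2] = { 0x0, 0x20000 };	  /* pcpu_unit_offsets */
		unsigned long long delta = base_addr - per_cpu_start;

		for (int cpu = 0; cpu < 2; cpu++)
			printf("cpu%d data_offset = 0x%llx\n", cpu,
			       delta + unit_offsets[cpu]);
		return 0;
	}
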
+#ifdef CONFIG_MEMORY_HOTPLUG
+unsigned long memory_block_size_bytes(void)
+{
+ if (ppc_md.memory_block_size)
+ return ppc_md.memory_block_size();
-#ifdef CONFIG_PPC_INDIRECT_IO
+ return MIN_MEMORY_BLOCK_SIZE;
+}
+#endif
+
+#ifdef CONFIG_PPC_INDIRECT_PIO
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
-#endif /* CONFIG_PPC_INDIRECT_IO */
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
+u64 hw_nmi_get_sample_period(int watchdog_thresh)
+{
+ return ppc_proc_freq * watchdog_thresh;
+}
+#endif
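
hw_nmi_get_sample_period() sizes the perf event in CPU cycles: clock frequency times the threshold in seconds. A worked example, assuming a 3.5GHz clock and the usual 10s watchdog_thresh default:

	#include <stdio.h>

	/* Worked instance of the formula above; the 3.5GHz clock is an
	 * assumed value, ppc_proc_freq is in Hz in the kernel. */
	int main(void)
	{
		unsigned long long ppc_proc_freq = 3500000000ULL; /* 3.5 GHz */
		int watchdog_thresh = 10;			  /* seconds */

		/* 35000000000 cycles between PMU samples */
		printf("%llu cycles\n", ppc_proc_freq * watchdog_thresh);
		return 0;
	}
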
+
+/*
+ * The perf based hardlockup detector breaks PMU event based branches, so
+ * disable it by default. Book3S has a soft-nmi hardlockup detector based
+ * on the decrementer interrupt, so it does not suffer from this problem.
+ *
+ * It is likely to get false positives in KVM guests, so disable it there
+ * by default too. PowerVM will not stop or arbitrarily oversubscribe
+ * CPUs, but gives a minimum regular allotment even with SPLPAR, so the
+ * detector is left enabled for non-KVM LPARs, which are assumed to be
+ * PowerVM.
+ */
+static int __init disable_hardlockup_detector(void)
+{
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
+ hardlockup_detector_disable();
+#else
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ check_kvm_guest();
+ if (is_kvm_guest())
+ hardlockup_detector_disable();
+ }
+#endif
+
+ return 0;
+}
+early_initcall(disable_hardlockup_detector);