From 256588fda10f2a712631f8a4e72641a66adebdb8 Mon Sep 17 00:00:00 2001
From: Guenter Roeck
Date: Mon, 9 Sep 2013 18:37:56 -0700
Subject: powerpc: Export cpu_to_chip_id() to fix build error

powerpc allmodconfig build fails with:

ERROR: ".cpu_to_chip_id" [drivers/block/mtip32xx/mtip32xx.ko] undefined!

The problem was introduced with commit 15863ff3b (powerpc: Make
chip-id information available to userspace).

Export the missing symbol.

Cc: Vasant Hegde
Cc: Shivaprasad G Bhat
Signed-off-by: Guenter Roeck
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/smp.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 442d8e23f8f4..8e59abc237d7 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -611,6 +611,7 @@ int cpu_to_chip_id(int cpu)
 	of_node_put(np);
 	return of_get_ibm_chip_id(np);
 }
+EXPORT_SYMBOL(cpu_to_chip_id);
 
 /* Helper routines for cpu to core mapping */
 int cpu_core_index_of_thread(int cpu)
-- cgit

From 69e044dd759e26667c110ab771d2c908fbd5d3dd Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Tue, 10 Sep 2013 18:44:42 +0530
Subject: powerpc: Fix possible deadlock on page fault

stack_grow_into/14082 is trying to acquire lock:
 (&mm->mmap_sem){++++++}, at: [] .might_fault+0x78/0xe0

but task is already holding lock:
 (&mm->mmap_sem){++++++}, at: [] .do_page_fault+0x24c/0x910

other info that might help us debug this:

 Possible unsafe locking scenario:

       CPU0
       ----
  lock(&mm->mmap_sem);
  lock(&mm->mmap_sem);

 *** DEADLOCK ***

 May be due to missing lock nesting notation

1 lock held by stack_grow_into/14082:
 #0:  (&mm->mmap_sem){++++++}, at: [] .do_page_fault+0x24c/0x910

stack backtrace:
CPU: 21 PID: 14082 Comm: stack_grow_into Not tainted 3.10.0-10.el7.ppc64.debug #1
Call Trace:
[c0000003d396b850] [c000000000016e7c] .show_stack+0x7c/0x1f0 (unreliable)
[c0000003d396b920] [c000000000813fc8] .dump_stack+0x28/0x3c
[c0000003d396b990] [c000000000124b90] .__lock_acquire+0x1640/0x1800
[c0000003d396bab0] [c00000000012570c] .lock_acquire+0xac/0x250
[c0000003d396bb80] [c000000000206d54] .might_fault+0xa4/0xe0
[c0000003d396bbf0] [c0000000007ffe2c] .do_page_fault+0x2ec/0x910
[c0000003d396be30] [c0000000000092e8] handle_page_fault+0x10/0x30

Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/fault.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 76d8e7cc7805..2dd69bf4af46 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -206,7 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	int trap = TRAP(regs);
 	int is_exec = trap == 0x400;
 	int fault;
-	int rc = 0;
+	int rc = 0, store_update_sp = 0;
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 	/*
@@ -280,6 +280,14 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
+	/*
+	 * We want to do this outside mmap_sem, because reading code around nip
+	 * can result in fault, which will cause a deadlock when called with
+	 * mmap_sem held
+	 */
+	if (user_mode(regs))
+		store_update_sp = store_updates_sp(regs);
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
 	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -345,8 +353,7 @@ retry:
 		 * between the last mapped region and the stack will
 		 * expand the stack rather than segfaulting.
 		 */
-		if (address + 2048 < uregs->gpr[1]
-		    && (!user_mode(regs) || !store_updates_sp(regs)))
+		if (address + 2048 < uregs->gpr[1] && !store_update_sp)
 			goto bad_area;
 	}
 	if (expand_stack(vma, address))
-- cgit

From 620e5050827008ab207a8dfcc44cb79f07f1942c Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Tue, 10 Sep 2013 18:42:07 +0200
Subject: powerpc: Fix section mismatch warning for prom_rtas_call

While cross-building for PPC64 I've got

WARNING: vmlinux.o(.text.unlikely+0x1ba): Section mismatch in reference
from the function .prom_rtas_call() to the variable
.init.data:dt_string_start
The function .prom_rtas_call() references the variable __initdata
dt_string_start.
This is often because .prom_rtas_call lacks a __initdata annotation or
the annotation of dt_string_start is wrong.

WARNING: vmlinux.o(.meminit.text+0xeb0): Section mismatch in reference
from the function .free_area_init_core.isra.47() to the function
.init.text:.set_pageblock_order()
The function __meminit .free_area_init_core.isra.47() references a
function __init .set_pageblock_order().
If .set_pageblock_order is only used by .free_area_init_core.isra.47
then annotate .set_pageblock_order with a matching annotation.

Fix it by properly annotating prom_rtas_call.

Signed-off-by: Vladimir Murzin
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/prom_init.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 7b6391b68fb8..12e656ffe60e 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1297,7 +1297,8 @@ static void __init prom_query_opal(void)
 		prom_opal_align = 0x10000;
 }
 
-static int prom_rtas_call(int token, int nargs, int nret, int *outputs, ...)
+static int __init prom_rtas_call(int token, int nargs, int nret,
+				 int *outputs, ...)
 {
 	struct rtas_args rtas_args;
 	va_list list;
-- cgit

From 363edbe2614aa90df706c0f19ccfa2a6c06af0be Mon Sep 17 00:00:00 2001
From: Vaidyanathan Srinivasan
Date: Fri, 6 Sep 2013 00:25:06 +0530
Subject: powerpc: Default arch idle could cede processor on pseries

When adding cpuidle support to pSeries, we introduced two regressions:

  - The new cpuidle backend driver only works under hypervisors
    supporting the "SPLPAR" option, which isn't the case for the old
    POWER4 hypervisor and the HV "light" used on js2x blades

  - The cpuidle driver registers fairly late, meaning that for a
    significant portion of the boot process, we end up having all
    threads spinning. This slows down the boot process and increases
    the overall resource usage if the hypervisor has shared processors.

This fixes both by implementing a "default" idle that will cede to the
hypervisor when possible, in a very simple way without all the bells
and whistles of cpuidle.
Reported-by: Paul Mackerras
Signed-off-by: Vaidyanathan Srinivasan
Acked-by: Deepthi Dharwar
Signed-off-by: Benjamin Herrenschmidt
CC:
---
 arch/powerpc/platforms/pseries/setup.c | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index d64feb3ea0be..1f97e2b87a62 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -354,7 +354,7 @@ static int alloc_dispatch_log_kmem_cache(void)
 }
 early_initcall(alloc_dispatch_log_kmem_cache);
 
-static void pSeries_idle(void)
+static void pseries_lpar_idle(void)
 {
 	/* This would call on the cpuidle framework, and the back-end pseries
 	 * driver to go to idle states
@@ -362,10 +362,22 @@ static void pSeries_idle(void)
 	if (cpuidle_idle_call()) {
 		/* On error, execute default handler
 		 * to go into low thread priority and possibly
-		 * low power mode.
+		 * low power mode by cedeing processor to hypervisor
 		 */
-		HMT_low();
-		HMT_very_low();
+
+		/* Indicate to hypervisor that we are idle. */
+		get_lppaca()->idle = 1;
+
+		/*
+		 * Yield the processor to the hypervisor.  We return if
+		 * an external interrupt occurs (which are driven prior
+		 * to returning here) or if a prod occurs from another
+		 * processor. When returning here, external interrupts
+		 * are enabled.
+		 */
+		cede_processor();
+
+		get_lppaca()->idle = 0;
 	}
 }
 
@@ -456,15 +468,14 @@ static void __init pSeries_setup_arch(void)
 
 	pSeries_nvram_init();
 
-	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+	if (firmware_has_feature(FW_FEATURE_LPAR)) {
 		vpa_init(boot_cpuid);
-		ppc_md.power_save = pSeries_idle;
-	}
-
-	if (firmware_has_feature(FW_FEATURE_LPAR))
+		ppc_md.power_save = pseries_lpar_idle;
 		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
-	else
+	} else {
+		/* No special idle routine */
 		ppc_md.enable_pmcs = power4_enable_pmcs;
+	}
 
 	ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
-- cgit
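
As background for the first patch in this series: the allmodconfig failure
happens because modular code calls cpu_to_chip_id(), and a symbol defined in
the kernel image can only be resolved from a module once it is exported. The
fragment below is a minimal, hypothetical module sketched purely for
illustration (it is not the mtip32xx code, and <asm/smp.h> is assumed to be
where the powerpc cpu_to_chip_id() prototype lives). Without the
EXPORT_SYMBOL(cpu_to_chip_id) line added by that patch, modpost rejects a
module like this with the same kind of "undefined!" error.

/* chip_id_demo.c - illustrative sketch only, not part of the patches above */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/cpumask.h>
#include <asm/smp.h>	/* assumed location of the powerpc cpu_to_chip_id() prototype */

static int __init chip_id_demo_init(void)
{
	int cpu;

	/* Calling the exported helper from module code is what needs EXPORT_SYMBOL. */
	for_each_online_cpu(cpu)
		pr_info("cpu %d sits on chip %d\n", cpu, cpu_to_chip_id(cpu));

	return 0;
}

static void __exit chip_id_demo_exit(void)
{
}

module_init(chip_id_demo_init);
module_exit(chip_id_demo_exit);
MODULE_LICENSE("GPL");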