Diffstat (limited to 'arch/powerpc/mm/book3s64/slb.c'):
 -rw-r--r--  arch/powerpc/mm/book3s64/slb.c | 133
 1 file changed, 21 insertions(+), 112 deletions(-)
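
For readers skimming the hunks below: preload_add() and preload_age() maintain a small per-thread FIFO of recently used user ESIDs, and this patch changes preload_add() to return void and to bail out when preloading is disabled. The standalone sketch that follows models that ring buffer so the modular arithmetic in the hunks is easier to follow. It is an illustration, not kernel code: struct preload_cache and main() are invented for the demo, the real fields live in struct thread_info, the SLB_PRELOAD_NR and SID_SHIFT values are assumptions picked for the example, and the 1T-segment masking done by the real code is omitted.

/*
 * Standalone model of the SLB preload cache touched by this patch.
 * Illustrative only: struct preload_cache is invented here, and the
 * constants below are assumed values, not taken verbatim from headers.
 */
#include <stdbool.h>
#include <stdio.h>

#define SLB_PRELOAD_NR	16	/* ring capacity (assumed for the demo) */
#define SID_SHIFT	28	/* 256MB segment shift (assumed) */

struct preload_cache {
	unsigned long esid[SLB_PRELOAD_NR];	/* EAs stored >> SID_SHIFT */
	unsigned int tail;			/* index of the oldest entry */
	unsigned int nr;			/* current occupancy */
};

static bool preload_hit(struct preload_cache *c, unsigned long esid)
{
	unsigned int i;

	for (i = 0; i < c->nr; i++) {
		unsigned int idx = (c->tail + i) % SLB_PRELOAD_NR;

		if (c->esid[idx] == esid)
			return true;
	}
	return false;
}

/* Mirrors the patched preload_add(): returns nothing, just records the ESID. */
static void preload_add(struct preload_cache *c, unsigned long ea)
{
	unsigned long esid = ea >> SID_SHIFT;
	unsigned int idx;

	if (preload_hit(c, esid))
		return;

	idx = (c->tail + c->nr) % SLB_PRELOAD_NR;
	c->esid[idx] = esid;

	if (c->nr == SLB_PRELOAD_NR)
		c->tail = (c->tail + 1) % SLB_PRELOAD_NR;	/* overwrite oldest */
	else
		c->nr++;
}

/* Mirrors preload_age(): drop the oldest entry, as done across context switches. */
static void preload_age(struct preload_cache *c)
{
	if (!c->nr)
		return;
	c->nr--;
	c->tail = (c->tail + 1) % SLB_PRELOAD_NR;
}

int main(void)
{
	struct preload_cache c = { { 0 }, 0, 0 };

	preload_add(&c, 0x10000000UL);		/* typical executable base */
	preload_add(&c, 0x7fffe0000000UL);	/* e.g. a stack or mmap address */
	preload_add(&c, 0x10000000UL);		/* duplicate ESID: ignored */
	printf("entries cached: %u\n", c.nr);	/* prints 2 */

	preload_age(&c);
	printf("after aging: %u\n", c.nr);	/* prints 1 */
	return 0;
}
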
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index c91bd85eb90e..15f73abd1506 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -9,11 +9,11 @@
* Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
*/
-#include <asm/asm-prototypes.h>
#include <asm/interrupt.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
+#include <asm/lppaca.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
@@ -24,7 +24,7 @@
#include <linux/pgtable.h>
#include <asm/udbg.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include "internal.h"
@@ -42,6 +42,15 @@ early_param("stress_slb", parse_stress_slb);
__ro_after_init DEFINE_STATIC_KEY_FALSE(stress_slb_key);
+bool no_slb_preload __initdata;
+static int __init parse_no_slb_preload(char *p)
+{
+ no_slb_preload = true;
+ return 0;
+}
+early_param("no_slb_preload", parse_no_slb_preload);
+__ro_after_init DEFINE_STATIC_KEY_FALSE(no_slb_preload_key);
+
static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
@@ -294,11 +303,14 @@ static bool preload_hit(struct thread_info *ti, unsigned long esid)
return false;
}
-static bool preload_add(struct thread_info *ti, unsigned long ea)
+static void preload_add(struct thread_info *ti, unsigned long ea)
{
unsigned char idx;
unsigned long esid;
+ if (slb_preload_disabled())
+ return;
+
if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
/* EAs are stored >> 28 so 256MB segments don't need clearing */
if (ea & ESID_MASK_1T)
@@ -308,7 +320,7 @@ static bool preload_add(struct thread_info *ti, unsigned long ea)
esid = ea >> SID_SHIFT;
if (preload_hit(ti, esid))
- return false;
+ return;
idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR;
ti->slb_preload_esid[idx] = esid;
@@ -316,8 +328,6 @@ static bool preload_add(struct thread_info *ti, unsigned long ea)
ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
else
ti->slb_preload_nr++;
-
- return true;
}
static void preload_age(struct thread_info *ti)
@@ -328,94 +338,6 @@ static void preload_age(struct thread_info *ti)
ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
}
-void slb_setup_new_exec(void)
-{
- struct thread_info *ti = current_thread_info();
- struct mm_struct *mm = current->mm;
- unsigned long exec = 0x10000000;
-
- WARN_ON(irqs_disabled());
-
- /*
- * preload cache can only be used to determine whether a SLB
- * entry exists if it does not start to overflow.
- */
- if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
- return;
-
- hard_irq_disable();
-
- /*
- * We have no good place to clear the slb preload cache on exec,
- * flush_thread is about the earliest arch hook but that happens
- * after we switch to the mm and have aleady preloaded the SLBEs.
- *
- * For the most part that's probably okay to use entries from the
- * previous exec, they will age out if unused. It may turn out to
- * be an advantage to clear the cache before switching to it,
- * however.
- */
-
- /*
- * preload some userspace segments into the SLB.
- * Almost all 32 and 64bit PowerPC executables are linked at
- * 0x10000000 so it makes sense to preload this segment.
- */
- if (!is_kernel_addr(exec)) {
- if (preload_add(ti, exec))
- slb_allocate_user(mm, exec);
- }
-
- /* Libraries and mmaps. */
- if (!is_kernel_addr(mm->mmap_base)) {
- if (preload_add(ti, mm->mmap_base))
- slb_allocate_user(mm, mm->mmap_base);
- }
-
- /* see switch_slb */
- asm volatile("isync" : : : "memory");
-
- local_irq_enable();
-}
-
-void preload_new_slb_context(unsigned long start, unsigned long sp)
-{
- struct thread_info *ti = current_thread_info();
- struct mm_struct *mm = current->mm;
- unsigned long heap = mm->start_brk;
-
- WARN_ON(irqs_disabled());
-
- /* see above */
- if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
- return;
-
- hard_irq_disable();
-
- /* Userspace entry address. */
- if (!is_kernel_addr(start)) {
- if (preload_add(ti, start))
- slb_allocate_user(mm, start);
- }
-
- /* Top of stack, grows down. */
- if (!is_kernel_addr(sp)) {
- if (preload_add(ti, sp))
- slb_allocate_user(mm, sp);
- }
-
- /* Bottom of heap, grows up. */
- if (heap && !is_kernel_addr(heap)) {
- if (preload_add(ti, heap))
- slb_allocate_user(mm, heap);
- }
-
- /* see switch_slb */
- asm volatile("isync" : : : "memory");
-
- local_irq_enable();
-}
-
static void slb_cache_slbie_kernel(unsigned int index)
{
unsigned long slbie_data = get_paca()->slb_cache[index];
@@ -502,6 +424,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
copy_mm_to_paca(mm);
+ if (slb_preload_disabled())
+ return;
+
/*
* We gradually age out SLBs after a number of context switches to
* reduce reload overhead of unused entries (like we do with FP/VEC
@@ -616,7 +541,7 @@ static void slb_cache_update(unsigned long esid_data)
} else {
/*
* Our cache is full and the current cache content strictly
- * doesn't indicate the active SLB conents. Bump the ptr
+ * doesn't indicate the active SLB contents. Bump the ptr
* so that switch_slb() will ignore the cache.
*/
local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
@@ -822,7 +747,7 @@ DEFINE_INTERRUPT_HANDLER_RAW(do_slb_fault)
/* IRQs are not reconciled here, so can't check irqs_disabled */
VM_WARN_ON(mfmsr() & MSR_EE);
- if (unlikely(!(regs->msr & MSR_RI)))
+ if (regs_is_unrecoverable(regs))
return -EINVAL;
/*
@@ -868,19 +793,3 @@ DEFINE_INTERRUPT_HANDLER_RAW(do_slb_fault)
return err;
}
}
-
-DEFINE_INTERRUPT_HANDLER(do_bad_slb_fault)
-{
- int err = regs->result;
-
- if (err == -EFAULT) {
- if (user_mode(regs))
- _exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
- else
- bad_page_fault(regs, SIGSEGV);
- } else if (err == -EINVAL) {
- unrecoverable_exception(regs);
- } else {
- BUG();
- }
-}
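
A note on the new no_slb_preload switch introduced above: early_param() registers a handler that runs while the kernel parses the command line, the handler sets the no_slb_preload flag, and the hot paths (preload_add() and switch_slb()) then return early when preloading is disabled. The patch also defines a static key (no_slb_preload_key), presumably so the common enabled case costs only a patched no-op branch. The userspace sketch below mirrors the control flow only: slb_preload_disabled() here is an assumed helper standing in for whatever the kernel defines outside this diff, and the argv scan is a crude substitute for early_param().

/*
 * Illustration of the boot-parameter gating pattern, not kernel code.
 * A command-line flag and a plain bool stand in for early_param() and
 * the static key used by the real patch.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool no_slb_preload;	/* set once during "early boot" (argv here) */

/* Assumed helper: the kernel's equivalent is defined outside this diff. */
static bool slb_preload_disabled(void)
{
	return no_slb_preload;
}

/* Stand-in for the patched preload_add(): bail out before touching the cache. */
static void preload_add(unsigned long ea)
{
	if (slb_preload_disabled())
		return;

	printf("caching ESID for ea 0x%lx\n", ea);
	/* ... record ea >> SID_SHIFT in the per-thread ring, as in slb.c ... */
}

/* Stand-in for switch_slb(): skip the preload walk entirely when disabled. */
static void switch_slb(void)
{
	if (slb_preload_disabled())
		return;

	printf("reloading cached user SLB entries\n");
}

int main(int argc, char **argv)
{
	int i;

	/* crude analogue of early_param("no_slb_preload", parse_no_slb_preload) */
	for (i = 1; i < argc; i++)
		if (!strcmp(argv[i], "no_slb_preload"))
			no_slb_preload = true;

	preload_add(0x10000000UL);
	switch_slb();
	return 0;
}
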