author		Linus Torvalds <torvalds@linux-foundation.org>	2017-08-04 10:17:45 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-08-04 10:17:45 -0700
commit		0a23ea65ce9f10ec2ea392571006b781b150327f (patch)
tree		1906d633c94131ed850b8b8b0672bc774b694697 /arch/sparc/include
parent		c63716ab4d77f3df7d12260fc62cbf847c2a85d1 (diff)
parent		0ede1c401332173ab0693121dc6cde04a4dbf131 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc fixes from David Miller:

 - block interrupts properly across the entire MMU context change (both
   the hw MMU context change and the TSB table change) so that we don't
   get a perf event interrupt in the middle. From Rob Gardner.

 - be sure to register hugepages early enough, from Nitin Gupta.

 - UltraSPARC-III user copy exception handling would return garbage for
   the copied length in some circumstances.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Fix exception handling in UltraSPARC-III memcpy.
  sbus: Convert to using %pOF instead of full_name
  sparc: defconfig: Cleanup from old Kconfig options
  sparc64: Register hugepages during arch init
  sparc64: Prevent perf from running during super critical sections
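The first item above is the one touched by the diff below. Before the fix, switch_mm() programmed the secondary MMU context and switched the TSB tables in two separate steps, so a perf NMI landing between them could observe the new context paired with the old TSB pointers. The sketch below is purely illustrative, not the kernel code: it relies on sparc64 kernel-internal symbols (load_secondary_context, tsb_context_switch, tsb_context_switch_ctx, CTX_HWBITS) and the two wrapper function names are made up; it only shows the shape of the change.

	/* Illustrative sketch only -- the wrapper names below are
	 * hypothetical, not in-tree functions, and this fragment is not
	 * buildable outside the sparc64 kernel tree.
	 */

	/* Old shape: two independently interruptible steps. */
	static void old_style_switch(struct mm_struct *mm)
	{
		load_secondary_context(mm);	/* hw MMU context updated here */
		/* ... a perf event NMI arriving at this point sees the new
		 * context but the old TSB tables ...
		 */
		tsb_context_switch(mm);		/* TSB tables updated here */
	}

	/* New shape: the context value is handed down to the low-level
	 * routine, so both updates happen in one section with interrupts
	 * held off.
	 */
	static void new_style_switch(struct mm_struct *mm)
	{
		tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
	}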
Diffstat (limited to 'arch/sparc/include')
-rw-r--r--	arch/sparc/include/asm/mmu_context_64.h	14
1 file changed, 9 insertions, 5 deletions
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 2cddcda4f85f..87841d687f8d 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -27,9 +27,11 @@ void destroy_context(struct mm_struct *mm);
 void __tsb_context_switch(unsigned long pgd_pa,
 			  struct tsb_config *tsb_base,
 			  struct tsb_config *tsb_huge,
-			  unsigned long tsb_descr_pa);
+			  unsigned long tsb_descr_pa,
+			  unsigned long secondary_ctx);
 
-static inline void tsb_context_switch(struct mm_struct *mm)
+static inline void tsb_context_switch_ctx(struct mm_struct *mm,
+					   unsigned long ctx)
 {
 	__tsb_context_switch(__pa(mm->pgd),
 			     &mm->context.tsb_block[MM_TSB_BASE],
@@ -40,9 +42,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
+			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
+			     ctx);
 }
 
+#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
+
 void tsb_grow(struct mm_struct *mm,
 	      unsigned long tsb_index,
 	      unsigned long mm_rss);
@@ -112,8 +117,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * cpu0 to update it's TSB because at that point the cpu_vm_mask
 	 * only had cpu1 set in it.
 	 */
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 
 	/* Any time a processor runs a context on an address space
 	 * for the first time, we must flush that context out of the
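Note the compatibility macro added in the second hunk: existing callers of tsb_context_switch() (for example the TSB-resizing path) keep working unchanged, since the macro expands to tsb_context_switch_ctx(X, 0), and passing a zero context asks the low-level routine to leave the secondary context register alone. A minimal sketch of the two call styles after this change, assuming in-kernel sparc64 context; the two function names below are hypothetical, not in-tree code.

	/* Hedged sketch, assuming a sparc64 kernel build; illustrative only. */
	#include <linux/mm_types.h>
	#include <asm/mmu_context.h>

	static void refresh_tsb_pointers(struct mm_struct *mm)
	{
		/* Old-style call, unchanged for existing callers: expands to
		 * tsb_context_switch_ctx(mm, 0), so only the TSB registers are
		 * reloaded and the secondary context is not rewritten.
		 */
		tsb_context_switch(mm);
	}

	static void switch_context_and_tsb(struct mm_struct *mm)
	{
		/* New-style call used by switch_mm(): the hardware context
		 * number rides along, so the secondary context register and
		 * the TSB pointers are updated as one uninterruptible unit.
		 */
		tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
	}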