Diffstat (limited to 'arch/sparc/include/asm/mmu_context_64.h')
 arch/sparc/include/asm/mmu_context_64.h | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 7a8380c63aab..78bbacc14d2d 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -4,7 +4,7 @@
 
 /* Derived heavily from Linus's Alpha/AXP ASN code... */
 
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 
 #include <linux/spinlock.h>
 #include <linux/mm_types.h>
@@ -93,7 +93,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 
 	/* We have to be extremely careful here or else we will miss
 	 * a TSB grow if we switch back and forth between a kernel
-	 * thread and an address space which has it's TSB size increased
+	 * thread and an address space which has its TSB size increased
 	 * on another processor.
 	 *
 	 * It is possible to play some games in order to optimize the
@@ -118,7 +118,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 *
 	 * At that point cpu0 continues to use a stale TSB, the one from
 	 * before the TSB grow performed on cpu1. cpu1 did not cross-call
-	 * cpu0 to update it's TSB because at that point the cpu_vm_mask
+	 * cpu0 to update its TSB because at that point the cpu_vm_mask
 	 * only had cpu1 set in it.
 	 */
 	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
@@ -185,8 +185,14 @@ static inline void finish_arch_post_lock_switch(void)
 	}
 }
 
+#define mm_untag_mask mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+	return -1UL >> adi_nbits();
+}
+
 #include <asm-generic/mmu_context.h>
 
-#endif /* !(__ASSEMBLY__) */
+#endif /* !(__ASSEMBLER__) */
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
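
The first and last hunks swap the hand-maintained __ASSEMBLY__ guard for __ASSEMBLER__, which GCC and Clang predefine whenever they preprocess assembly input, so a header shared between C and .S files no longer depends on every build rule passing -D__ASSEMBLY__. A minimal sketch of the guard pattern, using a hypothetical example_shared.h (the names and values are illustrative, not from the patch):

/* example_shared.h -- hypothetical header included from both C and .S files.
 * __ASSEMBLER__ is predefined by the compiler when preprocessing assembly,
 * so the C-only declarations below disappear for .S files while the plain
 * #define stays visible to both languages.
 */
#ifndef _EXAMPLE_SHARED_H
#define _EXAMPLE_SHARED_H

#define EXAMPLE_CTX_BITS 13	/* usable from C and from assembly */

#ifndef __ASSEMBLER__
/* C-only part: the assembler would choke on these tokens. */
struct example_ctx {
	unsigned long hwbits;
};
unsigned long example_ctx_hwbits(const struct example_ctx *ctx);
#endif /* !(__ASSEMBLER__) */

#endif /* !(_EXAMPLE_SHARED_H) */

Preprocessing the same header via "gcc -x assembler-with-cpp" keeps only the #define, which is exactly why the kernel can drop its own __ASSEMBLY__ convention in favor of the compiler-provided symbol.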
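
The @@ -185 hunk adds mm_untag_mask(), whose return value of -1UL >> adi_nbits() is an all-ones word with the top adi_nbits() bits cleared; generic mm code can AND a user pointer with this mask to strip the version tag that SPARC ADI keeps in the upper address bits. A standalone sketch of the masking arithmetic, assuming a fixed 4-bit tag width in place of the hardware-reported adi_nbits() (the constant and the sample pointer are illustrative only):

/* untag_demo.c -- sketch of how an untag mask strips an ADI version tag.
 * ADI_NBITS = 4 is an assumed demo value; the kernel queries the real
 * width from the hardware via adi_nbits().
 */
#include <stdio.h>

#define ADI_NBITS 4UL

static unsigned long untag_mask(void)
{
	/* Same shape as the patch: all-ones shifted right by the tag
	 * width, clearing the top ADI_NBITS bits of the mask. */
	return -1UL >> ADI_NBITS;
}

int main(void)
{
	unsigned long tagged = 0xA000000000401000UL; /* tag 0xA in bits 63..60 */
	unsigned long untagged = tagged & untag_mask();

	printf("mask     = 0x%016lx\n", untag_mask()); /* 0x0fffffffffffffff */
	printf("tagged   = 0x%016lx\n", tagged);
	printf("untagged = 0x%016lx\n", untagged);     /* 0x0000000000401000 */
	return 0;
}

With a 4-bit tag the mask comes out to 0x0fffffffffffffff, so every version value in bits 63..60 collapses to the same untagged address, which is what lets differently tagged views of one mapping compare equal.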