path: root/arch/arm/include/asm/mmu_context.h
author    Catalin Marinas <catalin.marinas@arm.com>   2010-01-26 19:09:42 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>  2010-02-15 21:39:51 +0000
commit    11805bcfa411c816b7c76fc40724be6733c74ffc (patch)
tree      09c5c6c91deffb60339f027779521bafa42921ce /arch/arm/include/asm/mmu_context.h
parent    48ab7e09e0a7c00a217f87e4b57dfbee9c603e79 (diff)
ARM: 5905/1: ARM: Global ASID allocation on SMP
The current ASID allocation algorithm does not notify the other CPUs when the ASID rolls over. This may lead to two processes using the same ASID (but a different generation), or to multiple threads of the same process using different ASIDs.

This patch adds broadcasting of the ASID rollover event to the other CPUs. To avoid a race where multiple CPUs modify "cpu_last_asid" while handling the broadcast, the ASID numbering now starts at "smp_processor_id() + 1". At rollover, cpu_last_asid is set to NR_CPUS.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
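As a rough illustration of the numbering rule described above, the following stand-alone C simulation walks a single CPU across a rollover. It is a sketch of the scheme, not the kernel code: ASID_BITS, NR_CPUS, the fixed CPU number, and the absence of locking and of the actual IPI broadcast are simplifications made for the example.

#include <stdio.h>

#define ASID_BITS               8
#define ASID_MASK               ((1u << ASID_BITS) - 1)   /* hardware ASID field */
#define ASID_FIRST_VERSION      (1u << ASID_BITS)         /* one ASID generation */
#define NR_CPUS                 4

static unsigned int cpu_last_asid = ASID_FIRST_VERSION;

/* Hand out the next ASID for a new context running on CPU this_cpu. */
static unsigned int new_asid(unsigned int this_cpu)
{
        unsigned int asid = ++cpu_last_asid;

        if ((asid & ASID_MASK) == 0) {
                /*
                 * Hardware ASIDs exhausted: move to a new generation.
                 * The kernel would broadcast an IPI here so every other
                 * CPU re-allocates an ASID for whatever it is running.
                 * This CPU takes slot this_cpu + 1 in the new generation,
                 * and cpu_last_asid skips over the first NR_CPUS values so
                 * each of the other CPUs can take a distinct slot.
                 */
                asid = cpu_last_asid + this_cpu + 1;
                cpu_last_asid += NR_CPUS;
        }
        return asid;
}

int main(void)
{
        /* Drive CPU 1 across a rollover. */
        cpu_last_asid = 2 * ASID_FIRST_VERSION - 2;   /* one ASID (0x1ff) left in this generation */
        for (int i = 0; i < 3; i++)
                printf("asid = %#x\n", new_asid(1));
        return 0;
}

With a C99-or-later compiler this prints 0x1ff, 0x202 and 0x205: the last ASID of the old generation, this CPU's reserved slot (cpu + 1) in the new generation, and the first ASID handed out after the NR_CPUS reserved slots.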
Diffstat (limited to 'arch/arm/include/asm/mmu_context.h')
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index de6cefb329dd..a0b3cac0547c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
 
 static inline void check_context(struct mm_struct *mm)
 {
+        /*
+         * This code is executed with interrupts enabled. Therefore,
+         * mm->context.id cannot be updated to the latest ASID version
+         * on a different CPU (and condition below not triggered)
+         * without first getting an IPI to reset the context. The
+         * alternative is to take a read_lock on mm->context.id_lock
+         * (after changing its type to rwlock_t).
+         */
         if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
                 __new_context(mm);
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 __flush_icache_all();
 #endif
         if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#ifdef CONFIG_SMP
+                struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
+                *crt_mm = next;
+#endif
                 check_context(next);
                 cpu_switch_mm(next->pgd, next);
                 if (cache_is_vivt())
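
The per-CPU current_mm pointer recorded in switch_mm() above is what lets each CPU react to the rollover broadcast: in the IPI handler, the CPU looks up the mm it is actually running and moves it into the new ASID generation. What follows is only a hedged sketch of such a handler, not the code added by this patch; the function name, the exact ASID computation, and the assumption that cpu_last_asid still holds the base of the new generation when the IPI arrives are illustrative. Only per_cpu(current_mm, cpu), cpu_last_asid, mm->context.id and cpu_switch_mm() appear in the patch itself.

/* Illustrative sketch only -- not the handler added by this patch. */
static void asid_rollover_ipi(void *unused)
{
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = per_cpu(current_mm, cpu);

        /* Early boot or idle: no user mm recorded on this CPU yet. */
        if (!mm)
                return;

        /* Take this CPU's reserved slot (cpu + 1) in the new generation. */
        mm->context.id = cpu_last_asid + cpu + 1;

        /* Re-program the MMU so the new ASID takes effect immediately. */
        cpu_switch_mm(mm->pgd, mm);
}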