From 78305c863074f809f09a96ea97b86d0f0c1c8399 Mon Sep 17 00:00:00 2001
From: Ben Dooks
Date: Fri, 15 Feb 2013 13:30:58 +0100
Subject: ARM: 7652/1: mm: fix missing use of 'asid' to get asid value from mm->context.id

Fix missing use of the asid macro when getting the ASID from the
mm->context.id field.

Signed-off-by: Ben Dooks
Signed-off-by: Russell King
---
 arch/arm/mm/proc-v7-3level.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 50bf1dafc9ea..6ffd78c0f9ab 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -48,7 +48,7 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
         mmid    r1, r1                          @ get mm->context.id
-        and     r3, r1, #0xff
+        asid    r3, r1
         mov     r3, r3, lsl #(48 - 32)          @ ASID
         mcrr    p15, 0, r0, r3, c2              @ set TTB 0
         isb
-- cgit

From 37f47e3d62533c931b04cb409f2eb299e6342331 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 28 Feb 2013 17:47:20 +0100
Subject: ARM: 7658/1: mm: fix race updating mm->context.id on ASID rollover

If a thread triggers an ASID rollover, other threads of the same
process must be made to wait until the mm->context.id for the shared
mm_struct has been updated to the new generation and the associated
book-keeping (e.g. TLB invalidation) has been performed.

However, there is a *tiny* window where both mm->context.id and the
relevant active_asids entry are updated to the new generation, but the
TLB flush has not yet been performed, which could allow another thread
to return to userspace with a dirty TLB, potentially leading to data
corruption. In reality this will never occur, because one CPU would
need to perform a context-switch in the time it takes another to do a
couple of atomic test/set operations, but we should plug the race
anyway.

This patch moves the active_asids update until after the potential TLB
flush on context-switch.

Cc: # 3.8
Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/context.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a0511191f6b..03ba181e359c 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -207,11 +207,11 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
         if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
                 new_context(mm, cpu);
 
-        atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
-        cpumask_set_cpu(cpu, mm_cpumask(mm));
-
         if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
                 local_flush_tlb_all();
+
+        atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
+        cpumask_set_cpu(cpu, mm_cpumask(mm));
         raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
-- cgit
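The effect of the reordering is easiest to see with the hunk applied: flush
first, publish second. Below is a condensed sketch of the locked slow path in
check_and_switch_context() after this commit, reproduced from the diff above
with the surrounding code elided:

        raw_spin_lock_irqsave(&cpu_asid_lock, flags);

        /* Allocate a new ASID if ours belongs to a stale generation. */
        if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
                new_context(mm, cpu);

        /* Flush stale TLB entries *before* the new ASID is published... */
        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
                local_flush_tlb_all();

        /* ...so a sibling thread observing active_asids can no longer
         * return to userspace with a dirty TLB. */
        atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);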
From 8a4e3a9ead7e37ce1505602b564c15da09ac039f Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 28 Feb 2013 17:47:36 +0100
Subject: ARM: 7659/1: mm: make mm->context.id an atomic64_t variable

mm->context.id is updated under asid_lock when a new ASID is allocated
to an mm_struct. However, it is also read without the lock when a task
is being scheduled and checking whether or not the current ASID
generation is up-to-date.

If two threads of the same process are being scheduled in parallel and
the bottom bits of the generation in their mm->context.id match the
current generation (that is, the mm_struct has not been used for
~2^24 rollovers) then the non-atomic, lockless access to mm->context.id
may yield the incorrect ASID.

This patch fixes this issue by making mm->context.id an atomic64_t,
ensuring that the generation is always read consistently. For code that
only requires access to the ASID bits (e.g. TLB flushing by mm), the
value is accessed directly, which GCC converts to an ldrb.

Cc: # 3.8
Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/context.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 03ba181e359c..44d4ee52f3e2 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
         return 0;
 }
 
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
-        u64 asid = mm->context.id;
+        u64 asid = atomic64_read(&mm->context.id);
         u64 generation = atomic64_read(&asid_generation);
 
         if (asid != 0 && is_reserved_asid(asid)) {
@@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
                 cpumask_clear(mm_cpumask(mm));
         }
 
-        mm->context.id = asid;
+        return asid;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
         unsigned long flags;
         unsigned int cpu = smp_processor_id();
+        u64 asid;
 
         if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
                 __check_vmalloc_seq(mm);
@@ -198,19 +199,23 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
          */
         cpu_set_reserved_ttbr0();
 
-        if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-            && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+        asid = atomic64_read(&mm->context.id);
+        if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+            && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
                 goto switch_mm_fastpath;
 
         raw_spin_lock_irqsave(&cpu_asid_lock, flags);
         /* Check that our ASID belongs to the current generation. */
-        if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-                new_context(mm, cpu);
+        asid = atomic64_read(&mm->context.id);
+        if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+                asid = new_context(mm, cpu);
+                atomic64_set(&mm->context.id, asid);
+        }
 
         if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
                 local_flush_tlb_all();
 
-        atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
+        atomic64_set(&per_cpu(active_asids, cpu), asid);
         cpumask_set_cpu(cpu, mm_cpumask(mm));
         raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
-- cgit
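As a reading aid, the access pattern this commit establishes looks roughly
like the sketch below. The ASID_BITS/ASID_MASK/ASID() definitions live
outside the directory covered by this log (arch/arm/include/asm/mmu.h and
context.c), so treat the exact spellings as illustrative rather than quoted:

#include <linux/atomic.h>
#include <linux/mm_types.h>

#define ASID_BITS       8
#define ASID_MASK       ((~0ULL) << ASID_BITS)

extern atomic64_t asid_generation;      /* generation counter owned by the ASID allocator */

/* Lockless generation check: a single atomic64_read() snapshots the
 * generation and ASID bits together, so a racing new_context() can
 * never be observed half-written. */
static inline bool asid_is_current(struct mm_struct *mm)
{
        u64 id = atomic64_read(&mm->context.id);

        return !((id ^ atomic64_read(&asid_generation)) >> ASID_BITS);
}

/* Callers that only need the hardware ASID (e.g. TLB flushing by mm)
 * read the counter field directly, which GCC reduces to a single ldrb. */
#define ASID(mm)        ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))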
From 89c7e4b8bbb3d4fa52df5746a8ad38e610143651 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 28 Feb 2013 17:48:40 +0100
Subject: ARM: 7661/1: mm: perform explicit branch predictor maintenance when required

The ARM ARM requires branch predictor maintenance if, for a given ASID,
the instructions at a specific virtual address appear to change.

From the kernel's point of view, that means:

 - Changing the kernel's view of memory (e.g. switching to the
   identity map)
 - ASID rollover (since ASIDs will be re-allocated to new tasks)

This patch adds explicit branch predictor maintenance when either of
the two conditions above is met.

Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/context.c | 4 +++-
 arch/arm/mm/idmap.c   | 1 +
 2 files changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 44d4ee52f3e2..a5a4b2bc42ba 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -212,8 +212,10 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
                 atomic64_set(&mm->context.id, asid);
         }
 
-        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+                local_flush_bp_all();
                 local_flush_tlb_all();
+        }
 
         atomic64_set(&per_cpu(active_asids, cpu), asid);
         cpumask_set_cpu(cpu, mm_cpumask(mm));
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 2dffc010cc41..5ee505c937d1 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -141,6 +141,7 @@ void setup_mm_for_reboot(void)
 {
         /* Switch to the identity mapping. */
         cpu_switch_mm(idmap_pgd, &init_mm);
+        local_flush_bp_all();
 
 #ifdef CONFIG_CPU_HAS_ASID
         /*
-- cgit
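local_flush_bp_all() itself lives outside the directory covered by this log
(asm/tlbflush.h). On an ARMv7 core it amounts to a CP15 branch-predictor
invalidate; the sketch below shows the uniprocessor BPIALL form and is an
approximation, not the kernel's exact implementation, which selects BPIALL or
the inner-shareable BPIALLIS variant from the detected TLB flags:

#include <asm/barrier.h>

static inline void local_flush_bp_all(void)
{
        const int zero = 0;

        /* BPIALL: invalidate the entire branch predictor array. */
        asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "memory");

        /* Complete the invalidation before any later instruction fetch. */
        isb();
}

Pairing this with the TLB flush on rollover keeps predictions learned under a
recycled ASID from steering instruction fetches in the task that inherits it.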