diff options
Diffstat (limited to 'arch/x86/include/asm/tlb.h')
-rw-r--r-- | arch/x86/include/asm/tlb.h | 13 |
1 files changed, 2 insertions, 11 deletions
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 580636cdc257..77f52bc1578a 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -20,18 +20,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
 }
 
-/*
- * While x86 architecture in general requires an IPI to perform TLB
- * shootdown, enablement code for several hypervisors overrides
- * .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
- * a hypercall. To keep software pagetable walkers safe in this case we
- * switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the comment
- * below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
- * for more details.
- */
-static inline void __tlb_remove_table(void *table)
+static inline void invlpg(unsigned long addr)
 {
-	free_page_and_swap_cache(table);
+	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
 }
 
 #endif /* _ASM_X86_TLB_H */
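
This hunk removes the x86-private __tlb_remove_table() (whose RCU-table-free rationale lived in the deleted comment; a generic definition presumably takes over elsewhere) and adds an invlpg() helper that invalidates the TLB entry for a single linear address on the local CPU. Below is a minimal standalone sketch of that pattern and of how such a helper might be called; the caller name is invented for illustration and this is not kernel code.

/*
 * Sketch of the helper this patch adds: INVLPG invalidates the TLB entry
 * covering one linear address on the current CPU only; the "memory"
 * clobber keeps the compiler from reordering memory accesses across the
 * flush. INVLPG is a privileged instruction, so this compiles anywhere
 * but only executes at CPL 0.
 */
static inline void invlpg(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

/*
 * Hypothetical caller (name invented for illustration): after updating a
 * single PTE, drop only that page's stale translation instead of
 * flushing the whole TLB.
 */
static inline void flush_one_local_page(unsigned long vaddr)
{
	invlpg(vaddr);
}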