author    Peter Zijlstra <peterz@infradead.org>    2018-09-19 10:50:24 +0200
committer Thomas Gleixner <tglx@linutronix.de>    2018-09-27 20:39:42 +0200
commit    7904ba8a66f400182a204893c92098994e22a88d (patch)
tree      5364759f89275e6b6ab95586301dc9b2bb265b16 /arch/x86/mm
parent    47e262ac5b84015c4a101ff51767c464fb7497a6 (diff)
x86/mm/cpa: Optimize __cpa_flush_range()
If we IPI for WBINVD, then we might as well kill the entire TLB too. But if we don't have to invalidate cache, there is no reason not to use a range TLB flush.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Cc: Bin Yang <bin.yang@intel.com>
Cc: Mark Gross <mark.gross@intel.com>
Link: https://lkml.kernel.org/r/20180919085948.195633798@infradead.org
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--    arch/x86/mm/pageattr.c    2
1 file changed, 1 insertion, 1 deletion
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index dc552824e86a..62bb30b4bd2a 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -291,7 +291,7 @@ static bool __cpa_flush_range(unsigned long start, int numpages, int cache)
WARN_ON(PAGE_ALIGN(start) != start);
- if (!static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
cpa_flush_all(cache);
return true;
}
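
For context, the following is a hedged, self-contained sketch (not the kernel source) of the flush decision the commit message describes: after this change, the full-flush fallback, which implies a WBINVD IPI and a global TLB invalidation, is taken only when a cache flush is actually requested and CLFLUSH is unavailable; otherwise a range TLB flush suffices. The helper names and output below are illustrative stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative stand-ins, not the kernel implementations: everything
 * here other than the cache/CLFLUSH decision itself is hypothetical.
 */
static bool cpu_has_clflush;	/* models static_cpu_has(X86_FEATURE_CLFLUSH) */

/* models cpa_flush_all(): full TLB flush, plus WBINVD on all CPUs if cache */
static void flush_all(bool cache)
{
	printf("full TLB flush%s\n", cache ? " + WBINVD on all CPUs" : "");
}

static void flush_tlb_range(unsigned long start, int numpages)
{
	printf("range TLB flush: %d page(s) at %#lx\n", numpages, start);
}

/*
 * The decision the patch changes: only fall back to the expensive full
 * flush when a cache flush is actually requested and CLFLUSH is not
 * available; a pure TLB invalidation can always be done by range.
 */
static void cpa_flush_range_sketch(unsigned long start, int numpages, bool cache)
{
	if (cache && !cpu_has_clflush) {	/* old code: if (!cpu_has_clflush) */
		flush_all(cache);
		return;
	}
	flush_tlb_range(start, numpages);
	/* if (cache), the real function would CLFLUSH each changed page here */
}

int main(void)
{
	cpa_flush_range_sketch(0x1000UL, 4, false);	/* no cache flush needed: range TLB flush */
	cpa_flush_range_sketch(0x1000UL, 4, true);	/* cache flush needed, no CLFLUSH: full flush */
	return 0;
}

Before the patch, the first call above would also have taken the full-flush path on CPUs without CLFLUSH, even though no cache invalidation was needed; with the added cache check it uses the cheaper range TLB flush.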