author     Lai Jiangshan <laijs@linux.alibaba.com>    2020-05-29 23:27:30 +0200
committer  Thomas Gleixner <tglx@linutronix.de>       2020-06-11 15:15:20 +0200
commit     97417cb9ad4ed052d7a4c5c0d75db1ff1b0981fb
tree       7c6acb233789431d272714c5ad438215ba7a52b0   /arch/x86/kernel/hw_breakpoint.c
parent     d390e6de89d30402bd06056c40cea72328aec9b1
x86/hw_breakpoint: Prevent data breakpoints on direct GDT
A data breakpoint on the GDT can be fatal and must be avoided. The GDT in the CPU entry area is already protected, but not the direct GDT.

Add the necessary protection.

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20200526014221.2119-3-laijs@linux.alibaba.com
Link: https://lkml.kernel.org/r/20200529213320.840953950@infradead.org
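For illustration only, here is a rough sketch (not part of this patch) of how the new check surfaces through the in-kernel breakpoint API: a write breakpoint whose range overlaps a per-CPU direct GDT is now refused while the attribute is validated. The handler name and module boilerplate are made up for the example; hw_breakpoint_init(), register_wide_hw_breakpoint(), unregister_wide_hw_breakpoint() and get_cpu_gdt_rw() are existing kernel interfaces.

#include <linux/err.h>
#include <linux/hw_breakpoint.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <asm/desc.h>

/* Hypothetical handler; would run if the watched address were written. */
static void gdt_bp_handler(struct perf_event *bp,
			   struct perf_sample_data *data,
			   struct pt_regs *regs)
{
}

static int __init try_gdt_breakpoint(void)
{
	struct perf_event_attr attr;
	struct perf_event * __percpu *bp;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)get_cpu_gdt_rw(0);	/* direct GDT of CPU 0 */
	attr.bp_len  = HW_BREAKPOINT_LEN_8;
	attr.bp_type = HW_BREAKPOINT_W;

	bp = register_wide_hw_breakpoint(&attr, gdt_bp_handler, NULL);
	if (IS_ERR((void __force *)bp))
		/* With this patch: -EINVAL from arch_build_bp_info(). */
		return PTR_ERR((void __force *)bp);

	unregister_wide_hw_breakpoint(bp);
	return 0;
}

Before this change the same request would be accepted, arming a data breakpoint that can fire while the CPU accesses the GDT, which the changelog describes as potentially fatal.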
Diffstat (limited to 'arch/x86/kernel/hw_breakpoint.c')
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c | 30 ++++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index c149c7b29ac3..f859095c1b6c 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -32,6 +32,7 @@
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/user.h>
+#include <asm/desc.h>
/* Per cpu debug control register value */
DEFINE_PER_CPU(unsigned long, cpu_dr7);
@@ -237,13 +238,26 @@ static inline bool within_area(unsigned long addr, unsigned long end,
}
/*
- * Checks whether the range from addr to end, inclusive, overlaps the CPU
- * entry area range.
+ * Checks whether the range from addr to end, inclusive, overlaps the fixed
+ * mapped CPU entry area range or other ranges used for CPU entry.
*/
-static inline bool within_cpu_entry_area(unsigned long addr, unsigned long end)
+static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
{
- return within_area(addr, end, CPU_ENTRY_AREA_BASE,
- CPU_ENTRY_AREA_TOTAL_SIZE);
+ int cpu;
+
+ /* CPU entry area is always used for CPU entry */
+ if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
+ CPU_ENTRY_AREA_TOTAL_SIZE))
+ return true;
+
+ for_each_possible_cpu(cpu) {
+ /* The original rw GDT is being used after load_direct_gdt() */
+ if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
+ GDT_SIZE))
+ return true;
+ }
+
+ return false;
}
static int arch_build_bp_info(struct perf_event *bp,
@@ -257,12 +271,12 @@ static int arch_build_bp_info(struct perf_event *bp,
return -EINVAL;
/*
- * Prevent any breakpoint of any type that overlaps the
- * cpu_entry_area. This protects the IST stacks and also
+ * Prevent any breakpoint of any type that overlaps the CPU
+ * entry area and data. This protects the IST stacks and also
* reduces the chance that we ever find out what happens if
* there's a data breakpoint on the GDT, IDT, or TSS.
*/
- if (within_cpu_entry_area(attr->bp_addr, bp_end))
+ if (within_cpu_entry(attr->bp_addr, bp_end))
return -EINVAL;
hw->address = attr->bp_addr;
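For reference, the overlap test used above comes from the parent commit (d390e6de89d3), which added the within_area() helper; roughly, it checks the inclusive range [addr, end] against [base, base + size):

static inline bool within_area(unsigned long addr, unsigned long end,
			       unsigned long base, unsigned long size)
{
	return end >= base && addr < (base + size);
}

Because the breakpoint range is treated as inclusive, a request that merely grazes the first or last byte of a GDT is rejected as well.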