author     H. Peter Anvin <hpa@zytor.com>    2012-04-20 17:12:48 -0700
committer  H. Peter Anvin <hpa@zytor.com>    2012-04-20 17:22:34 -0700
commit     706276543b699d80f546e45f8b12574e7b18d952 (patch)
tree       fc0fd24bf22517086685aa470a0fb596a71026ab /arch/x86/include/asm
parent     fa574a48a1e9706bba38188d3bf61ecb66546a77 (diff)
x86, extable: Switch to relative exception table entries
Switch to using relative exception table entries on x86.  On i386, this
has the advantage that the exception table entries don't need to be
relocated; on x86-64 this means the exception table entries take up
only half the space.

In either case, a 32-bit delta is sufficient, as the range of kernel
code addresses is limited.

Since part of the goal is to avoid needing to adjust the entries when
the kernel is relocated, the old trick of using addresses in the NULL
pointer range to indicate uaccess_err no longer works (and unlike RISC
architectures we can't use a flag bit); instead use a delta just below
+2G to indicate these special entries.  The reach is still limited to a
single instruction.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Cc: David Daney <david.daney@cavium.com>
Link: http://lkml.kernel.org/r/CA%2B55aFyijf43qSu3N9nWHEBwaGbb7T2Oq9A=9EyR=Jtyqfq_cQ@mail.gmail.com
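[Editor's note] A minimal sketch of what "relative to the entry" means in
practice: each 32-bit field stores the distance from the field's own address
to the target, so adding the field's address back in recovers the absolute
address.  The helper names below are illustrative only; the sort/lookup code
that actually consumes these entries lives outside this diffstat (in
arch/x86/mm/extable.c).

    /*
     * Sketch only: decoding a relative exception table entry.  Each field
     * holds "target - &field", so the absolute address is recovered by
     * adding the field's own address back in.  Helper names are illustrative.
     */
    struct exception_table_entry {
            int insn, fixup;        /* 32-bit deltas, see the uaccess.h hunk below */
    };

    static inline unsigned long ex_insn_addr(const struct exception_table_entry *x)
    {
            return (unsigned long)&x->insn + x->insn;
    }

    static inline unsigned long ex_fixup_addr(const struct exception_table_entry *x)
    {
            return (unsigned long)&x->fixup + x->fixup;
    }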
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/asm.h      20
-rw-r--r--  arch/x86/include/asm/uaccess.h  17
2 files changed, 23 insertions, 14 deletions
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 0f15e8a4f565..1c2d247f65ce 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -42,26 +42,30 @@
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE(from,to) \
.pushsection "__ex_table","a" ; \
- _ASM_ALIGN ; \
- _ASM_PTR from , to ; \
+ .balign 8 ; \
+ .long (from) - . ; \
+ .long (to) - . ; \
.popsection
# define _ASM_EXTABLE_EX(from,to) \
.pushsection "__ex_table","a" ; \
- _ASM_ALIGN ; \
- _ASM_PTR from , (to) - (from) ; \
+ .balign 8 ; \
+ .long (from) - . ; \
+ .long (to) - . + 0x7ffffff0 ; \
.popsection
#else
# define _ASM_EXTABLE(from,to) \
" .pushsection \"__ex_table\",\"a\"\n" \
- _ASM_ALIGN "\n" \
- _ASM_PTR #from "," #to "\n" \
+ " .balign 8\n" \
+ " .long (" #from ") - .\n" \
+ " .long (" #to ") - .\n" \
" .popsection\n"
# define _ASM_EXTABLE_EX(from,to) \
" .pushsection \"__ex_table\",\"a\"\n" \
- _ASM_ALIGN "\n" \
- _ASM_PTR #from ",(" #to ")-(" #from ")\n" \
+ " .balign 8\n" \
+ " .long (" #from ") - .\n" \
+ " .long (" #to ") - . + 0x7ffffff0\n" \
" .popsection\n"
#endif
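[Editor's note] On the 0x7ffffff0 constant in the _ASM_EXTABLE_EX variant
above: the fixup delta is biased so that it sits just below +2G, which is how
the fixup path can tell these special uaccess_err entries apart from normal
ones.  The code below is only a sketch of that arithmetic, not the actual
handler (which lives in arch/x86/mm/extable.c, outside this diffstat); it
reuses the struct exception_table_entry from the uaccess.h hunk further down.

    /*
     * Sketch of the arithmetic implied by _ASM_EXTABLE_EX; not the real
     * fixup handler.  The insn and fixup fields are 4 bytes apart, so for
     * a biased entry fixup - insn == (to - from) - 4 + 0x7ffffff0, while a
     * normal entry yields a small value.  "to - from" is at most one
     * instruction, so the subtraction stays within a 32-bit int.
     */
    #define EX_UACCESS_BIAS 0x7ffffff0

    static int ex_is_uaccess_err(const struct exception_table_entry *x)
    {
            return x->fixup - x->insn >= EX_UACCESS_BIAS - 4;
    }

    static unsigned long ex_uaccess_fixup_addr(const struct exception_table_entry *x)
    {
            /* decode the delta, then strip the bias to get the real target */
            return (unsigned long)&x->fixup + x->fixup - EX_UACCESS_BIAS;
    }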
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 4ee59dd66f5d..851fe0dc13bc 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -79,11 +79,12 @@
#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
+ * The exception table consists of pairs of addresses relative to the
+ * exception table entry itself: the first is the address of an
+ * instruction that is allowed to fault, and the second is the address
+ * at which the program should continue. No registers are modified,
+ * so it is entirely up to the continuation code to figure out what to
+ * do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
@@ -92,10 +93,14 @@
*/
struct exception_table_entry {
- unsigned long insn, fixup;
+ int insn, fixup;
};
+/* This is not the generic standard exception_table_entry format */
+#define ARCH_HAS_SORT_EXTABLE
+#define ARCH_HAS_SEARCH_EXTABLE
extern int fixup_exception(struct pt_regs *regs);
+extern int early_fixup_exception(unsigned long *ip);
/*
* These are the main single-value transfer routines. They automatically
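[Editor's note] The comment above introduces the single-value transfer
routines; the pattern it describes looks roughly like the hypothetical helper
below (names, constraints and the -EFAULT literal are illustrative, not the
kernel's actual __get_user_asm): label 1 is the instruction allowed to fault,
label 3 is the out-of-line fixup, and _ASM_EXTABLE(1b, 3b) emits the relative
entry that ties them together.

    /*
     * Hypothetical, simplified sketch of an out-of-line fixup; not the
     * kernel's real __get_user_asm.  On a fault at 1:, the exception table
     * redirects execution to 3:, which sets the error code, zeroes the
     * result and jumps back to the main path at 2:.
     */
    #define load_user_u32(x, ptr, err)                                  \
            asm volatile("1:        movl %2, %1\n"                      \
                         "2:\n"                                         \
                         ".section .fixup,\"ax\"\n"                     \
                         "3:        movl %3, %0\n"                      \
                         "          xorl %1, %1\n"                      \
                         "          jmp 2b\n"                           \
                         ".previous\n"                                  \
                         _ASM_EXTABLE(1b, 3b)                           \
                         : "=r" (err), "=r" (x)                         \
                         : "m" (*(const unsigned int *)(ptr)),          \
                           "i" (-14 /* -EFAULT */), "0" (err))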