Diffstat (limited to 'arch/x86/um/shared/sysdep/stub_32.h')
-rw-r--r--   arch/x86/um/shared/sysdep/stub_32.h   111
1 file changed, 71 insertions(+), 40 deletions(-)
diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h
index 38fa894b65d0..9dc2efaf5df1 100644
--- a/arch/x86/um/shared/sysdep/stub_32.h
+++ b/arch/x86/um/shared/sysdep/stub_32.h
@@ -6,99 +6,105 @@
 #ifndef __SYSDEP_STUB_H
 #define __SYSDEP_STUB_H
 
+#include <stddef.h>
 #include <asm/ptrace.h>
 #include <generated/asm-offsets.h>
 
 #define STUB_MMAP_NR __NR_mmap2
 #define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
 
-static inline long stub_syscall0(long syscall)
+static __always_inline long stub_syscall0(long syscall)
 {
 	long ret;
 
-	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall));
+	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall)
+			: "memory");
 
 	return ret;
 }
 
-static inline long stub_syscall1(long syscall, long arg1)
+static __always_inline long stub_syscall1(long syscall, long arg1)
 {
 	long ret;
 
-	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1));
+	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1)
+			: "memory");
 
 	return ret;
 }
 
-static inline long stub_syscall2(long syscall, long arg1, long arg2)
+static __always_inline long stub_syscall2(long syscall, long arg1, long arg2)
 {
 	long ret;
 
 	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
-			"c" (arg2));
+			"c" (arg2)
+			: "memory");
 
 	return ret;
 }
 
-static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
+static __always_inline long stub_syscall3(long syscall, long arg1, long arg2,
+					   long arg3)
 {
 	long ret;
 
 	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
-			"c" (arg2), "d" (arg3));
+			"c" (arg2), "d" (arg3)
+			: "memory");
 
 	return ret;
 }
 
-static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
-				 long arg4)
+static __always_inline long stub_syscall4(long syscall, long arg1, long arg2,
+					   long arg3, long arg4)
 {
 	long ret;
 
 	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
-			"c" (arg2), "d" (arg3), "S" (arg4));
+			"c" (arg2), "d" (arg3), "S" (arg4)
+			: "memory");
 
 	return ret;
 }
 
-static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
-				 long arg4, long arg5)
+static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
+					   long arg3, long arg4, long arg5)
 {
 	long ret;
 
 	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
-			"c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5));
+			"c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
+			: "memory");
 
 	return ret;
 }
 
-static inline void trap_myself(void)
+static __always_inline long stub_syscall6(long syscall, long arg1, long arg2,
+					   long arg3, long arg4, long arg5,
+					   long arg6)
 {
-	__asm("int3");
+	struct syscall_args {
+		int ebx, ebp;
+	} args = { arg1, arg6 };
+	long ret;
+
+	__asm__ volatile ("pushl %%ebp;"
+			"movl 0x4(%%ebx),%%ebp;"
+			"movl (%%ebx),%%ebx;"
+			"int $0x80;"
+			"popl %%ebp"
+			: "=a" (ret)
+			: "0" (syscall), "b" (&args),
+			  "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
+			: "memory");
+
+	return ret;
 }
 
-static inline void remap_stack_and_trap(void)
+static __always_inline void trap_myself(void)
 {
-	__asm__ volatile (
-		"movl %%esp,%%ebx ;"
-		"andl %0,%%ebx ;"
-		"movl %1,%%eax ;"
-		"movl %%ebx,%%edi ; addl %2,%%edi ; movl (%%edi),%%edi ;"
-		"movl %%ebx,%%ebp ; addl %3,%%ebp ; movl (%%ebp),%%ebp ;"
-		"int $0x80 ;"
-		"addl %4,%%ebx ; movl %%eax, (%%ebx) ;"
-		"int $3"
-		: :
-		"g" (~(STUB_DATA_PAGES * UM_KERN_PAGE_SIZE - 1)),
-		"g" (STUB_MMAP_NR),
-		"g" (UML_STUB_FIELD_FD),
-		"g" (UML_STUB_FIELD_OFFSET),
-		"g" (UML_STUB_FIELD_CHILD_ERR),
-		"c" (STUB_DATA_PAGES * UM_KERN_PAGE_SIZE),
-		"d" (PROT_READ | PROT_WRITE),
-		"S" (MAP_FIXED | MAP_SHARED)
-		:
-		"memory");
+	__asm("int3");
 }
 
 static __always_inline void *get_stub_data(void)
@@ -106,11 +112,36 @@ static __always_inline void *get_stub_data(void)
 	unsigned long ret;
 
 	asm volatile (
-		"movl %%esp,%0 ;"
-		"andl %1,%0"
+		"call _here_%=;"
+		"_here_%=:"
+		"popl %0;"
+		"andl %1, %0 ;"
+		"addl %2, %0 ;"
 		: "=a" (ret)
-		: "g" (~(STUB_DATA_PAGES * UM_KERN_PAGE_SIZE - 1)));
+		: "g" (~(UM_KERN_PAGE_SIZE - 1)),
+		  "g" (UM_KERN_PAGE_SIZE));
 
 	return (void *)ret;
 }
+
+#define stub_start(fn)						\
+	asm volatile (						\
+		"subl %0,%%esp ;"				\
+		"movl %1, %%eax ; "				\
+		"call *%%eax ;"					\
+		:: "i" (STUB_SIZE),				\
+		   "i" (&fn))
+
+static __always_inline void
+stub_seccomp_restore_state(struct stub_data_arch *arch)
+{
+	for (int i = 0; i < sizeof(arch->tls) / sizeof(arch->tls[0]); i++) {
+		if (arch->sync & (1 << i))
+			stub_syscall1(__NR_set_thread_area,
+				      (unsigned long) &arch->tls[i]);
+	}
+
+	arch->sync = 0;
+}
+
 #endif
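Note (not part of the diff): a minimal usage sketch of the new six-argument helper, assuming a caller wants to issue the mmap2 call that the removed remap_stack_and_trap() used to open-code. The stub_map() name and the fd/offset arguments are hypothetical placeholders, not taken from this patch.

	/* Hypothetical illustration only: map one shared, writable page at
	 * addr through stub_syscall6(). STUB_MMAP_NR is __NR_mmap2, so the
	 * file offset is passed in page units via MMAP_OFFSET().
	 */
	static __always_inline long stub_map(void *addr, int fd,
					     unsigned long offset)
	{
		return stub_syscall6(STUB_MMAP_NR, (unsigned long) addr,
				     UM_KERN_PAGE_SIZE,
				     PROT_READ | PROT_WRITE,
				     MAP_SHARED | MAP_FIXED, fd,
				     MMAP_OFFSET(offset));
	}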
