Diffstat (limited to 'arch/x86/um/shared/sysdep/stub_64.h')
-rw-r--r-- | arch/x86/um/shared/sysdep/stub_64.h | 85
1 file changed, 57 insertions, 28 deletions
diff --git a/arch/x86/um/shared/sysdep/stub_64.h b/arch/x86/um/shared/sysdep/stub_64.h
index b24168ef0ac4..9cfd31afa769 100644
--- a/arch/x86/um/shared/sysdep/stub_64.h
+++ b/arch/x86/um/shared/sysdep/stub_64.h
@@ -6,9 +6,11 @@
 #ifndef __SYSDEP_STUB_H
 #define __SYSDEP_STUB_H
 
+#include <stddef.h>
 #include <sysdep/ptrace_user.h>
 #include <generated/asm-offsets.h>
 #include <linux/stddef.h>
+#include <asm/prctl.h>
 
 #define STUB_MMAP_NR __NR_mmap
 #define MMAP_OFFSET(o) (o)
@@ -27,6 +29,17 @@ static __always_inline long stub_syscall0(long syscall)
 	return ret;
 }
 
+static __always_inline long stub_syscall1(long syscall, long arg1)
+{
+	long ret;
+
+	__asm__ volatile (__syscall
+		: "=a" (ret)
+		: "0" (syscall), "D" (arg1) : __syscall_clobber );
+
+	return ret;
+}
+
 static __always_inline long stub_syscall2(long syscall, long arg1, long arg2)
 {
 	long ret;
@@ -79,35 +92,25 @@ static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
 	return ret;
 }
 
-static __always_inline void trap_myself(void)
+static __always_inline long stub_syscall6(long syscall, long arg1, long arg2,
+					  long arg3, long arg4, long arg5,
+					  long arg6)
 {
-	__asm("int3");
+	long ret;
+
+	__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; "
+		__syscall
+		: "=a" (ret)
+		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
+		  "g" (arg4), "g" (arg5), "g" (arg6)
+		: __syscall_clobber, "r10", "r8", "r9");
+
+	return ret;
 }
 
-static __always_inline void remap_stack_and_trap(void)
+static __always_inline void trap_myself(void)
 {
-	__asm__ volatile (
-		"movq %0,%%rax ;"
-		"movq %%rsp,%%rdi ;"
-		"andq %1,%%rdi ;"
-		"movq %2,%%r10 ;"
-		"movq %%rdi,%%r8 ; addq %3,%%r8 ; movq (%%r8),%%r8 ;"
-		"movq %%rdi,%%r9 ; addq %4,%%r9 ; movq (%%r9),%%r9 ;"
-		__syscall ";"
-		"movq %%rsp,%%rdi ; andq %1,%%rdi ;"
-		"addq %5,%%rdi ; movq %%rax, (%%rdi) ;"
-		"int3"
-		: :
-		"g" (STUB_MMAP_NR),
-		"g" (~(STUB_DATA_PAGES * UM_KERN_PAGE_SIZE - 1)),
-		"g" (MAP_FIXED | MAP_SHARED),
-		"g" (UML_STUB_FIELD_FD),
-		"g" (UML_STUB_FIELD_OFFSET),
-		"g" (UML_STUB_FIELD_CHILD_ERR),
-		"S" (STUB_DATA_PAGES * UM_KERN_PAGE_SIZE),
-		"d" (PROT_READ | PROT_WRITE)
-		:
-		__syscall_clobber, "r10", "r8", "r9");
+	__asm("int3");
 }
 
 static __always_inline void *get_stub_data(void)
@@ -115,11 +118,37 @@ static __always_inline void *get_stub_data(void)
 	unsigned long ret;
 
 	asm volatile (
-		"movq %%rsp,%0 ;"
-		"andq %1,%0"
+		"lea 0(%%rip), %0;"
+		"andq %1, %0 ;"
+		"addq %2, %0 ;"
 		: "=a" (ret)
-		: "g" (~(STUB_DATA_PAGES * UM_KERN_PAGE_SIZE - 1)));
+		: "g" (~(UM_KERN_PAGE_SIZE - 1)),
+		  "g" (UM_KERN_PAGE_SIZE));
 
 	return (void *)ret;
 }
+
+#define stub_start(fn)							\
+	asm volatile (							\
+		"subq %0,%%rsp ;"					\
+		"movq %1,%%rax ;"					\
+		"call *%%rax ;"						\
+		:: "i" ((1 + STUB_DATA_PAGES) * UM_KERN_PAGE_SIZE),	\
+		   "i" (&fn))
+
+static __always_inline void
+stub_seccomp_restore_state(struct stub_data_arch *arch)
+{
+	/*
+	 * We could use _writefsbase_u64/_writegsbase_u64 if the host reports
+	 * support in the hwcaps (HWCAP2_FSGSBASE).
+	 */
+	if (arch->sync & STUB_SYNC_FS_BASE)
+		stub_syscall2(__NR_arch_prctl, ARCH_SET_FS, arch->fs_base);
+	if (arch->sync & STUB_SYNC_GS_BASE)
+		stub_syscall2(__NR_arch_prctl, ARCH_SET_GS, arch->gs_base);
+
+	arch->sync = 0;
+}
+
 #endif
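
For context (not part of the patch): the new stub_syscall6() follows the regular x86-64 syscall calling convention, with arguments in rdi, rsi, rdx, r10, r8 and r9 and the result returned in rax. Below is a minimal standalone userspace sketch of the same pattern, here used to invoke mmap() directly; the raw_syscall6() name and the surrounding test program are hypothetical and exist only for illustration.

/*
 * Illustrative sketch, not part of the patch: a userspace version of the
 * 6-argument syscall pattern used by stub_syscall6().  Arguments are
 * placed per the x86-64 syscall ABI (rdi, rsi, rdx, r10, r8, r9) and the
 * result comes back in rax.  raw_syscall6() is a made-up name.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>

static long raw_syscall6(long nr, long a1, long a2, long a3,
			 long a4, long a5, long a6)
{
	long ret;

	/* Same register setup as stub_syscall6(): args 4-6 go via r10/r8/r9. */
	__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; "
		"syscall"
		: "=a" (ret)
		: "0" (nr), "D" (a1), "S" (a2), "d" (a3),
		  "g" (a4), "g" (a5), "g" (a6)
		: "memory", "rcx", "r11", "r10", "r8", "r9");

	return ret;
}

int main(void)
{
	/* mmap() takes six arguments, so it exercises the full wrapper. */
	long addr = raw_syscall6(SYS_mmap, 0, 4096,
				 PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("anonymous page mapped at %p\n", (void *)addr);
	return 0;
}

Listing r10, r8 and r9 as clobbers also keeps the compiler from allocating the "g" inputs into those registers, so the explicit movq sequence cannot overwrite an argument before it is read.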