From 631b7abacd02b88f4b0795c08b54ad4fc3e7c7c0 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Mon, 7 Nov 2016 16:26:35 -0500
Subject: ptrace: Remove maxargs from task_current_syscall()

task_current_syscall() has a single user that passes in 6 for maxargs,
which is the maximum number of arguments that syscall_get_arguments()
can retrieve. Instead of passing in the number of arguments to grab,
just get all 6 arguments. The args argument already specifies that it's
an array of 6 items.

This will also allow syscall_get_arguments() to be changed to always
grab 6 arguments instead of a variable number.

Linus also suggested not passing in a bunch of arguments to
task_current_syscall() but to instead pass in a pointer to a structure,
and just fill the structure. struct seccomp_data has almost all the
parameters that are needed except for the stack pointer (sp). As
seccomp_data is part of uapi, and I'm afraid to change it, a new
structure was created, "syscall_info", which includes seccomp_data and
adds the "sp" field.

Link: http://lkml.kernel.org/r/20161107213233.466776454@goodmis.org
Cc: Andy Lutomirski
Cc: Alexey Dobriyan
Cc: Oleg Nesterov
Cc: Kees Cook
Cc: Al Viro
Cc: linux-fsdevel@vger.kernel.org
Reviewed-by: Thomas Gleixner
Signed-off-by: Steven Rostedt (VMware)
---
 lib/syscall.c | 57 +++++++++++++++++++++++++--------------------------------
 1 file changed, 25 insertions(+), 32 deletions(-)

(limited to 'lib')

diff --git a/lib/syscall.c b/lib/syscall.c
index 1a7077f20eae..e8467e17b9a2 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -5,16 +5,14 @@
 #include
 #include

-static int collect_syscall(struct task_struct *target, long *callno,
-			   unsigned long args[6], unsigned int maxargs,
-			   unsigned long *sp, unsigned long *pc)
+static int collect_syscall(struct task_struct *target, struct syscall_info *info)
 {
 	struct pt_regs *regs;

 	if (!try_get_task_stack(target)) {
 		/* Task has no stack, so the task isn't in a syscall. */
-		*sp = *pc = 0;
-		*callno = -1;
+		memset(info, 0, sizeof(*info));
+		info->data.nr = -1;
 		return 0;
 	}

@@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno,
 		return -EAGAIN;
 	}

-	*sp = user_stack_pointer(regs);
-	*pc = instruction_pointer(regs);
+	info->sp = user_stack_pointer(regs);
+	info->data.instruction_pointer = instruction_pointer(regs);

-	*callno = syscall_get_nr(target, regs);
-	if (*callno != -1L && maxargs > 0)
-		syscall_get_arguments(target, regs, 0, maxargs, args);
+	info->data.nr = syscall_get_nr(target, regs);
+	if (info->data.nr != -1L)
+		syscall_get_arguments(target, regs, 0, 6,
+				      (unsigned long *)&info->data.args[0]);

 	put_task_stack(target);
 	return 0;
@@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno,
 /**
  * task_current_syscall - Discover what a blocked task is doing.
  * @target:	thread to examine
- * @callno:	filled with system call number or -1
- * @args:	filled with @maxargs system call arguments
- * @maxargs:	number of elements in @args to fill
- * @sp:		filled with user stack pointer
- * @pc:		filled with user PC
+ * @info:	structure with the following fields:
+ *		 .sp        - filled with user stack pointer
+ *		 .data.nr   - filled with system call number or -1
+ *		 .data.args - filled with @maxargs system call arguments
+ *		 .data.instruction_pointer - filled with user PC
  *
- * If @target is blocked in a system call, returns zero with *@callno
- * set to the the call's number and @args filled in with its arguments.
- * Registers not used for system call arguments may not be available and
- * it is not kosher to use &struct user_regset calls while the system
+ * If @target is blocked in a system call, returns zero with @info.data.nr
+ * set to the the call's number and @info.data.args filled in with its
+ * arguments. Registers not used for system call arguments may not be available
+ * and it is not kosher to use &struct user_regset calls while the system
  * call is still in progress. Note we may get this result if @target
  * has finished its system call but not yet returned to user mode, such
  * as when it's stopped for signal handling or syscall exit tracing.
  *
  * If @target is blocked in the kernel during a fault or exception,
- * returns zero with *@callno set to -1 and does not fill in @args.
- * If so, it's now safe to examine @target using &struct user_regset
- * get() calls as long as we're sure @target won't return to user mode.
+ * returns zero with *@info.data.nr set to -1 and does not fill in
+ * @info.data.args. If so, it's now safe to examine @target using
+ * &struct user_regset get() calls as long as we're sure @target won't return
+ * to user mode.
  *
  * Returns -%EAGAIN if @target does not remain blocked.
- *
- * Returns -%EINVAL if @maxargs is too large (maximum is six).
  */
-int task_current_syscall(struct task_struct *target, long *callno,
-			 unsigned long args[6], unsigned int maxargs,
-			 unsigned long *sp, unsigned long *pc)
+int task_current_syscall(struct task_struct *target, struct syscall_info *info)
 {
 	long state;
 	unsigned long ncsw;

-	if (unlikely(maxargs > 6))
-		return -EINVAL;
-
 	if (target == current)
-		return collect_syscall(target, callno, args, maxargs, sp, pc);
+		return collect_syscall(target, info);

 	state = target->state;
 	if (unlikely(!state))
@@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno,

 	ncsw = wait_task_inactive(target, state);
 	if (unlikely(!ncsw) ||
-	    unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+	    unlikely(collect_syscall(target, info)) ||
 	    unlikely(wait_task_inactive(target, state) != ncsw))
 		return -EAGAIN;

-- cgit
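For reference, a minimal sketch of the resulting interface, assuming only what
the commit message above states (struct seccomp_data plus an "sp" member). The
structure is reproduced for illustration, and the dump_blocked_syscall()
caller, its includes and its output format are hypothetical, not code from
this series.

/*
 * Sketch only: struct syscall_info as the commit message describes it
 * (seccomp_data plus the user stack pointer).  In a tree that already
 * carries this patch, include <linux/ptrace.h> instead of repeating the
 * definition here.
 */
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <linux/printk.h>
#include <linux/types.h>

struct syscall_info {
	__u64			sp;	/* user stack pointer */
	struct seccomp_data	data;	/* nr, arch, instruction_pointer, args[6] */
};

int task_current_syscall(struct task_struct *target, struct syscall_info *info);

/* Hypothetical caller: report what a blocked task is doing. */
static void dump_blocked_syscall(struct task_struct *task)
{
	struct syscall_info info;

	if (task_current_syscall(task, &info))
		return;		/* -EAGAIN: the task did not remain blocked */

	pr_info("pid %d: syscall %d sp=%#llx pc=%#llx arg0=%#llx\n",
		task_pid_nr(task), info.data.nr, info.sp,
		info.data.instruction_pointer, info.data.args[0]);
}

The benefit of the suggestion is visible at the call site: one pointer in and
every field filled, instead of six separate out-parameters.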
From b35f549df1d7520d37ba1e6d4a8d4df6bd52d136 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Mon, 7 Nov 2016 16:26:37 -0500
Subject: syscalls: Remove start and number from syscall_get_arguments() args

At Linux Plumbers, Andy Lutomirski approached me and pointed out that the
syscall_get_arguments() function as implemented on x86 was horribly written
and not optimized for the standard case of passing in 0 and 6 for the
starting index and the number of arguments to get. When looking at all the
users of this function, I discovered that all instances pass in only 0 and 6
for these arguments. Instead of having this function handle different cases
that are never used, simply rewrite it to return the first 6 arguments of a
system call. This should help the performance of tracing system calls by
ptrace, ftrace and perf.

Link: http://lkml.kernel.org/r/20161107213233.754809394@goodmis.org
Cc: Oleg Nesterov
Cc: Kees Cook
Cc: Andy Lutomirski
Cc: Dominik Brodowski
Cc: Dave Martin
Cc: "Dmitry V. Levin"
Cc: x86@kernel.org
Cc: linux-snps-arc@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-c6x-dev@linux-c6x.org
Cc: uclinux-h8-devel@lists.sourceforge.jp
Cc: linux-hexagon@vger.kernel.org
Cc: linux-ia64@vger.kernel.org
Cc: linux-mips@vger.kernel.org
Cc: nios2-dev@lists.rocketboards.org
Cc: openrisc@lists.librecores.org
Cc: linux-parisc@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-riscv@lists.infradead.org
Cc: linux-s390@vger.kernel.org
Cc: linux-sh@vger.kernel.org
Cc: sparclinux@vger.kernel.org
Cc: linux-um@lists.infradead.org
Cc: linux-xtensa@linux-xtensa.org
Cc: linux-arch@vger.kernel.org
Acked-by: Paul Burton		# MIPS parts
Acked-by: Max Filippov		# For xtensa changes
Acked-by: Will Deacon		# For the arm64 bits
Reviewed-by: Thomas Gleixner	# for x86
Reviewed-by: Dmitry V. Levin
Reported-by: Andy Lutomirski
Signed-off-by: Steven Rostedt (VMware)
---
 lib/syscall.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/syscall.c b/lib/syscall.c
index e8467e17b9a2..fb328e7ccb08 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -27,7 +27,7 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info

 	info->data.nr = syscall_get_nr(target, regs);
 	if (info->data.nr != -1L)
-		syscall_get_arguments(target, regs, 0, 6,
+		syscall_get_arguments(target, regs,
 				      (unsigned long *)&info->data.args[0]);

 	put_task_stack(target);
-- cgit
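To make the simplified interface concrete, here is a hedged sketch of what a
per-architecture syscall_get_arguments() reduces to once the start/count
parameters are gone. The register names follow the x86-64 syscall convention
(di, si, dx, r10, r8, r9); treat the body as illustrative, not as the literal
x86 code changed by this series (the real implementations live in each
architecture's asm/syscall.h).

#include <linux/sched.h>
#include <asm/ptrace.h>

/*
 * Illustrative sketch: with no starting index and no count, the helper
 * becomes an unrolled copy of all six system call argument registers.
 */
static inline void syscall_get_arguments(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned long *args)
{
	args[0] = regs->di;
	args[1] = regs->si;
	args[2] = regs->dx;
	args[3] = regs->r10;
	args[4] = regs->r8;
	args[5] = regs->r9;
}

Dropping the variable start and count removes the case handling the old code
needed, which is the kind of simplification the commit message is after.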
Levin" Cc: x86@kernel.org Cc: linux-snps-arc@lists.infradead.org Cc: linux-kernel@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: uclinux-h8-devel@lists.sourceforge.jp Cc: linux-hexagon@vger.kernel.org Cc: linux-ia64@vger.kernel.org Cc: linux-mips@vger.kernel.org Cc: nios2-dev@lists.rocketboards.org Cc: openrisc@lists.librecores.org Cc: linux-parisc@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: linux-riscv@lists.infradead.org Cc: linux-s390@vger.kernel.org Cc: linux-sh@vger.kernel.org Cc: sparclinux@vger.kernel.org Cc: linux-um@lists.infradead.org Cc: linux-xtensa@linux-xtensa.org Cc: linux-arch@vger.kernel.org Acked-by: Paul Burton # MIPS parts Acked-by: Max Filippov # For xtensa changes Acked-by: Will Deacon # For the arm64 bits Reviewed-by: Thomas Gleixner # for x86 Reviewed-by: Dmitry V. Levin Reported-by: Andy Lutomirski Signed-off-by: Steven Rostedt (VMware) --- lib/syscall.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/syscall.c b/lib/syscall.c index e8467e17b9a2..fb328e7ccb08 100644 --- a/lib/syscall.c +++ b/lib/syscall.c @@ -27,7 +27,7 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info info->data.nr = syscall_get_nr(target, regs); if (info->data.nr != -1L) - syscall_get_arguments(target, regs, 0, 6, + syscall_get_arguments(target, regs, (unsigned long *)&info->data.args[0]); put_task_stack(target); -- cgit From 5f074f3e192f10c9fade898b9b3b8812e3d83342 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Fri, 5 Apr 2019 18:38:45 -0700 Subject: lib/string.c: implement a basic bcmp A recent optimization in Clang (r355672) lowers comparisons of the return value of memcmp against zero to comparisons of the return value of bcmp against zero. This helps some platforms that implement bcmp more efficiently than memcmp. glibc simply aliases bcmp to memcmp, but an optimized implementation is in the works. This results in linkage failures for all targets with Clang due to the undefined symbol. For now, just implement bcmp as a tailcail to memcmp to unbreak the build. This routine can be further optimized in the future. Other ideas discussed: * A weak alias was discussed, but breaks for architectures that define their own implementations of memcmp since aliases to declarations are not permitted (only definitions). Arch-specific memcmp implementations typically declare memcmp in C headers, but implement them in assembly. * -ffreestanding also is used sporadically throughout the kernel. * -fno-builtin-bcmp doesn't work when doing LTO. 
From b11ed18efa8f3dc58b259b812588317b765b1cfc Mon Sep 17 00:00:00 2001
From: Dave Rodgman
Date: Fri, 5 Apr 2019 18:38:58 -0700
Subject: lib/lzo: fix bugs for very short or empty input

For very short input data (0-1 bytes), lzo-rle was not behaving correctly.
Fix this behaviour and update documentation accordingly.

For zero-length input, lzo v0 outputs an end-of-stream marker only, which was
misinterpreted by lzo-rle as a bitstream version number. Ensure bitstream
versions > 0 require a minimum stream length of 5.

Also fixes a bug in handling the tail for very short inputs when a bitstream
version is present.
Link: http://lkml.kernel.org/r/20190326165857.34613-1-dave.rodgman@arm.com
Signed-off-by: Dave Rodgman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/lzo/lzo1x_compress.c        | 9 ++++++---
 lib/lzo/lzo1x_decompress_safe.c | 4 +---
 2 files changed, 7 insertions(+), 6 deletions(-)

(limited to 'lib')

diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 4525fb094844..a8ede77afe0d 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -291,13 +291,14 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 {
 	const unsigned char *ip = in;
 	unsigned char *op = out;
+	unsigned char *data_start;
 	size_t l = in_len;
 	size_t t = 0;
 	signed char state_offset = -2;
 	unsigned int m4_max_offset;

-	// LZO v0 will never write 17 as first byte,
-	// so this is used to version the bitstream
+	// LZO v0 will never write 17 as first byte (except for zero-length
+	// input), so this is used to version the bitstream
 	if (bitstream_version > 0) {
 		*op++ = 17;
 		*op++ = bitstream_version;
@@ -306,6 +307,8 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 		m4_max_offset = M4_MAX_OFFSET_V0;
 	}

+	data_start = op;
+
 	while (l > 20) {
 		size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
 		uintptr_t ll_end = (uintptr_t) ip + ll;
@@ -324,7 +327,7 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 	if (t > 0) {
 		const unsigned char *ii = in + in_len - t;

-		if (op == out && t <= 238) {
+		if (op == data_start && t <= 238) {
 			*op++ = (17 + t);
 		} else if (t <= 3) {
 			op[state_offset] |= t;
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 6d2600ea3b55..9e07e9ef1aad 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -54,11 +54,9 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
 	if (unlikely(in_len < 3))
 		goto input_overrun;

-	if (likely(*ip == 17)) {
+	if (likely(in_len >= 5) && likely(*ip == 17)) {
 		bitstream_version = ip[1];
 		ip += 2;
-		if (unlikely(in_len < 5))
-			goto input_overrun;
 	} else {
 		bitstream_version = 0;
 	}
-- cgit
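To restate the framing rule the lzo fix above enforces, here is an
illustrative sketch; parse_bitstream_version() is invented for this note and
is not a function in lib/lzo. A versioned lzo-rle stream begins with the byte
17 followed by a version byte and, per the fix, must be at least 5 bytes long,
while plain LZO v0 output can start with 17 only for zero-length input (the
end-of-stream marker), so the length check is what keeps the two cases apart.

#include <linux/types.h>

/*
 * Sketch of the header rule only -- invented helper, not lib/lzo code.
 * Returns the bitstream version (> 0 for lzo-rle), or 0 for a classic
 * LZO v0 stream.
 */
static int parse_bitstream_version(const unsigned char *in, size_t in_len)
{
	if (in_len >= 5 && in[0] == 17)
		return in[1];	/* versioned (lzo-rle) stream */
	return 0;		/* LZO v0, including the short end-of-stream case */
}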