Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      |   4
-rw-r--r--  lib/Kconfig.debug                |   7
-rw-r--r--  lib/Makefile                     |  12
-rw-r--r--  lib/fault-inject.c               |  12
-rw-r--r--  lib/iov_iter.c                   |   4
-rw-r--r--  lib/lzo/lzo1x_compress.c         |   9
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c  |   4
-rw-r--r--  lib/rhashtable.c                 |   8
-rw-r--r--  lib/sbitmap.c                    |  11
-rw-r--r--  lib/stackdepot.c                 |  54
-rw-r--r--  lib/string.c                     |  20
-rw-r--r--  lib/strncpy_from_user.c          |   5
-rw-r--r--  lib/strnlen_user.c               |   4
-rw-r--r--  lib/syscall.c                    |  57
-rw-r--r--  lib/test_vmalloc.c               |   6
-rw-r--r--  lib/ubsan.c                      |  69
-rw-r--r--  lib/ubsan.h                      |   5
17 files changed, 163 insertions(+), 128 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index a9e56539bd11..e86975bfca6a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -597,6 +597,10 @@ config ARCH_HAS_UACCESS_FLUSHCACHE
config ARCH_HAS_UACCESS_MCSAFE
bool
+# Temporary. Goes away when all archs are cleaned up
+config ARCH_STACKWALK
+ bool
+
config STACKDEPOT
bool
select STACKTRACE
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0d9e81779e37..d5a4a4036d2f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -753,9 +753,9 @@ endmenu # "Memory Debugging"
config ARCH_HAS_KCOV
bool
help
- KCOV does not have any arch-specific code, but currently it is enabled
- only for x86_64. KCOV requires testing on other archs, and most likely
- disabling of instrumentation for some early boot code.
+ An architecture should select this when it can successfully
+ build and run with CONFIG_KCOV. This typically requires
+ disabling instrumentation for some early boot code.
config CC_HAS_SANCOV_TRACE_PC
def_bool $(cc-option,-fsanitize-coverage=trace-pc)
@@ -1929,6 +1929,7 @@ config TEST_KMOD
depends on m
depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
depends on NETDEVICES && NET_CORE && INET # for TUN
+ depends on BLOCK
select TEST_LKM
select XFS_FS
select TUN
diff --git a/lib/Makefile b/lib/Makefile
index 3b08673e8881..e16e7aadc41a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -17,6 +17,17 @@ KCOV_INSTRUMENT_list_debug.o := n
KCOV_INSTRUMENT_debugobjects.o := n
KCOV_INSTRUMENT_dynamic_debug.o := n
+# Early boot use of cmdline, don't instrument it
+ifdef CONFIG_AMD_MEM_ENCRYPT
+KASAN_SANITIZE_string.o := n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_string.o = -pg
+endif
+
+CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
+endif
+
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
idr.o int_sqrt.o extable.o \
@@ -268,6 +279,7 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
obj-$(CONFIG_UBSAN) += ubsan.o
UBSAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index cf7b129b0b2b..e26aa4f65eb9 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -65,22 +65,16 @@ static bool fail_task(struct fault_attr *attr, struct task_struct *task)
static bool fail_stacktrace(struct fault_attr *attr)
{
- struct stack_trace trace;
int depth = attr->stacktrace_depth;
unsigned long entries[MAX_STACK_TRACE_DEPTH];
- int n;
+ int n, nr_entries;
bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);
if (depth == 0)
return found;
- trace.nr_entries = 0;
- trace.entries = entries;
- trace.max_entries = depth;
- trace.skip = 1;
-
- save_stack_trace(&trace);
- for (n = 0; n < trace.nr_entries; n++) {
+ nr_entries = stack_trace_save(entries, depth, 1);
+ for (n = 0; n < nr_entries; n++) {
if (attr->reject_start <= entries[n] &&
entries[n] < attr->reject_end)
return false;
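
The fault-inject conversion above is typical of the tree-wide move from struct stack_trace to the plain-array stack_trace_save() interface. A minimal sketch of the new calling convention, assuming <linux/stacktrace.h> (the helper name and filter logic here are illustrative, not the fault-injection code itself):

    #include <linux/stacktrace.h>

    static bool stack_hits_range(unsigned long start, unsigned long end)
    {
        unsigned long entries[32];
        unsigned int i, nr;

        /* Save up to 32 return addresses, skipping our own frame. */
        nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

        for (i = 0; i < nr; i++)
            if (start <= entries[i] && entries[i] < end)
                return true;
        return false;
    }
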
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index ea36dc355da1..b396d328a764 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1528,6 +1528,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
struct iov_iter *i)
{
+#ifdef CONFIG_CRYPTO
struct ahash_request *hash = hashp;
struct scatterlist sg;
size_t copied;
@@ -1537,6 +1538,9 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
ahash_request_set_crypt(hash, &sg, NULL, copied);
crypto_ahash_update(hash);
return copied;
+#else
+ return 0;
+#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
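
With CONFIG_CRYPTO disabled, hash_and_copy_to_iter() now compiles to a stub that reports zero bytes copied instead of dereferencing the opaque hash pointer. A hedged caller sketch (buf/len/hash_req/iter are illustrative names) would therefore treat a short return as a failure:

    copied = hash_and_copy_to_iter(buf, len, hash_req, &iter);
    if (copied != len)
        return -EFAULT; /* also covers the !CONFIG_CRYPTO stub returning 0 */
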
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 4525fb094844..a8ede77afe0d 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -291,13 +291,14 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
{
const unsigned char *ip = in;
unsigned char *op = out;
+ unsigned char *data_start;
size_t l = in_len;
size_t t = 0;
signed char state_offset = -2;
unsigned int m4_max_offset;
- // LZO v0 will never write 17 as first byte,
- // so this is used to version the bitstream
+ // LZO v0 will never write 17 as first byte (except for zero-length
+ // input), so this is used to version the bitstream
if (bitstream_version > 0) {
*op++ = 17;
*op++ = bitstream_version;
@@ -306,6 +307,8 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
m4_max_offset = M4_MAX_OFFSET_V0;
}
+ data_start = op;
+
while (l > 20) {
size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
uintptr_t ll_end = (uintptr_t) ip + ll;
@@ -324,7 +327,7 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
if (t > 0) {
const unsigned char *ii = in + in_len - t;
- if (op == out && t <= 238) {
+ if (op == data_start && t <= 238) {
*op++ = (17 + t);
} else if (t <= 3) {
op[state_offset] |= t;
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 6d2600ea3b55..9e07e9ef1aad 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -54,11 +54,9 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
if (unlikely(in_len < 3))
goto input_overrun;
- if (likely(*ip == 17)) {
+ if (likely(in_len >= 5) && likely(*ip == 17)) {
bitstream_version = ip[1];
ip += 2;
- if (unlikely(in_len < 5))
- goto input_overrun;
} else {
bitstream_version = 0;
}
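
The compressor and the safe decompressor above are two halves of one bitstream contract: a leading 17 byte marks a versioned stream only when there is enough input to carry the version. A self-contained round-trip sketch against the public <linux/lzo.h> API, assuming non-empty input (buffer sizing per lzo1x_worst_compress(); error handling abbreviated):

    #include <linux/lzo.h>
    #include <linux/vmalloc.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    static int lzo_roundtrip(const u8 *in, size_t in_len)
    {
        size_t out_len = lzo1x_worst_compress(in_len);
        size_t dec_len = in_len;
        u8 *out = vmalloc(out_len);
        u8 *dec = vmalloc(in_len);
        void *wrkmem = vmalloc(LZO1X_1_MEM_COMPRESS);
        int ret = -ENOMEM;

        if (!out || !dec || !wrkmem)
            goto free;

        ret = lzo1x_1_compress(in, in_len, out, &out_len, wrkmem);
        if (ret == LZO_E_OK)
            ret = lzo1x_decompress_safe(out, out_len, dec, &dec_len);
        if (ret == LZO_E_OK && (dec_len != in_len || memcmp(in, dec, in_len)))
            ret = -EINVAL;
    free:
        vfree(wrkmem);
        vfree(dec);
        vfree(out);
        return ret;
    }
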
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 0a105d4af166..97f59abc3e92 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
else if (tbl->nest)
err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
- if (!err)
- err = rhashtable_rehash_table(ht);
+ if (!err || err == -EEXIST) {
+ int nerr;
+
+ nerr = rhashtable_rehash_table(ht);
+ err = err ?: nerr;
+ }
mutex_unlock(&ht->mutex);
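
The err = err ?: nerr; line uses the GCC "conditional with omitted operand" extension that is idiomatic in kernel code: keep the first error encountered, and only adopt the rehash result if no error was already pending. Spelled out:

    if (!err)
        err = nerr; /* err = err ?: nerr  is  err = err ? err : nerr */
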
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 5b382c1244ed..155fe38756ec 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
{
+ /*
+ * Once the clear bit is set, the bit may be allocated out.
+ *
+ * Orders READ/WRITE on the associated instance (such as the request
+ * of blk_mq) by this bit, to avoid a race with re-allocation,
+ * and its pair is the memory barrier implied in __sbitmap_get_word.
+ *
+ * One invariant is that the clear bit has to be zero when the bit
+ * is in use.
+ */
+ smp_mb__before_atomic();
sbitmap_deferred_clear_bit(&sbq->sb, nr);
/*
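
The barrier inserted above is the classic publish-before-release pairing for a reader that re-acquires the object through an atomic RMW. The same idea in a generic, hypothetical form (obj, result and IN_USE are illustrative names, not sbitmap code):

    /* Writer: complete every update to the instance first... */
    WRITE_ONCE(obj->result, value);

    /* ...then order those stores before the RMW that frees the slot. */
    smp_mb__before_atomic();
    clear_bit(IN_USE, &obj->flags);
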
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index e513459a5601..605c61f65d94 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -194,40 +194,52 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
return NULL;
}
-void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
+/**
+ * stack_depot_fetch - Fetch stack entries from a depot
+ *
+ * @handle: Stack depot handle which was returned from
+ * stack_depot_save().
+ * @entries: Pointer to store the entries address
+ *
+ * Return: The number of trace entries for this depot.
+ */
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+ unsigned long **entries)
{
union handle_parts parts = { .handle = handle };
void *slab = stack_slabs[parts.slabindex];
size_t offset = parts.offset << STACK_ALLOC_ALIGN;
struct stack_record *stack = slab + offset;
- trace->nr_entries = trace->max_entries = stack->size;
- trace->entries = stack->entries;
- trace->skip = 0;
+ *entries = stack->entries;
+ return stack->size;
}
-EXPORT_SYMBOL_GPL(depot_fetch_stack);
+EXPORT_SYMBOL_GPL(stack_depot_fetch);
/**
- * depot_save_stack - save stack in a stack depot.
- * @trace - the stacktrace to save.
- * @alloc_flags - flags for allocating additional memory if required.
+ * stack_depot_save - Save a stack trace from an array
+ *
+ * @entries: Pointer to storage array
+ * @nr_entries: Size of the storage array
+ * @alloc_flags: Allocation gfp flags
*
- * Returns the handle of the stack struct stored in depot.
+ * Return: The handle of the stack struct stored in depot
*/
-depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
- gfp_t alloc_flags)
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags)
{
- u32 hash;
- depot_stack_handle_t retval = 0;
struct stack_record *found = NULL, **bucket;
- unsigned long flags;
+ depot_stack_handle_t retval = 0;
struct page *page = NULL;
void *prealloc = NULL;
+ unsigned long flags;
+ u32 hash;
- if (unlikely(trace->nr_entries == 0))
+ if (unlikely(nr_entries == 0))
goto fast_exit;
- hash = hash_stack(trace->entries, trace->nr_entries);
+ hash = hash_stack(entries, nr_entries);
bucket = &stack_table[hash & STACK_HASH_MASK];
/*
@@ -235,8 +247,8 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
* The smp_load_acquire() here pairs with smp_store_release() to
* |bucket| below.
*/
- found = find_stack(smp_load_acquire(bucket), trace->entries,
- trace->nr_entries, hash);
+ found = find_stack(smp_load_acquire(bucket), entries,
+ nr_entries, hash);
if (found)
goto exit;
@@ -264,10 +276,10 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
spin_lock_irqsave(&depot_lock, flags);
- found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
+ found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) {
struct stack_record *new =
- depot_alloc_stack(trace->entries, trace->nr_entries,
+ depot_alloc_stack(entries, nr_entries,
hash, &prealloc, alloc_flags);
if (new) {
new->next = *bucket;
@@ -297,4 +309,4 @@ exit:
fast_exit:
return retval;
}
-EXPORT_SYMBOL_GPL(depot_save_stack);
+EXPORT_SYMBOL_GPL(stack_depot_save);
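
Together with the fault-inject hunk earlier, this shows the reworked depot API end to end: save raw entries once, fetch them later by handle. A minimal sketch assuming <linux/stackdepot.h> and <linux/stacktrace.h> (the helper names are illustrative):

    #include <linux/stackdepot.h>
    #include <linux/stacktrace.h>

    static depot_stack_handle_t record_stack(gfp_t flags)
    {
        unsigned long entries[16];
        unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

        /* Returns 0 when the depot cannot store the trace. */
        return stack_depot_save(entries, nr, flags);
    }

    static void print_recorded_stack(depot_stack_handle_t handle)
    {
        unsigned long *entries;
        unsigned int nr = stack_depot_fetch(handle, &entries);

        stack_trace_print(entries, nr, 0);
    }
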
diff --git a/lib/string.c b/lib/string.c
index 38e4ca08e757..3ab861c1a857 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
EXPORT_SYMBOL(memcmp);
#endif
+#ifndef __HAVE_ARCH_BCMP
+/**
+ * bcmp - returns 0 if and only if the buffers have identical contents.
+ * @a: pointer to first buffer.
+ * @b: pointer to second buffer.
+ * @len: size of buffers.
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+#undef bcmp
+int bcmp(const void *a, const void *b, size_t len)
+{
+ return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
+
#ifndef __HAVE_ARCH_MEMSCAN
/**
* memscan - Find a character in an area of memory.
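
As the kerneldoc for bcmp() above stresses, only the zero/non-zero distinction of its return value is meaningful. A caller sketch (key_a, key_b and key_len are hypothetical):

    if (bcmp(key_a, key_b, key_len) == 0)
        pr_debug("buffers match\n");
    /* Never test bcmp() < 0 or > 0: the sign carries no meaning. */
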
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 58eacd41526c..023ba9f3b99f 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -23,10 +23,11 @@
* hit it), 'max' is the address space maximum (and we return
* -EFAULT if we hit it).
*/
-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
+static inline long do_strncpy_from_user(char *dst, const char __user *src,
+ unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
- long res = 0;
+ unsigned long res = 0;
/*
* Truncate 'max' to the user-specified limit, so that
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 1c1a1b0e38a5..7f2db3fe311f 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -28,7 +28,7 @@
static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
- long align, res = 0;
+ unsigned long align, res = 0;
unsigned long c;
/*
@@ -42,7 +42,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
* Do everything aligned. But that means that we
* need to also expand the maximum..
*/
- align = (sizeof(long) - 1) & (unsigned long)src;
+ align = (sizeof(unsigned long) - 1) & (unsigned long)src;
src -= align;
max += align;
diff --git a/lib/syscall.c b/lib/syscall.c
index 1a7077f20eae..fb328e7ccb08 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -5,16 +5,14 @@
#include <linux/export.h>
#include <asm/syscall.h>
-static int collect_syscall(struct task_struct *target, long *callno,
- unsigned long args[6], unsigned int maxargs,
- unsigned long *sp, unsigned long *pc)
+static int collect_syscall(struct task_struct *target, struct syscall_info *info)
{
struct pt_regs *regs;
if (!try_get_task_stack(target)) {
/* Task has no stack, so the task isn't in a syscall. */
- *sp = *pc = 0;
- *callno = -1;
+ memset(info, 0, sizeof(*info));
+ info->data.nr = -1;
return 0;
}
@@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno,
return -EAGAIN;
}
- *sp = user_stack_pointer(regs);
- *pc = instruction_pointer(regs);
+ info->sp = user_stack_pointer(regs);
+ info->data.instruction_pointer = instruction_pointer(regs);
- *callno = syscall_get_nr(target, regs);
- if (*callno != -1L && maxargs > 0)
- syscall_get_arguments(target, regs, 0, maxargs, args);
+ info->data.nr = syscall_get_nr(target, regs);
+ if (info->data.nr != -1L)
+ syscall_get_arguments(target, regs,
+ (unsigned long *)&info->data.args[0]);
put_task_stack(target);
return 0;
@@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno,
/**
* task_current_syscall - Discover what a blocked task is doing.
* @target: thread to examine
- * @callno: filled with system call number or -1
- * @args: filled with @maxargs system call arguments
- * @maxargs: number of elements in @args to fill
- * @sp: filled with user stack pointer
- * @pc: filled with user PC
+ * @info: structure with the following fields:
+ * .sp - filled with user stack pointer
+ * .data.nr - filled with system call number or -1
+ * .data.args - filled with the 6 system call arguments
+ * .data.instruction_pointer - filled with user PC
*
- * If @target is blocked in a system call, returns zero with *@callno
- * set to the the call's number and @args filled in with its arguments.
- * Registers not used for system call arguments may not be available and
- * it is not kosher to use &struct user_regset calls while the system
+ * If @target is blocked in a system call, returns zero with @info.data.nr
+ * set to the call's number and @info.data.args filled in with its
+ * arguments. Registers not used for system call arguments may not be available
+ * and it is not kosher to use &struct user_regset calls while the system
* call is still in progress. Note we may get this result if @target
* has finished its system call but not yet returned to user mode, such
* as when it's stopped for signal handling or syscall exit tracing.
*
* If @target is blocked in the kernel during a fault or exception,
- * returns zero with *@callno set to -1 and does not fill in @args.
- * If so, it's now safe to examine @target using &struct user_regset
- * get() calls as long as we're sure @target won't return to user mode.
+ * returns zero with @info.data.nr set to -1 and does not fill in
+ * @info.data.args. If so, it's now safe to examine @target using
+ * &struct user_regset get() calls as long as we're sure @target won't return
+ * to user mode.
*
* Returns -%EAGAIN if @target does not remain blocked.
- *
- * Returns -%EINVAL if @maxargs is too large (maximum is six).
*/
-int task_current_syscall(struct task_struct *target, long *callno,
- unsigned long args[6], unsigned int maxargs,
- unsigned long *sp, unsigned long *pc)
+int task_current_syscall(struct task_struct *target, struct syscall_info *info)
{
long state;
unsigned long ncsw;
- if (unlikely(maxargs > 6))
- return -EINVAL;
-
if (target == current)
- return collect_syscall(target, callno, args, maxargs, sp, pc);
+ return collect_syscall(target, info);
state = target->state;
if (unlikely(!state))
@@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno,
ncsw = wait_task_inactive(target, state);
if (unlikely(!ncsw) ||
- unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+ unlikely(collect_syscall(target, info)) ||
unlikely(wait_task_inactive(target, state) != ncsw))
return -EAGAIN;
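
The reworked interface returns everything through struct syscall_info (a user stack pointer plus an embedded struct seccomp_data). A hedged caller sketch, assuming a valid task pointer:

    struct syscall_info info;

    if (task_current_syscall(task, &info) == 0 && info.data.nr != -1)
        pr_info("in syscall %d, pc=%llx sp=%llx\n",
            info.data.nr, info.data.instruction_pointer, info.sp);
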
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 83cdcaa82bf6..f832b095afba 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -383,14 +383,14 @@ static void shuffle_array(int *arr, int n)
static int test_func(void *private)
{
struct test_driver *t = private;
- cpumask_t newmask = CPU_MASK_NONE;
int random_array[ARRAY_SIZE(test_case_array)];
int index, i, j, ret;
ktime_t kt;
u64 delta;
- cpumask_set_cpu(t->cpu, &newmask);
- set_cpus_allowed_ptr(current, &newmask);
+ ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu));
+ if (ret < 0)
+ pr_err("Failed to set affinity to CPU %d\n", t->cpu);
for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
random_array[i] = i;
diff --git a/lib/ubsan.c b/lib/ubsan.c
index e4162f59a81c..ecc179338094 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
+#include <linux/uaccess.h>
#include "ubsan.h"
@@ -86,11 +87,13 @@ static bool is_inline_int(struct type_descriptor *type)
return bits <= inline_bits;
}
-static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
+static s_max get_signed_val(struct type_descriptor *type, void *val)
{
if (is_inline_int(type)) {
unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type);
- return ((s_max)val) << extra_bits >> extra_bits;
+ unsigned long ulong_val = (unsigned long)val;
+
+ return ((s_max)ulong_val) << extra_bits >> extra_bits;
}
if (type_bit_width(type) == 64)
@@ -99,15 +102,15 @@ static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
return *(s_max *)val;
}
-static bool val_is_negative(struct type_descriptor *type, unsigned long val)
+static bool val_is_negative(struct type_descriptor *type, void *val)
{
return type_is_signed(type) && get_signed_val(type, val) < 0;
}
-static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
+static u_max get_unsigned_val(struct type_descriptor *type, void *val)
{
if (is_inline_int(type))
- return val;
+ return (unsigned long)val;
if (type_bit_width(type) == 64)
return *(u64 *)val;
@@ -116,7 +119,7 @@ static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
}
static void val_to_string(char *str, size_t size, struct type_descriptor *type,
- unsigned long value)
+ void *value)
{
if (type_is_int(type)) {
if (type_bit_width(type) == 128) {
@@ -163,8 +166,8 @@ static void ubsan_epilogue(unsigned long *flags)
current->in_ubsan--;
}
-static void handle_overflow(struct overflow_data *data, unsigned long lhs,
- unsigned long rhs, char op)
+static void handle_overflow(struct overflow_data *data, void *lhs,
+ void *rhs, char op)
{
struct type_descriptor *type = data->type;
@@ -191,8 +194,7 @@ static void handle_overflow(struct overflow_data *data, unsigned long lhs,
}
void __ubsan_handle_add_overflow(struct overflow_data *data,
- unsigned long lhs,
- unsigned long rhs)
+ void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '+');
@@ -200,23 +202,21 @@ void __ubsan_handle_add_overflow(struct overflow_data *data,
EXPORT_SYMBOL(__ubsan_handle_add_overflow);
void __ubsan_handle_sub_overflow(struct overflow_data *data,
- unsigned long lhs,
- unsigned long rhs)
+ void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '-');
}
EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
void __ubsan_handle_mul_overflow(struct overflow_data *data,
- unsigned long lhs,
- unsigned long rhs)
+ void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '*');
}
EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
void __ubsan_handle_negate_overflow(struct overflow_data *data,
- unsigned long old_val)
+ void *old_val)
{
unsigned long flags;
char old_val_str[VALUE_LENGTH];
@@ -237,8 +237,7 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
void __ubsan_handle_divrem_overflow(struct overflow_data *data,
- unsigned long lhs,
- unsigned long rhs)
+ void *lhs, void *rhs)
{
unsigned long flags;
char rhs_val_str[VALUE_LENGTH];
@@ -313,6 +312,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
unsigned long ptr)
{
+ unsigned long flags = user_access_save();
if (!ptr)
handle_null_ptr_deref(data);
@@ -320,10 +320,12 @@ static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
handle_misaligned_access(data, ptr);
else
handle_object_size_mismatch(data, ptr);
+
+ user_access_restore(flags);
}
void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
- unsigned long ptr)
+ void *ptr)
{
struct type_mismatch_data_common common_data = {
.location = &data->location,
@@ -332,12 +334,12 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
.type_check_kind = data->type_check_kind
};
- ubsan_type_mismatch_common(&common_data, ptr);
+ ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
- unsigned long ptr)
+ void *ptr)
{
struct type_mismatch_data_common common_data = {
@@ -347,30 +349,11 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
.type_check_kind = data->type_check_kind
};
- ubsan_type_mismatch_common(&common_data, ptr);
+ ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
-void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
- unsigned long bound)
-{
- unsigned long flags;
- char bound_str[VALUE_LENGTH];
-
- if (suppress_report(&data->location))
- return;
-
- ubsan_prologue(&data->location, &flags);
-
- val_to_string(bound_str, sizeof(bound_str), data->type, bound);
- pr_err("variable length array bound value %s <= 0\n", bound_str);
-
- ubsan_epilogue(&flags);
-}
-EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive);
-
-void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
- unsigned long index)
+void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
{
unsigned long flags;
char index_str[VALUE_LENGTH];
@@ -388,7 +371,7 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
- unsigned long lhs, unsigned long rhs)
+ void *lhs, void *rhs)
{
unsigned long flags;
struct type_descriptor *rhs_type = data->rhs_type;
@@ -439,7 +422,7 @@ void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
- unsigned long val)
+ void *val)
{
unsigned long flags;
char val_str[VALUE_LENGTH];
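
The user_access_save()/user_access_restore() pair added to ubsan_type_mismatch_common() exists because UBSAN handlers can fire from inside user_access_begin() regions, where arbitrary code must not run with user-space access enabled. The same bracketing pattern in a generic form (report_diagnostics() is a hypothetical stand-in):

    unsigned long ua_flags = user_access_save();

    /*
     * Code that must not run with user-space access enabled,
     * e.g. anything that may call instrumented or traced functions.
     */
    report_diagnostics();

    user_access_restore(ua_flags);
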
diff --git a/lib/ubsan.h b/lib/ubsan.h
index f4d8d0bd4016..b8fa83864467 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -57,11 +57,6 @@ struct nonnull_arg_data {
int arg_index;
};
-struct vla_bound_data {
- struct source_location location;
- struct type_descriptor *type;
-};
-
struct out_of_bounds_data {
struct source_location location;
struct type_descriptor *array_type;