Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.preempt           |  12
-rw-r--r--  kernel/dma/direct.c              |  10
-rw-r--r--  kernel/dma/mapping.c             |   2
-rw-r--r--  kernel/entry/common.c            |  14
-rw-r--r--  kernel/signal.c                  |  40
-rw-r--r--  kernel/trace/Kconfig             |   1
-rw-r--r--  kernel/trace/fgraph.c            |  17
-rw-r--r--  kernel/trace/trace_events_user.c |  86
-rw-r--r--  kernel/watch_queue.c             |   1
9 files changed, 26 insertions, 157 deletions
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 8c6de5a9ecc4..c2f1fd95a821 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -133,14 +133,4 @@ config SCHED_CORE
 	  which is the likely usage by Linux distributions, there should
 	  be no measurable impact on performance.
 
-config ARCH_WANTS_RT_DELAYED_SIGNALS
-	bool
-	help
-	  This option is selected by architectures where raising signals
-	  can happen in atomic contexts on PREEMPT_RT enabled kernels. This
-	  option delays raising the signal until the return to user space
-	  loop where it is also delivered. X86 requires this to deliver
-	  signals from trap handlers which run on IST stacks.
-
-config RT_DELAYED_SIGNALS
-	def_bool PREEMPT_RT && ARCH_WANTS_RT_DELAYED_SIGNALS
+
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 35a1d29d6a2e..9743c6ccce1a 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -277,12 +277,16 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	if (remap) {
+		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
+
+		if (force_dma_unencrypted(dev))
+			prot = pgprot_decrypted(prot);
+
 		/* remove any dirty cache lines on the kernel alias */
 		arch_dma_prep_coherent(page, size);
 
 		/* create a coherent mapping */
-		ret = dma_common_contiguous_remap(page, size,
-				dma_pgprot(dev, PAGE_KERNEL, attrs),
+		ret = dma_common_contiguous_remap(page, size, prot,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
@@ -535,6 +539,8 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 	int ret = -ENXIO;
 
 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+	if (force_dma_unencrypted(dev))
+		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 559461a826ba..db7244291b74 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -407,8 +407,6 @@ EXPORT_SYMBOL(dma_get_sgtable_attrs);
  */
 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
 {
-	if (force_dma_unencrypted(dev))
-		prot = pgprot_decrypted(prot);
 	if (dev_is_dma_coherent(dev))
 		return prot;
 #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
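Note the new split of responsibilities in the two dma hunks: dma_pgprot() no longer strips the encryption bit, so the paths that actually create an unencrypted alias (the contiguous remap and the userspace mmap) apply pgprot_decrypted() themselves. A minimal sketch of the resulting caller-side idiom, using only functions visible in the hunks above (the helper name is hypothetical):

    #include <linux/dma-map-ops.h>	/* dma_pgprot(), force_dma_unencrypted() */

    /* Hypothetical caller: build kernel-mapping protections for DMA memory
     * now that dma_pgprot() leaves memory-encryption handling to callers. */
    static pgprot_t example_dma_remap_prot(struct device *dev, unsigned long attrs)
    {
    	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

    	if (force_dma_unencrypted(dev))
    		prot = pgprot_decrypted(prot);	/* mark the alias decrypted */

    	return prot;
    }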
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index ef8d94a98b7e..e57a224d6b79 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -142,18 +142,6 @@ void noinstr exit_to_user_mode(void)
 /* Workaround to allow gradual conversion of architecture code */
 void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }
 
-#ifdef CONFIG_RT_DELAYED_SIGNALS
-static inline void raise_delayed_signal(void)
-{
-	if (unlikely(current->forced_info.si_signo)) {
-		force_sig_info(&current->forced_info);
-		current->forced_info.si_signo = 0;
-	}
-}
-#else
-static inline void raise_delayed_signal(void) { }
-#endif
-
 static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
 					    unsigned long ti_work)
 {
@@ -168,8 +156,6 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
 		if (ti_work & _TIF_NEED_RESCHED)
 			schedule();
 
-		raise_delayed_signal();
-
 		if (ti_work & _TIF_UPROBE)
 			uprobe_notify_resume(regs);
 
diff --git a/kernel/signal.c b/kernel/signal.c
index 368a34c25bbf..30cd1ca43bcd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1308,43 +1308,6 @@ enum sig_handler {
 };
 
 /*
- * On some architectures, PREEMPT_RT has to delay sending a signal from a
- * trap since it cannot enable preemption, and the signal code's
- * spin_locks turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME
- * which will send the signal on exit of the trap.
- */
-#ifdef CONFIG_RT_DELAYED_SIGNALS
-static inline bool force_sig_delayed(struct kernel_siginfo *info,
-				     struct task_struct *t)
-{
-	if (!in_atomic())
-		return false;
-
-	if (WARN_ON_ONCE(t->forced_info.si_signo))
-		return true;
-
-	if (is_si_special(info)) {
-		WARN_ON_ONCE(info != SEND_SIG_PRIV);
-		t->forced_info.si_signo = info->si_signo;
-		t->forced_info.si_errno = 0;
-		t->forced_info.si_code = SI_KERNEL;
-		t->forced_info.si_pid = 0;
-		t->forced_info.si_uid = 0;
-	} else {
-		t->forced_info = *info;
-	}
-	set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
-	return true;
-}
-#else
-static inline bool force_sig_delayed(struct kernel_siginfo *info,
-				     struct task_struct *t)
-{
-	return false;
-}
-#endif
-
-/*
  * Force a signal that the process can't ignore: if necessary
  * we unblock the signal and change any SIG_IGN to SIG_DFL.
  *
@@ -1364,9 +1327,6 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
 	struct k_sigaction *action;
 	int sig = info->si_signo;
 
-	if (force_sig_delayed(info, t))
-		return 0;
-
 	spin_lock_irqsave(&t->sighand->siglock, flags);
 	action = &t->sighand->action[sig-1];
 	ignored = action->sa.sa_handler == SIG_IGN;
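With the delayed-signal machinery reverted here and in the entry/common.c and Kconfig.preempt hunks above, force_sig_info_to_task() takes t->sighand->siglock unconditionally again. On PREEMPT_RT that spinlock is a sleeping lock, so the constraint moves back to callers: signals must not be forced from atomic context. An illustrative sketch of that constraint (the helper is hypothetical; force_sig() and in_atomic() are existing kernel APIs):

    #include <linux/preempt.h>		/* in_atomic() */
    #include <linux/sched/signal.h>	/* force_sig() */

    /* Hypothetical fault-path helper: with force_sig_delayed() gone, this
     * must run in preemptible context, since siglock can sleep on RT. */
    static void example_fault_notify(void)
    {
    	if (WARN_ON_ONCE(in_atomic()))
    		return;

    	force_sig(SIGSEGV);	/* ends up in force_sig_info_to_task() */
    }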
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9bb54c0b3b2d..2c43e327a619 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -767,6 +767,7 @@ config USER_EVENTS
 	bool "User trace events"
 	select TRACING
 	select DYNAMIC_EVENTS
+	depends on BROKEN || COMPILE_TEST # API needs to be straightened out
 	help
 	  User trace events are user-defined trace events that
 	  can be used like an existing kernel trace event.  User trace
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 19028e072cdb..8f4fb328133a 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -7,6 +7,7 @@
  *
  * Highly modified by Steven Rostedt (VMware).
  */
+#include <linux/jump_label.h>
 #include <linux/suspend.h>
 #include <linux/ftrace.h>
 #include <linux/slab.h>
@@ -23,25 +24,13 @@
 #define ASSIGN_OPS_HASH(opsname, val)
 #endif
 
-static bool kill_ftrace_graph;
+DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
 int ftrace_graph_active;
 
 /* Both enabled by default (can be cleared by function_graph tracer flags) */
 static bool fgraph_sleep_time = true;
 
 /**
- * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
- *
- * ftrace_graph_stop() is called when a severe error is detected in
- * the function graph tracing. This function is called by the critical
- * paths of function graph to keep those paths from doing any more harm.
- */
-bool ftrace_graph_is_dead(void)
-{
-	return kill_ftrace_graph;
-}
-
-/**
  * ftrace_graph_stop - set to permanently disable function graph tracing
  *
  * In case of an error in function graph tracing, this is called
@@ -51,7 +40,7 @@ bool ftrace_graph_is_dead(void)
  */
 void ftrace_graph_stop(void)
 {
-	kill_ftrace_graph = true;
+	static_branch_enable(&kill_ftrace_graph);
 }
 
 /* Add a function return address to the trace stack on thread info.*/
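The reader side of the key is not in this hunk: ftrace_graph_is_dead() disappears from fgraph.c, and its replacement presumably lives as an inline in include/linux/ftrace.h, so the hot-path check becomes a runtime-patched branch rather than a load and compare. A sketch of the assumed header counterpart:

    #include <linux/jump_label.h>

    DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);

    /* Assumed inline: compiles to a patched-out branch until
     * ftrace_graph_stop() calls static_branch_enable() on the key. */
    static inline bool ftrace_graph_is_dead(void)
    {
    	return static_branch_unlikely(&kill_ftrace_graph);
    }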
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index 8b3d241a31c2..706e1686b5eb 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -18,7 +18,12 @@
 #include <linux/tracefs.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+/* Reminder to move to uapi when everything works */
+#ifdef CONFIG_COMPILE_TEST
+#include <linux/user_events.h>
+#else
 #include <uapi/linux/user_events.h>
+#endif
 #include "trace.h"
 #include "trace_dynevent.h"
 
@@ -42,9 +47,6 @@
 #define MAX_FIELD_ARRAY_SIZE 1024
 #define MAX_FIELD_ARG_NAME 256
 
-#define MAX_BPF_COPY_SIZE PAGE_SIZE
-#define MAX_STACK_BPF_DATA 512
-
 static char *register_page_data;
 
 static DEFINE_MUTEX(reg_mutex);
@@ -405,19 +407,6 @@ parse:
 			type[0] != 'u', FILTER_OTHER);
 }
 
-static void user_event_parse_flags(struct user_event *user, char *flags)
-{
-	char *flag;
-
-	if (flags == NULL)
-		return;
-
-	while ((flag = strsep(&flags, ",")) != NULL) {
-		if (strcmp(flag, "BPF_ITER") == 0)
-			user->flags |= FLAG_BPF_ITER;
-	}
-}
-
 static int user_event_parse_fields(struct user_event *user, char *args)
 {
 	char *field;
@@ -713,64 +702,14 @@ discard:
 }
 
 #ifdef CONFIG_PERF_EVENTS
-static void user_event_bpf(struct user_event *user, struct iov_iter *i)
-{
-	struct user_bpf_context context;
-	struct user_bpf_iter bpf_i;
-	char fast_data[MAX_STACK_BPF_DATA];
-	void *temp = NULL;
-
-	if ((user->flags & FLAG_BPF_ITER) && iter_is_iovec(i)) {
-		/* Raw iterator */
-		context.data_type = USER_BPF_DATA_ITER;
-		context.data_len = i->count;
-		context.iter = &bpf_i;
-
-		bpf_i.iov_offset = i->iov_offset;
-		bpf_i.iov = i->iov;
-		bpf_i.nr_segs = i->nr_segs;
-	} else if (i->nr_segs == 1 && iter_is_iovec(i)) {
-		/* Single buffer from user */
-		context.data_type = USER_BPF_DATA_USER;
-		context.data_len = i->count;
-		context.udata = i->iov->iov_base + i->iov_offset;
-	} else {
-		/* Multi buffer from user */
-		struct iov_iter copy = *i;
-		size_t copy_size = min_t(size_t, i->count, MAX_BPF_COPY_SIZE);
-
-		context.data_type = USER_BPF_DATA_KERNEL;
-		context.kdata = fast_data;
-
-		if (unlikely(copy_size > sizeof(fast_data))) {
-			temp = kmalloc(copy_size, GFP_NOWAIT);
-
-			if (temp)
-				context.kdata = temp;
-			else
-				copy_size = sizeof(fast_data);
-		}
-
-		context.data_len = copy_nofault(context.kdata,
-						copy_size, &copy);
-	}
-
-	trace_call_bpf(&user->call, &context);
-
-	kfree(temp);
-}
-
 /*
- * Writes the user supplied payload out to perf ring buffer or eBPF program.
+ * Writes the user supplied payload out to perf ring buffer.
  */
 static void user_event_perf(struct user_event *user, struct iov_iter *i,
 			    void *tpdata, bool *faulted)
 {
 	struct hlist_head *perf_head;
 
-	if (bpf_prog_array_valid(&user->call))
-		user_event_bpf(user, i);
-
 	perf_head = this_cpu_ptr(user->call.perf_events);
 
 	if (perf_head && !hlist_empty(perf_head)) {
@@ -1136,8 +1075,6 @@ static int user_event_parse(char *name, char *args, char *flags,
 
 	user->tracepoint.name = name;
 
-	user_event_parse_flags(user, flags);
-
 	ret = user_event_parse_fields(user, args);
 
 	if (ret)
@@ -1165,11 +1102,11 @@ static int user_event_parse(char *name, char *args, char *flags,
 #endif
 
 	mutex_lock(&event_mutex);
+
 	ret = user_event_trace_register(user);
-	mutex_unlock(&event_mutex);
 
 	if (ret)
-		goto put_user;
+		goto put_user_lock;
 
 	user->index = index;
 
@@ -1181,8 +1118,12 @@ static int user_event_parse(char *name, char *args, char *flags,
 	set_bit(user->index, page_bitmap);
 	hash_add(register_table, &user->node, key);
 
+	mutex_unlock(&event_mutex);
+
 	*newuser = user;
 	return 0;
+put_user_lock:
+	mutex_unlock(&event_mutex);
 put_user:
 	user_event_destroy_fields(user);
 	user_event_destroy_validators(user);
@@ -1575,9 +1516,6 @@ static int user_seq_show(struct seq_file *m, void *p)
 			busy++;
 		}
 
-		if (flags & FLAG_BPF_ITER)
-			seq_puts(m, " FLAG:BPF_ITER");
-
 		seq_puts(m, "\n");
 		active++;
 	}
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
index 3990e4df3d7b..230038d4f908 100644
--- a/kernel/watch_queue.c
+++ b/kernel/watch_queue.c
@@ -370,6 +370,7 @@ static void __put_watch_queue(struct kref *kref)
 
 	for (i = 0; i < wqueue->nr_pages; i++)
 		__free_page(wqueue->notes[i]);
+	kfree(wqueue->notes);
 	bitmap_free(wqueue->notes_bitmap);
 
 	wfilter = rcu_access_pointer(wqueue->filter);
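The watch_queue one-liner plugs a leak in __put_watch_queue(): the loop released every preallocated note page but never the notes[] pointer array that tracked them. A sketch of the allocation/teardown pairing the added kfree() restores (the allocation side lives in watch_queue_set_size(); its exact form is assumed from context):

    /* Allocation side (assumed): one slot per preallocated note page. */
    wqueue->notes = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);

    /* Teardown side, as patched above: free the pages, then the array. */
    for (i = 0; i < wqueue->nr_pages; i++)
    	__free_page(wqueue->notes[i]);
    kfree(wqueue->notes);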