Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c         8
-rw-r--r--  kernel/kcov.c        21
-rw-r--r--  kernel/kexec_core.c   4
-rw-r--r--  kernel/relay.c        2
-rw-r--r--  kernel/sched/core.c   4
5 files changed, 36 insertions(+), 3 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 92870be50bba..9440d61b925c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -440,6 +440,14 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 			continue;
 		}
 		charge = 0;
+		/*
+		 * Don't duplicate many vmas if we've been oom-killed (for
+		 * example)
+		 */
+		if (fatal_signal_pending(current)) {
+			retval = -EINTR;
+			goto out;
+		}
 		if (mpnt->vm_flags & VM_ACCOUNT) {
 			unsigned long len = vma_pages(mpnt);
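
The pattern here — polling fatal_signal_pending() inside a potentially long loop and unwinding with -EINTR — is the usual way to let a SIGKILL (an OOM kill included) stop the work early instead of duplicating thousands of vmas for a doomed child. A minimal sketch of the same shape; struct item, process_items() and process_one() are made-up names, only the signal check and the -EINTR convention come from the patch:

	/* Hypothetical long-running loop; bail out if this task was SIGKILLed. */
	static int process_items(struct item *items, unsigned long nr)
	{
		unsigned long i;

		for (i = 0; i < nr; i++) {
			if (fatal_signal_pending(current))
				return -EINTR;	/* caller unwinds partial work */
			process_one(&items[i]);
		}
		return 0;
	}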
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 2c16f1ab5e10..3ebd09efe72a 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -58,7 +58,7 @@ struct kcov {
 static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 {
-	enum kcov_mode mode;
+	unsigned int mode;
 
 	/*
	 * We are interested in code coverage as a function of a syscall inputs,
@@ -241,7 +241,8 @@ static void kcov_put(struct kcov *kcov)
 void kcov_task_init(struct task_struct *t)
 {
-	t->kcov_mode = KCOV_MODE_DISABLED;
+	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
+	barrier();
 	t->kcov_size = 0;
 	t->kcov_area = NULL;
 	t->kcov = NULL;
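
WRITE_ONCE() forces a single, un-torn store of kcov_mode, and barrier() keeps the compiler from sinking that store below the resets of kcov_size/kcov_area that follow, so an interrupt on the same CPU can never observe an enabled mode paired with already-cleared buffer state. A lockless reader must mirror the ordering: load the mode first, fence, then trust the cached area. A sketch of that reader side, assuming the same task fields (the in-tree check lives in check_kcov_mode(); record_pc() is hypothetical):

	/* Reader-side counterpart, as a sketch: mode first, then the area. */
	unsigned int mode = READ_ONCE(t->kcov_mode);

	barrier();	/* pairs with the barrier() after WRITE_ONCE() above */
	if (mode == KCOV_MODE_TRACE_PC)
		record_pc(t->kcov_area, t->kcov_size);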
@@ -323,6 +324,21 @@ static int kcov_close(struct inode *inode, struct file *filep)
 	return 0;
 }
 
+/*
+ * Fault in a lazily-faulted vmalloc area before it can be used by
+ * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
+ * vmalloc fault handling path is instrumented.
+ */
+static void kcov_fault_in_area(struct kcov *kcov)
+{
+	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
+	unsigned long *area = kcov->area;
+	unsigned long offset;
+
+	for (offset = 0; offset < kcov->size; offset += stride)
+		READ_ONCE(area[offset]);
+}
+
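
One load per PAGE_SIZE / sizeof(unsigned long) entries is enough: a single read per backing page makes the vmalloc fault handler populate that page, and READ_ONCE() stops the compiler from discarding the otherwise-unused loads.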
 static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
 {
@@ -371,6 +387,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 #endif
 		else
 			return -EINVAL;
+		kcov_fault_in_area(kcov);
 		/* Cache in task struct for performance. */
 		t->kcov_size = kcov->size;
 		t->kcov_area = kcov->area;
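
This is the KCOV_ENABLE path, so the area is now fully faulted in before it is cached in the task struct and coverage collection begins. From userspace, the documented kcov sequence that ends in this ioctl looks roughly like the following, inside a test program's main() (error handling omitted; COVER_SIZE is an arbitrary choice of entry count):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>

	#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)
	#define KCOV_ENABLE	_IO('c', 100)
	#define KCOV_TRACE_PC	0
	#define COVER_SIZE	(64 << 10)	/* entries, not bytes */

	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);	/* now prefaults the area */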
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 20fef1a38602..23a83a4da38a 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -829,6 +829,8 @@ static int kimage_load_normal_segment(struct kimage *image,
 		else
 			buf += mchunk;
 		mbytes -= mchunk;
+
+		cond_resched();
 	}
 out:
 	return result;
@@ -893,6 +895,8 @@ static int kimage_load_crash_segment(struct kimage *image,
 		else
 			buf += mchunk;
 		mbytes -= mchunk;
+
+		cond_resched();
 	}
 out:
 	return result;
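
Each pass of these loops copies at most a page, so loading a large kexec image can keep the CPU busy for long stretches on a non-preemptible kernel; cond_resched() offers to yield between chunks whenever a reschedule is due. The generic shape of the fix, with bytes, dst, src, copy_chunk() and CHUNK_MAX as stand-in names:

	/* Hypothetical chunked worker: do bounded work, then offer to yield. */
	while (bytes) {
		size_t chunk = min_t(size_t, bytes, CHUNK_MAX);

		copy_chunk(dst, src, chunk);
		dst += chunk;
		src += chunk;
		bytes -= chunk;

		cond_resched();	/* no-op unless a reschedule is pending */
	}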
diff --git a/kernel/relay.c b/kernel/relay.c
index 9f5326e8a036..04f248644e06 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -39,7 +39,7 @@ static void relay_file_mmap_close(struct vm_area_struct *vma)
 /*
  * fault() vm_op implementation for relay file mapping.
  */
-static int relay_buf_fault(struct vm_fault *vmf)
+static vm_fault_t relay_buf_fault(struct vm_fault *vmf)
 {
 	struct page *page;
 	struct rchan_buf *buf = vmf->vma->vm_private_data;
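
vm_fault_t is a dedicated bitwise type for the VM_FAULT_* codes, so moving fault handlers off plain int lets static checkers such as sparse flag code that leaks -errno values into the fault-code space. The resulting handler shape, mirroring relay_buf_fault() but with a hypothetical lookup_page() in place of relay's vmalloc_to_page() lookup:

	static vm_fault_t my_buf_fault(struct vm_fault *vmf)
	{
		struct page *page = lookup_page(vmf->vma->vm_private_data,
						vmf->pgoff);

		if (!page)
			return VM_FAULT_SIGBUS;	/* a VM_FAULT_* code, never -errno */
		get_page(page);
		vmf->page = page;	/* core MM consumes this reference */
		return 0;
	}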
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a98d54cd5535..78d8facba456 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10,6 +10,8 @@
 #include <linux/kthread.h>
 #include <linux/nospec.h>
 
+#include <linux/kcov.h>
+
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -2633,6 +2635,7 @@ static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
 {
+	kcov_prepare_switch(prev);
 	sched_info_switch(rq, prev, next);
 	perf_event_task_sched_out(prev, next);
 	rseq_preempt(prev);
@@ -2702,6 +2705,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	finish_task(prev);
 	finish_lock_switch(rq);
 	finish_arch_post_lock_switch();
+	kcov_finish_switch(current);
 	fire_sched_in_preempt_notifiers(current);
 	/*
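
kcov_prepare_switch()/kcov_finish_switch() bracket the context switch so that coverage writes are suppressed while the per-task kcov state is in flux. The header side lives in include/linux/kcov.h, outside this 'kernel'-limited diffstat; in this series it amounts to setting and clearing an in-context-switch flag on the task, roughly along these lines (a sketch, not shown in this diff):

	#define kcov_prepare_switch(t)			\
	do {						\
		(t)->kcov_mode |= KCOV_IN_CTXSW;	\
	} while (0)

	#define kcov_finish_switch(t)			\
	do {						\
		(t)->kcov_mode &= ~KCOV_IN_CTXSW;	\
	} while (0)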