Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c                  | 25
-rw-r--r--  kernel/events/core.c          | 22
-rw-r--r--  kernel/events/uprobes.c       |  8
-rw-r--r--  kernel/irq/manage.c           |  3
-rw-r--r--  kernel/liveupdate/Kconfig     |  1
-rw-r--r--  kernel/liveupdate/luo_core.c  |  4
-rw-r--r--  kernel/liveupdate/luo_file.c  |  7
7 files changed, 42 insertions(+), 28 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b674fdf96208..8df2d773fe3b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -249,6 +249,14 @@ err:
return ret;
}
+/*
+ * The former STARTING/DYING states run with IRQs disabled and must not fail.
+ */
+static bool cpuhp_is_atomic_state(enum cpuhp_state state)
+{
+ return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
+}
+
#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
@@ -271,14 +279,6 @@ static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
complete(done);
}
-/*
- * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
- */
-static bool cpuhp_is_atomic_state(enum cpuhp_state state)
-{
- return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
-}
-
/* Synchronization state management */
enum cpuhp_sync_state {
SYNC_STATE_DEAD,
@@ -2364,7 +2364,14 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
else
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
- ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+ if (cpuhp_is_atomic_state(state)) {
+ guard(irqsave)();
+ ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+ /* STARTING/DYING must not fail! */
+ WARN_ON_ONCE(ret);
+ } else {
+ ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+ }
#endif
BUG_ON(ret && !bringup);
return ret;
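This move takes cpuhp_is_atomic_state() out of the CONFIG_SMP-only block so the !SMP branch of cpuhp_issue_call() can use it: callbacks in the atomic (former STARTING/DYING) range now run with interrupts disabled on UP builds as well. guard(irqsave)() is the cleanup.h-style scope guard pairing local_irq_save() with local_irq_restore() at scope exit; a minimal open-coded equivalent of the new branch (same identifiers as the hunk above, sketch only):

	if (cpuhp_is_atomic_state(state)) {
		unsigned long flags;

		local_irq_save(flags);
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
		local_irq_restore(flags);
		/* The atomic range must not fail. */
		WARN_ON_ONCE(ret);
	} else {
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
	}

The guard form keeps the restore on every exit path without naming a flags variable.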
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ece716879cbc..dad0d3d2e85f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2317,8 +2317,6 @@ out:
perf_event__header_size(leader);
}
-static void sync_child_event(struct perf_event *child_event);
-
static void perf_child_detach(struct perf_event *event)
{
struct perf_event *parent_event = event->parent;
@@ -2337,7 +2335,6 @@ static void perf_child_detach(struct perf_event *event)
lockdep_assert_held(&parent_event->child_mutex);
*/
- sync_child_event(event);
list_del_init(&event->child_list);
}
@@ -4588,6 +4585,7 @@ out:
static void perf_remove_from_owner(struct perf_event *event);
static void perf_event_exit_event(struct perf_event *event,
struct perf_event_context *ctx,
+ struct task_struct *task,
bool revoke);
/*
@@ -4615,7 +4613,7 @@ static void perf_event_remove_on_exec(struct perf_event_context *ctx)
modified = true;
- perf_event_exit_event(event, ctx, false);
+ perf_event_exit_event(event, ctx, ctx->task, false);
}
raw_spin_lock_irqsave(&ctx->lock, flags);
@@ -12518,7 +12516,7 @@ static void __pmu_detach_event(struct pmu *pmu, struct perf_event *event,
/*
* De-schedule the event and mark it REVOKED.
*/
- perf_event_exit_event(event, ctx, true);
+ perf_event_exit_event(event, ctx, ctx->task, true);
/*
* All _free_event() bits that rely on event->pmu:
@@ -14075,14 +14073,13 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
-static void sync_child_event(struct perf_event *child_event)
+static void sync_child_event(struct perf_event *child_event,
+ struct task_struct *task)
{
struct perf_event *parent_event = child_event->parent;
u64 child_val;
if (child_event->attr.inherit_stat) {
- struct task_struct *task = child_event->ctx->task;
-
if (task && task != TASK_TOMBSTONE)
perf_event_read_event(child_event, task);
}
@@ -14101,7 +14098,9 @@ static void sync_child_event(struct perf_event *child_event)
static void
perf_event_exit_event(struct perf_event *event,
- struct perf_event_context *ctx, bool revoke)
+ struct perf_event_context *ctx,
+ struct task_struct *task,
+ bool revoke)
{
struct perf_event *parent_event = event->parent;
unsigned long detach_flags = DETACH_EXIT;
@@ -14124,6 +14123,9 @@ perf_event_exit_event(struct perf_event *event,
mutex_lock(&parent_event->child_mutex);
/* PERF_ATTACH_ITRACE might be set concurrently */
attach_state = READ_ONCE(event->attach_state);
+
+ if (attach_state & PERF_ATTACH_CHILD)
+ sync_child_event(event, task);
}
if (revoke)
@@ -14215,7 +14217,7 @@ static void perf_event_exit_task_context(struct task_struct *task, bool exit)
perf_event_task(task, ctx, 0);
list_for_each_entry_safe(child_event, next, &ctx->event_list, event_entry)
- perf_event_exit_event(child_event, ctx, false);
+ perf_event_exit_event(child_event, ctx, exit ? task : NULL, false);
mutex_unlock(&ctx->mutex);
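Taken together, these hunks thread the exiting task through perf_event_exit_event() instead of letting sync_child_event() rediscover it via child_event->ctx->task, which can already be TASK_TOMBSTONE on this path; the sync now also happens under parent_event->child_mutex and only while PERF_ATTACH_CHILD is still set. The argument choice at the three call sites, condensed from the hunks above (sketch, other parameters elided):

	/* remove-on-exec: the context still belongs to the task */
	perf_event_exit_event(event, ctx, ctx->task, false);
	/* PMU revoke/detach */
	perf_event_exit_event(event, ctx, ctx->task, true);
	/* task exit vs. context unclone: pass the task only on real exit */
	perf_event_exit_event(child_event, ctx, exit ? task : NULL, false);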
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f11ceb8be8c4..d546d32390a8 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -79,7 +79,7 @@ struct uprobe {
* The generic code assumes that it has two members of unknown type
* owned by the arch-specific code:
*
- * insn - copy_insn() saves the original instruction here for
+ * insn - copy_insn() saves the original instruction here for
* arch_uprobe_analyze_insn().
*
* ixol - potentially modified instruction to execute out of
@@ -107,8 +107,8 @@ static LIST_HEAD(delayed_uprobe_list);
* allocated.
*/
struct xol_area {
- wait_queue_head_t wq; /* if all slots are busy */
- unsigned long *bitmap; /* 0 = free slot */
+ wait_queue_head_t wq; /* if all slots are busy */
+ unsigned long *bitmap; /* 0 = free slot */
struct page *page;
/*
@@ -116,7 +116,7 @@ struct xol_area {
* itself. The probed process or a naughty kernel module could make
* the vma go away, and we must handle that reasonably gracefully.
*/
- unsigned long vaddr; /* Page(s) of instruction slots */
+ unsigned long vaddr; /* Page(s) of instruction slots */
};
static void uprobe_warn(struct task_struct *t, const char *msg)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0bb29316b436..8b1b4c8a4f54 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -2470,6 +2470,9 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
if (retval < 0)
return retval;
+ if (!act->affinity)
+ act->affinity = cpu_online_mask;
+
retval = __setup_irq(irq, desc, act);
if (retval)
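The new lines give a percpu irqaction a sane default affinity when the caller did not provide one, so __setup_irq() never sees a NULL mask. A hedged caller sketch; the handler, flags combination, and IRQ number are hypothetical, only setup_percpu_irq() and the .affinity default come from the patch:

	static irqreturn_t my_timer_handler(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static struct irqaction my_timer_action = {
		.handler = my_timer_handler,
		.flags   = IRQF_PERCPU | IRQF_TIMER,
		.name    = "my-timer",
		/* .affinity left NULL: now defaulted to cpu_online_mask */
	};

	err = setup_percpu_irq(MY_TIMER_IRQ, &my_timer_action);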
diff --git a/kernel/liveupdate/Kconfig b/kernel/liveupdate/Kconfig
index 9b2515f31afb..d2aeaf13c3ac 100644
--- a/kernel/liveupdate/Kconfig
+++ b/kernel/liveupdate/Kconfig
@@ -54,6 +54,7 @@ config KEXEC_HANDOVER_ENABLE_DEFAULT
config LIVEUPDATE
bool "Live Update Orchestrator"
depends on KEXEC_HANDOVER
+ depends on SHMEM
help
Enable the Live Update Orchestrator. Live Update is a mechanism,
typically based on kexec, that allows the kernel to be updated
diff --git a/kernel/liveupdate/luo_core.c b/kernel/liveupdate/luo_core.c
index f7ecaf7740d1..944663d99dd9 100644
--- a/kernel/liveupdate/luo_core.c
+++ b/kernel/liveupdate/luo_core.c
@@ -399,10 +399,8 @@ static long luo_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
int err;
nr = _IOC_NR(cmd);
- if (nr < LIVEUPDATE_CMD_BASE ||
- (nr - LIVEUPDATE_CMD_BASE) >= ARRAY_SIZE(luo_ioctl_ops)) {
+ if (nr - LIVEUPDATE_CMD_BASE >= ARRAY_SIZE(luo_ioctl_ops))
return -EINVAL;
- }
ucmd.ubuffer = (void __user *)arg;
err = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer);
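The collapsed check is the standard unsigned range-check idiom: ARRAY_SIZE() has type size_t, so nr - LIVEUPDATE_CMD_BASE is compared as an unsigned value and any nr below the base wraps around to something huge, failing the single >= test. A standalone illustration with made-up values (plain C, compiles as-is):

	#include <stdio.h>

	#define BASE 16u
	static const char *ops[4];	/* stands in for luo_ioctl_ops */
	#define NR_OPS (sizeof(ops) / sizeof(ops[0]))

	static int in_range(unsigned int nr)
	{
		/* nr < BASE wraps to a large value, so one comparison
		 * rejects both too-small and too-large nr. */
		return !(nr - BASE >= NR_OPS);
	}

	int main(void)
	{
		printf("%d %d %d %d\n",
		       in_range(15), in_range(16), in_range(19), in_range(20));
		/* prints: 0 1 1 0 */
		return 0;
	}

This only works because the comparison happens in unsigned arithmetic, which the size_t from ARRAY_SIZE() ensures via the usual conversions.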
diff --git a/kernel/liveupdate/luo_file.c b/kernel/liveupdate/luo_file.c
index ddff87917b21..a32a777f6df8 100644
--- a/kernel/liveupdate/luo_file.c
+++ b/kernel/liveupdate/luo_file.c
@@ -554,17 +554,20 @@ int luo_retrieve_file(struct luo_file_set *file_set, u64 token,
{
struct liveupdate_file_op_args args = {0};
struct luo_file *luo_file;
+ bool found = false;
int err;
if (list_empty(&file_set->files_list))
return -ENOENT;
list_for_each_entry(luo_file, &file_set->files_list, list) {
- if (luo_file->token == token)
+ if (luo_file->token == token) {
+ found = true;
break;
+ }
}
- if (luo_file->token != token)
+ if (!found)
return -ENOENT;
guard(mutex)(&luo_file->mutex);
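The deleted post-loop test was the classic list_for_each_entry() pitfall: when the loop runs to completion without a break, the iterator ends up as container_of() applied to the list head itself, not a real luo_file, so reading luo_file->token there dereferences garbage. The found flag ensures the iterator is only consulted after a break. An equivalent shape some code prefers, keeping a separate result pointer instead of a flag (a sketch, not what the patch does):

	struct luo_file *luo_file, *match = NULL;

	list_for_each_entry(luo_file, &file_set->files_list, list) {
		if (luo_file->token == token) {
			match = luo_file;
			break;
		}
	}
	if (!match)
		return -ENOENT;

	guard(mutex)(&match->mutex);

Either way, nothing touches the iterator after an exhausted loop.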