author    Linus Torvalds <torvalds@linux-foundation.org>  2020-10-12 14:21:15 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-10-12 14:21:15 -0700
commit    1c6890707eb1438b0fb4e0a10d4afe48a217628b (patch)
tree      f600dc002e5076353efb005727d2fa3c47560dd4 /kernel/kprobes.c
parent    3bff6112c80cecb76af5fe485506f96e8adb6122 (diff)
parent    bcb53209be5cb32d485507452edda19b78f31d84 (diff)
Merge tag 'perf-kprobes-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf/kprobes updates from Ingo Molnar:
 "This prepares to unify the kretprobe trampoline handler and make
  kretprobe lockless (those patches are still work in progress)"

* tag 'perf-kprobes-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  kprobes: Fix to check probe enabled before disarm_kprobe_ftrace()
  kprobes: Make local functions static
  kprobes: Free kretprobe_instance with RCU callback
  kprobes: Remove NMI context check
  sparc: kprobes: Use generic kretprobe trampoline handler
  sh: kprobes: Use generic kretprobe trampoline handler
  s390: kprobes: Use generic kretprobe trampoline handler
  powerpc: kprobes: Use generic kretprobe trampoline handler
  parisc: kprobes: Use generic kretprobe trampoline handler
  mips: kprobes: Use generic kretprobe trampoline handler
  ia64: kprobes: Use generic kretprobe trampoline handler
  csky: kprobes: Use generic kretprobe trampoline handler
  arc: kprobes: Use generic kretprobe trampoline handler
  arm64: kprobes: Use generic kretprobe trampoline handler
  arm: kprobes: Use generic kretprobe trampoline handler
  x86/kprobes: Use generic kretprobe trampoline handler
  kprobes: Add generic kretprobe trampoline handler
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--  kernel/kprobes.c | 128
1 file changed, 102 insertions(+), 26 deletions(-)
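Most of the series is mechanical: each architecture's kretprobe trampoline
handler becomes a thin wrapper around the generic
__kretprobe_trampoline_handler() added to kernel/kprobes.c below. A minimal
sketch of the resulting shape, loosely modeled on the x86 conversion; the
wrapper name, the register fixup, and passing &regs->sp as the frame pointer
are illustrative and vary per architecture:

#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Called from the kretprobe_trampoline assembly stub (declared by arch code). */
__visible __used void *trampoline_handler(struct pt_regs *regs)
{
        /* arch-specific: make *regs look like a normal return from the
         * probed function before any user handler sees it */

        /*
         * The generic handler walks this task's kretprobe instances,
         * runs their user handlers, recycles the instances, and hands
         * back the real return address to resume execution at.
         */
        return (void *)__kretprobe_trampoline_handler(regs,
                        &kretprobe_trampoline, &regs->sp);
}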
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c16c3236f6cf..789002da7766 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1224,8 +1224,7 @@ void kprobes_inc_nmissed_count(struct kprobe *p)
 }
 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
 
-void recycle_rp_inst(struct kretprobe_instance *ri,
-                     struct hlist_head *head)
+static void recycle_rp_inst(struct kretprobe_instance *ri)
 {
         struct kretprobe *rp = ri->rp;
 
@@ -1237,12 +1236,11 @@ void recycle_rp_inst(struct kretprobe_instance *ri,
                 hlist_add_head(&ri->hlist, &rp->free_instances);
                 raw_spin_unlock(&rp->lock);
         } else
-                /* Unregistering */
-                hlist_add_head(&ri->hlist, head);
+                kfree_rcu(ri, rcu);
 }
 NOKPROBE_SYMBOL(recycle_rp_inst);
 
-void kretprobe_hash_lock(struct task_struct *tsk,
+static void kretprobe_hash_lock(struct task_struct *tsk,
                          struct hlist_head **head, unsigned long *flags)
 __acquires(hlist_lock)
 {
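The kfree_rcu() above replaces the old scheme of collecting dying instances
on a caller-supplied list: the kfree() is deferred until an RCU grace period
has elapsed, groundwork for the lockless kretprobe work mentioned in the pull
message. It relies on struct kretprobe_instance carrying a struct rcu_head
named 'rcu'; that field is added in include/linux/kprobes.h and is not part
of this file's diff. A sketch of the layout, assuming the header change looks
roughly like upstream's:

struct kretprobe_instance {
        union {
                struct hlist_node hlist; /* linked while live or on the free list */
                struct rcu_head rcu;     /* reused once handed to kfree_rcu() */
        };
        /* ... remaining members unchanged ... */
};

kfree_rcu(ri, rcu) queues &ri->rcu via call_rcu() and frees ri only after
every pre-existing RCU read-side critical section has completed.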
@@ -1264,7 +1262,7 @@ __acquires(hlist_lock)
 }
 NOKPROBE_SYMBOL(kretprobe_table_lock);
 
-void kretprobe_hash_unlock(struct task_struct *tsk,
+static void kretprobe_hash_unlock(struct task_struct *tsk,
                            unsigned long *flags)
 __releases(hlist_lock)
 {
@@ -1285,7 +1283,7 @@ __releases(hlist_lock)
 }
 NOKPROBE_SYMBOL(kretprobe_table_unlock);
 
-struct kprobe kprobe_busy = {
+static struct kprobe kprobe_busy = {
         .addr = (void *) get_kprobe,
 };
 
@@ -1314,7 +1312,7 @@ void kprobe_busy_end(void)
 void kprobe_flush_task(struct task_struct *tk)
 {
         struct kretprobe_instance *ri;
-        struct hlist_head *head, empty_rp;
+        struct hlist_head *head;
         struct hlist_node *tmp;
         unsigned long hash, flags = 0;
 
@@ -1324,19 +1322,14 @@ void kprobe_flush_task(struct task_struct *tk)
 
         kprobe_busy_begin();
 
-        INIT_HLIST_HEAD(&empty_rp);
         hash = hash_ptr(tk, KPROBE_HASH_BITS);
         head = &kretprobe_inst_table[hash];
         kretprobe_table_lock(hash, &flags);
         hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                 if (ri->task == tk)
-                        recycle_rp_inst(ri, &empty_rp);
+                        recycle_rp_inst(ri);
         }
         kretprobe_table_unlock(hash, &flags);
 
-        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
-                hlist_del(&ri->hlist);
-                kfree(ri);
-        }
         kprobe_busy_end();
 }
@@ -1360,7 +1353,8 @@ static void cleanup_rp_inst(struct kretprobe *rp)
         struct hlist_node *next;
         struct hlist_head *head;
 
-        /* No race here */
+        /* To avoid recursive kretprobe by NMI, set kprobe busy here */
+        kprobe_busy_begin();
         for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
                 kretprobe_table_lock(hash, &flags);
                 head = &kretprobe_inst_table[hash];
@@ -1370,6 +1364,8 @@ static void cleanup_rp_inst(struct kretprobe *rp)
                 }
                 kretprobe_table_unlock(hash, &flags);
         }
+        kprobe_busy_end();
+
         free_rp_inst(rp);
 }
 NOKPROBE_SYMBOL(cleanup_rp_inst);
@@ -1929,6 +1925,97 @@ unsigned long __weak arch_deref_entry_point(void *entry)
 }
 
 #ifdef CONFIG_KRETPROBES
+
+unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
+                                             void *trampoline_address,
+                                             void *frame_pointer)
+{
+        struct kretprobe_instance *ri = NULL, *last = NULL;
+        struct hlist_head *head;
+        struct hlist_node *tmp;
+        unsigned long flags;
+        kprobe_opcode_t *correct_ret_addr = NULL;
+        bool skipped = false;
+
+        kretprobe_hash_lock(current, &head, &flags);
+
+        /*
+         * It is possible to have multiple instances associated with a given
+         * task either because multiple functions in the call path have
+         * return probes installed on them, and/or more than one
+         * return probe was registered for a target function.
+         *
+         * We can handle this because:
+         *     - instances are always pushed into the head of the list
+         *     - when multiple return probes are registered for the same
+         *       function, the (chronologically) first instance's ret_addr
+         *       will be the real return address, and all the rest will
+         *       point to kretprobe_trampoline.
+         */
+        hlist_for_each_entry(ri, head, hlist) {
+                if (ri->task != current)
+                        /* another task is sharing our hash bucket */
+                        continue;
+                /*
+                 * Return probes must be pushed on this hash list correct
+                 * order (same as return order) so that it can be popped
+                 * correctly. However, if we find it is pushed it incorrect
+                 * order, this means we find a function which should not be
+                 * probed, because the wrong order entry is pushed on the
+                 * path of processing other kretprobe itself.
+                 */
+                if (ri->fp != frame_pointer) {
+                        if (!skipped)
+                                pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
+                        skipped = true;
+                        continue;
+                }
+
+                correct_ret_addr = ri->ret_addr;
+                if (skipped)
+                        pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
+                                ri->rp->kp.addr);
+
+                if (correct_ret_addr != trampoline_address)
+                        /*
+                         * This is the real return address. Any other
+                         * instances associated with this task are for
+                         * other calls deeper on the call stack
+                         */
+                        break;
+        }
+
+        BUG_ON(!correct_ret_addr || (correct_ret_addr == trampoline_address));
+        last = ri;
+
+        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+                if (ri->task != current)
+                        /* another task is sharing our hash bucket */
+                        continue;
+                if (ri->fp != frame_pointer)
+                        continue;
+
+                if (ri->rp && ri->rp->handler) {
+                        struct kprobe *prev = kprobe_running();
+
+                        __this_cpu_write(current_kprobe, &ri->rp->kp);
+                        ri->ret_addr = correct_ret_addr;
+                        ri->rp->handler(ri, regs);
+                        __this_cpu_write(current_kprobe, prev);
+                }
+
+                recycle_rp_inst(ri);
+
+                if (ri == last)
+                        break;
+        }
+
+        kretprobe_hash_unlock(current, &flags);
+
+        return (unsigned long)correct_ret_addr;
+}
+NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
+
 /*
  * This kprobe pre_handler is registered with every kretprobe. When probe
  * hits it will set up the return probe.
@@ -1939,17 +2026,6 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
         unsigned long hash, flags = 0;
         struct kretprobe_instance *ri;
 
-        /*
-         * To avoid deadlocks, prohibit return probing in NMI contexts,
-         * just skip the probe and increase the (inexact) 'nmissed'
-         * statistical counter, so that the user is informed that
-         * something happened:
-         */
-        if (unlikely(in_nmi())) {
-                rp->nmissed++;
-                return 0;
-        }
-
         /* TODO: consider to only swap the RA after the last pre_handler fired */
         hash = hash_ptr(current, KPROBE_HASH_BITS);
         raw_spin_lock_irqsave(&rp->lock, flags);
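None of this changes the user-facing kretprobe API: a return probe is still a
struct kretprobe whose handler the (now generic) trampoline code above invokes
via ri->rp->handler(ri, regs). A minimal module sketch of that API; the probed
symbol, the names, and the maxactive value are arbitrary choices for
illustration:

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        /* Runs from __kretprobe_trampoline_handler() via ri->rp->handler. */
        pr_info("%s returned %lu\n", ri->rp->kp.symbol_name,
                regs_return_value(regs));
        return 0;
}

static struct kretprobe my_kretprobe = {
        .handler        = my_ret_handler,
        .kp.symbol_name = "kernel_clone", /* example target; any kprobe-able function */
        .maxactive      = 20,             /* max in-flight instances for this probe */
};

static int __init my_kretprobe_init(void)
{
        return register_kretprobe(&my_kretprobe);
}

static void __exit my_kretprobe_exit(void)
{
        unregister_kretprobe(&my_kretprobe);
}

module_init(my_kretprobe_init);
module_exit(my_kretprobe_exit);
MODULE_LICENSE("GPL");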