author    David S. Miller <davem@davemloft.net>    2015-10-08 05:26:44 -0700
committer David S. Miller <davem@davemloft.net>    2015-10-08 05:26:44 -0700
commit    df718423250c000ca4323a767cedc2f3219b685c (patch)
tree      3afa13e8acbdf49a9f8c1a7993065c3836997a01 /lib/once.c
parent    28335a7445202a3d118145a07d9138e9881ebe18 (diff)
parent    3ad0040573b0c00f88488bc31958acd07a55ee2e (diff)
Merge branch 'bpf_random32'
Daniel Borkmann says:

====================
BPF/random32 updates

BPF update to split the prandom state apart, and to move the *once
helpers to the core. For details, please see individual patches.
Given the changes and since it's in the tree for quite some time,
net-next is a better choice in our opinion.

v1 -> v2:
 - Make DO_ONCE() type-safe, remove the kvec helper. Credits go to
   Alexei Starovoitov for the __VA_ARGS__ hint, thanks!
 - Add a comment to the DO_ONCE() helper as suggested by Alexei.
 - Rework prandom_init_once() helper to the new API.
 - Keep Alexei's Acked-by on the last patch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
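
To illustrate the reworked API described in the changelog, here is a minimal,
hypothetical caller. The names demo_seed, demo_prandom_state and
demo_get_random are made up for this sketch; the DO_ONCE() and
prandom_init_once() definitions themselves live in include/linux/once.h and
the prandom headers, which are not part of this diff.

/* Hypothetical caller sketching the reworked once API; not part of
 * this patch. DO_ONCE(func, ...) runs func(...) at most once per call
 * site, and prandom_init_once() applies the same pattern to seeding a
 * per-cpu prandom state.
 */
#include <linux/once.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/random.h>

static DEFINE_PER_CPU(struct rnd_state, demo_prandom_state);

static void demo_seed(void)
{
	pr_info("one-time initialization ran\n");
}

static u32 demo_get_random(void)
{
	/* demo_seed() executes only on the very first call. */
	DO_ONCE(demo_seed);

	/* Seed the per-cpu state once, then draw from it; the caller is
	 * assumed to run in a context where this_cpu_ptr() is safe,
	 * e.g. softirq.
	 */
	prandom_init_once(&demo_prandom_state);
	return prandom_u32_state(this_cpu_ptr(&demo_prandom_state));
}
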
Diffstat (limited to 'lib/once.c')
-rw-r--r--  lib/once.c  62
1 file changed, 62 insertions, 0 deletions
diff --git a/lib/once.c b/lib/once.c
new file mode 100644
index 000000000000..05c8604627eb
--- /dev/null
+++ b/lib/once.c
@@ -0,0 +1,62 @@
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/once.h>
+#include <linux/random.h>
+
+struct once_work {
+	struct work_struct work;
+	struct static_key *key;
+};
+
+static void once_deferred(struct work_struct *w)
+{
+	struct once_work *work;
+
+	work = container_of(w, struct once_work, work);
+	BUG_ON(!static_key_enabled(work->key));
+	static_key_slow_dec(work->key);
+	kfree(work);
+}
+
+static void once_disable_jump(struct static_key *key)
+{
+	struct once_work *w;
+
+	w = kmalloc(sizeof(*w), GFP_ATOMIC);
+	if (!w)
+		return;
+
+	INIT_WORK(&w->work, once_deferred);
+	w->key = key;
+	schedule_work(&w->work);
+}
+
+static DEFINE_SPINLOCK(once_lock);
+
+bool __do_once_start(bool *done, unsigned long *flags)
+	__acquires(once_lock)
+{
+	spin_lock_irqsave(&once_lock, *flags);
+	if (*done) {
+		spin_unlock_irqrestore(&once_lock, *flags);
+		/* Keep sparse happy by restoring an even lock count on
+		 * this lock. In case we return here, we don't call into
+		 * __do_once_done but return early in the DO_ONCE() macro.
+		 */
+		__acquire(once_lock);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(__do_once_start);
+
+void __do_once_done(bool *done, struct static_key *once_key,
+		    unsigned long *flags)
+	__releases(once_lock)
+{
+	*done = true;
+	spin_unlock_irqrestore(&once_lock, *flags);
+	once_disable_jump(once_key);
+}
+EXPORT_SYMBOL(__do_once_done);
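
For context, the two exported helpers above are meant to be driven by a
DO_ONCE()-style macro. The following is a rough sketch of such a wrapper,
given here as an illustration rather than a copy of include/linux/once.h: a
static_key keeps the fast path jump-patched away once the init function has
run, __do_once_start() serializes the first caller under once_lock, and
__do_once_done() marks completion and schedules the deferred
static_key_slow_dec() via once_disable_jump().

/* Illustrative sketch of a DO_ONCE()-style wrapper around the helpers
 * in this file; the real macro lives in include/linux/once.h and may
 * differ in detail.
 */
#define SKETCH_DO_ONCE(func, ...)					\
	({								\
		bool ___ret = false;					\
		static bool ___done = false;				\
		static struct static_key ___once_key =			\
			STATIC_KEY_INIT_TRUE;				\
		if (static_key_true(&___once_key)) {			\
			unsigned long ___flags;				\
			___ret = __do_once_start(&___done, &___flags);	\
			if (unlikely(___ret)) {				\
				func(__VA_ARGS__);			\
				__do_once_done(&___done, &___once_key,	\
					       &___flags);		\
			}						\
		}							\
		___ret;							\
	})

On the fast path this reduces to a single patched-out branch; the spinlock and
the *done flag are only touched until the work item queued by
once_disable_jump() has dropped the static key from process context.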