summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authoranish kumar <anish198519851985@gmail.com>2013-02-03 22:08:23 +0100
committerIngo Molnar <mingo@kernel.org>2013-02-04 11:50:59 +0100
commitc02cf5f8ed6137e2b3b2f10e0fca336e06e09ba4 (patch)
tree808036e5a89ace73ea2a92719529df6c6321977b
parent786133f6e8ff94aaa78cd6b7844d04c227098327 (diff)
irq_work: Remove return value from the irq_work_queue() function
Since no one is using the return value of irq_work_queue(), it is better to just make it void. Signed-off-by: anish kumar <anish198519851985@gmail.com> Acked-by: Steven Rostedt <rostedt@goodmis.org> [ Fix stale comments, remove now unnecessary __irq_work_queue() intermediate function ] Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Link: http://lkml.kernel.org/r/1359925703-24304-1-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--include/linux/irq_work.h2
-rw-r--r--kernel/irq_work.c31
2 files changed, 11 insertions, 22 deletions
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 6a9e8f5399e2..ce60c084635b 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -16,7 +16,7 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
work->func = func;
}
-bool irq_work_queue(struct irq_work *work);
+void irq_work_queue(struct irq_work *work);
void irq_work_run(void);
void irq_work_sync(struct irq_work *work);
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 64eddd59ed83..c9d7478e4889 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -63,12 +63,20 @@ void __weak arch_irq_work_raise(void)
}
/*
- * Queue the entry and raise the IPI if needed.
+ * Enqueue the irq_work @entry unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
*/
-static void __irq_work_queue(struct irq_work *work)
+void irq_work_queue(struct irq_work *work)
{
bool empty;
+ /* Only queue if not already pending */
+ if (!irq_work_claim(work))
+ return;
+
+ /* Queue the entry and raise the IPI if needed. */
preempt_disable();
empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
@@ -78,25 +86,6 @@ static void __irq_work_queue(struct irq_work *work)
preempt_enable();
}
-
-/*
- * Enqueue the irq_work @entry, returns true on success, failure when the
- * @entry was already enqueued by someone else.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue(struct irq_work *work)
-{
- if (!irq_work_claim(work)) {
- /*
- * Already enqueued, can't do!
- */
- return false;
- }
-
- __irq_work_queue(work);
- return true;
-}
EXPORT_SYMBOL_GPL(irq_work_queue);
/*