author	Jens Axboe <axboe@kernel.dk>	2013-05-17 09:58:43 +0200
committer	Jens Axboe <axboe@kernel.dk>	2013-10-25 11:55:59 +0100
commit	c84a83e2aaab02a5ca64a982aa55342784934479 (patch)
tree	405880cd8121ba0fb1905549cc992071d27c6ca4 /kernel
parent	e3daab6ce467199ffea12fc1b2df8f61335ce1ca (diff)
smp: don't warn about csd->flags having CSD_FLAG_LOCK cleared for !wait
blk-mq potentially reuses a request immediately, since the most cache-hot request is always given out first. This means that rq->csd could be reused between csd->func() being called and csd_unlock() being called. This isn't a problem, since we never use wait == 1 for the smp call function.

Add CSD_FLAG_WAIT to be able to tell the difference, retaining the warning for other cases.

Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
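To make the change concrete, here is a small userspace sketch (not part of the patch; struct csd_model, old_warns() and new_warns() are invented stand-ins) modeling the old and new warning conditions in csd_unlock():

#include <stdio.h>

/* Flag values mirror kernel/smp.c after this patch. */
enum {
	CSD_FLAG_LOCK = 0x01,
	CSD_FLAG_WAIT = 0x02,
};

struct csd_model {
	unsigned int flags;
};

/* Old check: warn whenever CSD_FLAG_LOCK is already clear. */
static int old_warns(const struct csd_model *csd)
{
	return !(csd->flags & CSD_FLAG_LOCK);
}

/* New check: only warn for waiters, where early reuse is a real bug. */
static int new_warns(const struct csd_model *csd)
{
	return (csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK);
}

int main(void)
{
	/* The blk-mq case: csd reused with LOCK cleared, but no waiter. */
	struct csd_model reused = { .flags = 0 };
	/* A waiting csd whose LOCK was cleared early: still a bug. */
	struct csd_model waiting = { .flags = CSD_FLAG_WAIT };

	printf("reused (wait == 0): old=%d new=%d\n",
	       old_warns(&reused), new_warns(&reused));
	printf("waiting (wait == 1): old=%d new=%d\n",
	       old_warns(&waiting), new_warns(&waiting));
	return 0;
}

The first line prints old=1 new=0: the benign blk-mq reuse no longer trips the warning, while the second still reports a problem in both versions.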
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/smp.c	6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 46d3d0b48a8d..dbc3d42d2c68 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -18,6 +18,7 @@
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 enum {
 	CSD_FLAG_LOCK = 0x01,
+	CSD_FLAG_WAIT = 0x02,
 };
 
 struct call_function_data {
@@ -124,7 +125,7 @@ static void csd_lock(struct call_single_data *csd)
 
 static void csd_unlock(struct call_single_data *csd)
 {
-	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
+	WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));
 
 	/*
 	 * ensure we're all done before releasing data:
@@ -146,6 +147,9 @@ void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 	unsigned long flags;
 	int ipi;
 
+	if (wait)
+		csd->flags |= CSD_FLAG_WAIT;
+
 	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
 	list_add_tail(&csd->list, &dst->list);
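For context, a hedged caller-side sketch (hypothetical code, not from this patch; my_remote_fn() and example() are illustrative) of how the wait argument now maps onto CSD_FLAG_WAIT; smp_call_function_single() is the usual entry point above generic_exec_single():

#include <linux/smp.h>

/* Hypothetical function to run on the remote CPU (IPI context). */
static void my_remote_fn(void *info)
{
}

static void example(int cpu)
{
	/*
	 * wait == 0: the underlying csd may be recycled as soon as
	 * my_remote_fn() returns on the remote CPU (the blk-mq
	 * pattern), so csd_unlock() no longer warns if
	 * CSD_FLAG_LOCK is already clear.
	 */
	smp_call_function_single(cpu, my_remote_fn, NULL, 0);

	/*
	 * wait == 1: generic_exec_single() sets CSD_FLAG_WAIT, so
	 * the WARN_ON in csd_unlock() still catches a lock flag
	 * cleared too early, where reuse would be a real bug.
	 */
	smp_call_function_single(cpu, my_remote_fn, NULL, 1);
}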