From e3daab6ce467199ffea12fc1b2df8f61335ce1ca Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 25 Oct 2013 11:45:35 +0100
Subject: smp: export __smp_call_function_single()

The blk-mq core and the blk-mq null driver both use it.

Reviewed-by: Christoph Hellwig
Acked-by: Ingo Molnar
Signed-off-by: Jens Axboe
---
 kernel/smp.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/kernel/smp.c b/kernel/smp.c
index 0564571dcdf7..46d3d0b48a8d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -340,6 +340,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *csd,
 	}
 	put_cpu();
 }
+EXPORT_SYMBOL_GPL(__smp_call_function_single);
 
 /**
  * smp_call_function_many(): Run a function on a set of other CPUs.
--
cgit

From c84a83e2aaab02a5ca64a982aa55342784934479 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 17 May 2013 09:58:43 +0200
Subject: smp: don't warn about csd->flags having CSD_FLAG_LOCK cleared for !wait

blk-mq potentially reuses a request immediately, since the most
cache-hot request is always given out first. This means that rq->csd
could be reused between csd->func() being called and csd_unlock()
being called. This isn't a problem, since we never use wait == 1 for
the smp call function.

Add CSD_FLAG_WAIT to be able to tell the difference, retaining the
warning for other cases.

Cc: Ingo Molnar
Signed-off-by: Jens Axboe
---
 kernel/smp.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/kernel/smp.c b/kernel/smp.c
index 46d3d0b48a8d..dbc3d42d2c68 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -18,6 +18,7 @@
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 enum {
 	CSD_FLAG_LOCK = 0x01,
+	CSD_FLAG_WAIT = 0x02,
 };
 
 struct call_function_data {
@@ -124,7 +125,7 @@ static void csd_lock(struct call_single_data *csd)
 
 static void csd_unlock(struct call_single_data *csd)
 {
-	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
+	WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));
 
 	/*
 	 * ensure we're all done before releasing data:
@@ -146,6 +147,9 @@ void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 	unsigned long flags;
 	int ipi;
 
+	if (wait)
+		csd->flags |= CSD_FLAG_WAIT;
+
 	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
 	list_add_tail(&csd->list, &dst->list);
--
cgit
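Taken together, the two smp patches above let a caller fire a function on another CPU without waiting for it, while tolerating the call_single_data being recycled the moment the callback has run. Below is a minimal sketch of that usage pattern, assuming the 3.x-era API visible in the hunks above; the driver-side names (my_request, remote_complete, my_complete_request) are invented for illustration and appear in no patch here:

#include <linux/kernel.h>
#include <linux/smp.h>

struct my_request {
	struct call_single_data csd;	/* embedded; recycled per request */
	int target_cpu;
};

/* Runs on rq->target_cpu out of the IPI handler. */
static void remote_complete(void *info)
{
	struct my_request *rq = info;

	pr_debug("completing request on cpu %d\n", rq->target_cpu);
}

static void my_complete_request(struct my_request *rq)
{
	rq->csd.func = remote_complete;
	rq->csd.info = rq;
	rq->csd.flags = 0;
	/*
	 * wait == 0, so CSD_FLAG_WAIT is never set and nobody spins on
	 * the csd. The request (and its embedded csd) may be handed out
	 * again as soon as remote_complete() has run - exactly the reuse
	 * that the relaxed csd_unlock() warning now permits.
	 */
	__smp_call_function_single(rq->target_cpu, &rq->csd, 0);
}

With wait == 1 the helper would instead mark the csd with CSD_FLAG_WAIT and spin until the callback finished, so the WARN_ON in csd_unlock() still catches a waited-on csd whose lock flag was cleared early.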
From a404d5576bbe586a1097a8bc2f32c5f22651b0aa Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Tue, 17 Sep 2013 22:30:31 +0200
Subject: blktrace: Send BLK_TN_PROCESS events to all running traces

Currently each task sends a BLK_TN_PROCESS event to the first traced
device it interacts with after a new trace is started. When there are
several traced devices and the task accesses more of them, this logic
can result in BLK_TN_PROCESS being sent several times to some devices
while never being sent to others. As a consequence, blkparse doesn't
display the command name when parsing some blktrace files.

Fix the problem by sending the BLK_TN_PROCESS event to all traced
devices when a task interacts with any of them.

Signed-off-by: Jan Kara
Reviewed-by: Jeff Moyer
Signed-off-by: Jens Axboe
---
 kernel/trace/blktrace.c | 33 +++++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)

diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b8b8560bfb95..7f727b34280d 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -26,6 +26,7 @@
 #include <linux/export.h>
 #include <linux/time.h>
 #include <linux/uaccess.h>
+#include <linux/list.h>
 
 #include <trace/events/block.h>
 
@@ -38,6 +39,9 @@ static unsigned int blktrace_seq __read_mostly = 1;
 static struct trace_array *blk_tr;
 static bool blk_tracer_enabled __read_mostly;
 
+static LIST_HEAD(running_trace_list);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
+
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_BLK_OPT_CLASSIC 0x1
 
@@ -107,10 +111,18 @@ record_it:
  * Send out a notify for this process, if we haven't done so since a trace
  * started
  */
-static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+static void trace_note_tsk(struct task_struct *tsk)
 {
+	unsigned long flags;
+	struct blk_trace *bt;
+
 	tsk->btrace_seq = blktrace_seq;
-	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
+	spin_lock_irqsave(&running_trace_lock, flags);
+	list_for_each_entry(bt, &running_trace_list, running_list) {
+		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
+			   sizeof(tsk->comm));
+	}
+	spin_unlock_irqrestore(&running_trace_lock, flags);
 }
 
 static void trace_note_time(struct blk_trace *bt)
@@ -229,16 +241,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		goto record_it;
 	}
 
+	if (unlikely(tsk->btrace_seq != blktrace_seq))
+		trace_note_tsk(tsk);
+
 	/*
 	 * A word about the locking here - we disable interrupts to reserve
 	 * some space in the relay per-cpu buffer, to prevent an irq
 	 * from coming in and stepping on our toes.
 	 */
 	local_irq_save(flags);
-
-	if (unlikely(tsk->btrace_seq != blktrace_seq))
-		trace_note_tsk(bt, tsk);
-
 	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
 	if (t) {
 		sequence = per_cpu_ptr(bt->sequence, cpu);
@@ -477,6 +488,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	bt->dir = dir;
 	bt->dev = dev;
 	atomic_set(&bt->dropped, 0);
+	INIT_LIST_HEAD(&bt->running_list);
 
 	ret = -EIO;
 	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
@@ -601,6 +613,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
 			blktrace_seq++;
 			smp_mb();
 			bt->trace_state = Blktrace_running;
+			spin_lock_irq(&running_trace_lock);
+			list_add(&bt->running_list, &running_trace_list);
+			spin_unlock_irq(&running_trace_lock);
 
 			trace_note_time(bt);
 			ret = 0;
@@ -608,6 +623,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
 	} else {
 		if (bt->trace_state == Blktrace_running) {
 			bt->trace_state = Blktrace_stopped;
+			spin_lock_irq(&running_trace_lock);
+			list_del_init(&bt->running_list);
+			spin_unlock_irq(&running_trace_lock);
 			relay_flush(bt->rchan);
 			ret = 0;
 		}
@@ -1472,6 +1490,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
 	if (atomic_dec_and_test(&blk_probes_ref))
 		blk_unregister_tracepoints();
 
+	spin_lock_irq(&running_trace_lock);
+	list_del(&bt->running_list);
+	spin_unlock_irq(&running_trace_lock);
 	blk_trace_free(bt);
 	return 0;
 }
--
cgit
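The blktrace fix above is an instance of a broader pattern: keep a spinlock-protected registry of running consumers plus a global generation counter, and have each producer lazily broadcast one announcement to every registered consumer when it notices its cached generation is stale. A stripped-down sketch of that pattern in kernel-style C follows; all names (consumer, announce_if_stale, and so on) are invented for illustration, not taken from the patch:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct consumer {
	struct list_head node;
	void (*announce)(struct consumer *c, int pid);
};

static LIST_HEAD(consumers);		/* every running consumer */
static DEFINE_SPINLOCK(consumers_lock);
static unsigned int generation = 1;	/* bumped whenever a consumer starts */

/* Called on the hot path; cheap unless the cached generation is stale. */
static void announce_if_stale(unsigned int *cached_gen, int pid)
{
	unsigned long flags;
	struct consumer *c;

	if (likely(*cached_gen == generation))
		return;
	*cached_gen = generation;

	spin_lock_irqsave(&consumers_lock, flags);
	list_for_each_entry(c, &consumers, node)
		c->announce(c, pid);	/* all consumers, not just the first */
	spin_unlock_irqrestore(&consumers_lock, flags);
}

In blktrace terms, the cached generation is tsk->btrace_seq, the registry is running_trace_list, and the announcement is the BLK_TN_PROCESS note carrying tsk->comm.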
From f8c5e94486671ffcac696886c246baa6ba89b5cf Mon Sep 17 00:00:00 2001
From: Chen Gang
Date: Sun, 3 Nov 2013 22:23:39 +0800
Subject: kernel: trace: blktrace: remove redundant memcpy() in compat_blk_trace_setup()

do_blk_trace_setup() will fully initialize 'buts.name', so the related
memcpy() can be removed. Also use BLKTRACE_BDEV_SIZE and ARRAY_SIZE
instead of the hard-coded number '32'.

Signed-off-by: Chen Gang
Signed-off-by: Jens Axboe
---
 kernel/trace/blktrace.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7f727b34280d..f785aef65799 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -579,13 +579,12 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
 		.end_lba = cbuts.end_lba,
 		.pid = cbuts.pid,
 	};
-	memcpy(&buts.name, &cbuts.name, 32);
 
 	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 	if (ret)
 		return ret;
 
-	if (copy_to_user(arg, &buts.name, 32)) {
+	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
 		blk_trace_remove(q);
 		return -EFAULT;
 	}
--
cgit
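As a closing illustration of the ARRAY_SIZE change: the point of the macro is that the copy length tracks the array's declared size instead of repeating a magic '32', and for a char array ARRAY_SIZE(a) equals sizeof(a), which is what the copy_to_user() call above relies on. The standalone userspace sketch below uses a simplified ARRAY_SIZE without the kernel's added array-type check; struct setup_args and BDEV_NAME_SIZE are stand-ins, not kernel definitions:

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define BDEV_NAME_SIZE 32		/* stand-in for BLKTRACE_BDEV_SIZE */

struct setup_args {
	char name[BDEV_NAME_SIZE];	/* mirrors buts.name */
};

int main(void)
{
	struct setup_args args;
	char out[BDEV_NAME_SIZE];

	/* Fill the whole array; strncpy zero-pads the remainder. */
	strncpy(args.name, "sda", ARRAY_SIZE(args.name) - 1);
	args.name[ARRAY_SIZE(args.name) - 1] = '\0';

	/*
	 * Stand-in for copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name)):
	 * the length is derived from the array itself, so it can never
	 * drift out of sync if BDEV_NAME_SIZE changes.
	 */
	memcpy(out, args.name, ARRAY_SIZE(args.name));
	printf("%s\n", out);
	return 0;
}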