author    Mike Snitzer <snitzer@redhat.com>  2018-12-06 11:41:18 -0500
committer Jens Axboe <axboe@kernel.dk>       2018-12-10 08:30:37 -0700
commit    112f158f66cbe25fd561a5dfe9c3826e06abf757 (patch)
tree      192a838ddd4e61581e6be953dc843d84730de6e8 /block/blk-core.c
parent    dbd3bbd291a03f1383de6242e7999e8487cb869b (diff)
block: stop passing 'cpu' to all percpu stats methods
All of the part_stat_* and related methods are used with preemption disabled, so there is no need to pass a cpu to all of them. Just call smp_processor_id() as needed.

Suggested-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
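For readers following the calling-convention change: the part_stat_* macros themselves live in include/linux/genhd.h and are not touched by this diff. A minimal sketch of the before/after shape under CONFIG_SMP (approximate; the exact definitions are an assumption, since that header is not part of this patch):

	/* roughly the pre-patch shape: the caller supplies the cpu index */
	#define __part_stat_add(cpu, part, field, addnd) \
		(per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))

	/*
	 * roughly the post-patch shape: the cpu is looked up internally, which
	 * is safe because every caller runs under part_stat_lock(), i.e. with
	 * preemption disabled
	 */
	#define __part_stat_add(part, field, addnd) \
		(per_cpu_ptr((part)->dkstats, smp_processor_id())->field += (addnd))

With the cpu derived inside the macro, part_stat_lock()'s return value no longer needs to be captured in a local 'int cpu' at each call site; the hunks below simply lock, account, and unlock.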
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 34
1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index ad59102ee30a..734b768c9d9d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -584,14 +584,14 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
}
EXPORT_SYMBOL(blk_get_request);
-static void part_round_stats_single(struct request_queue *q, int cpu,
+static void part_round_stats_single(struct request_queue *q,
struct hd_struct *part, unsigned long now,
unsigned int inflight)
{
if (inflight) {
- __part_stat_add(cpu, part, time_in_queue,
+ __part_stat_add(part, time_in_queue,
inflight * (now - part->stamp));
- __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
+ __part_stat_add(part, io_ticks, (now - part->stamp));
}
part->stamp = now;
}
@@ -599,7 +599,6 @@ static void part_round_stats_single(struct request_queue *q, int cpu,
/**
* part_round_stats() - Round off the performance stats on a struct disk_stats.
* @q: target block queue
- * @cpu: cpu number for stats access
* @part: target partition
*
* The average IO queue length and utilisation statistics are maintained
@@ -613,7 +612,7 @@ static void part_round_stats_single(struct request_queue *q, int cpu,
* /proc/diskstats. This accounts immediately for all queue usage up to
* the current jiffies and restarts the counters again.
*/
-void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
+void part_round_stats(struct request_queue *q, struct hd_struct *part)
{
struct hd_struct *part2 = NULL;
unsigned long now = jiffies;
@@ -635,9 +634,9 @@ void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
part_in_flight(q, part, inflight);
if (stats & 2)
- part_round_stats_single(q, cpu, part2, now, inflight[1]);
+ part_round_stats_single(q, part2, now, inflight[1]);
if (stats & 1)
- part_round_stats_single(q, cpu, part, now, inflight[0]);
+ part_round_stats_single(q, part, now, inflight[0]);
}
EXPORT_SYMBOL_GPL(part_round_stats);
@@ -1362,11 +1361,10 @@ void blk_account_io_completion(struct request *req, unsigned int bytes)
if (blk_do_io_stat(req)) {
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
- int cpu;
- cpu = part_stat_lock();
+ part_stat_lock();
part = req->part;
- part_stat_add(cpu, part, sectors[sgrp], bytes >> 9);
+ part_stat_add(part, sectors[sgrp], bytes >> 9);
part_stat_unlock();
}
}
@@ -1381,14 +1379,13 @@ void blk_account_io_done(struct request *req, u64 now)
if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
- int cpu;
- cpu = part_stat_lock();
+ part_stat_lock();
part = req->part;
- part_stat_inc(cpu, part, ios[sgrp]);
- part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns);
- part_round_stats(req->q, cpu, part);
+ part_stat_inc(part, ios[sgrp]);
+ part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
+ part_round_stats(req->q, part);
part_dec_in_flight(req->q, part, rq_data_dir(req));
hd_struct_put(part);
@@ -1400,16 +1397,15 @@ void blk_account_io_start(struct request *rq, bool new_io)
{
struct hd_struct *part;
int rw = rq_data_dir(rq);
- int cpu;
if (!blk_do_io_stat(rq))
return;
- cpu = part_stat_lock();
+ part_stat_lock();
if (!new_io) {
part = rq->part;
- part_stat_inc(cpu, part, merges[rw]);
+ part_stat_inc(part, merges[rw]);
} else {
part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
if (!hd_struct_try_get(part)) {
@@ -1424,7 +1420,7 @@ void blk_account_io_start(struct request *rq, bool new_io)
part = &rq->rq_disk->part0;
hd_struct_get(part);
}
- part_round_stats(rq->q, cpu, part);
+ part_round_stats(rq->q, part);
part_inc_in_flight(rq->q, part, rw);
rq->part = part;
}
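A closing note on where these counters end up (not part of the patch): as the part_round_stats() comment above says, io_ticks and time_in_queue are ultimately read back through /proc/diskstats, where tools such as iostat turn them into device utilisation and average queue depth. A self-contained userspace sketch of that readout follows; the field position assumed here (10th stat column after the device name) and the default device name "sda" are assumptions about the diskstats layout, not anything defined by this patch.

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Return milliseconds spent doing I/O for @disk, or -1 if not found. */
	static long long io_ticks_ms(const char *disk)
	{
		char line[256], name[32];
		unsigned long long f[11];
		long long ret = -1;
		FILE *fp = fopen("/proc/diskstats", "r");

		if (!fp)
			return -1;
		while (fgets(line, sizeof(line), fp)) {
			/* major minor name + 11 stat fields; skip lines that are shorter */
			if (sscanf(line, "%*u %*u %31s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
				   name, &f[0], &f[1], &f[2], &f[3], &f[4], &f[5],
				   &f[6], &f[7], &f[8], &f[9], &f[10]) != 12)
				continue;
			if (!strcmp(name, disk)) {
				ret = (long long)f[9]; /* assumed: ms spent doing I/Os (io_ticks) */
				break;
			}
		}
		fclose(fp);
		return ret;
	}

	int main(int argc, char **argv)
	{
		const char *disk = argc > 1 ? argv[1] : "sda"; /* hypothetical default */
		long long before = io_ticks_ms(disk);
		long long after;

		if (before < 0)
			return 1;
		sleep(1);
		after = io_ticks_ms(disk);
		if (after < 0)
			return 1;
		/* delta of busy milliseconds over a 1000 ms window -> percent utilisation */
		printf("%s: ~%.1f%% util over 1s\n", disk, (after - before) / 10.0);
		return 0;
	}

Built with e.g. "cc -o diskutil diskutil.c" (the file name is illustrative) and run against a busy disk, the one-second delta in busy milliseconds divided by 10 gives the utilisation percentage that the counters updated in this patch feed.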