-rw-r--r--  block/bfq-cgroup.c      2
-rw-r--r--  block/bfq-iosched.c     2
-rw-r--r--  block/blk-cgroup.c     21
-rw-r--r--  block/blk-cgroup.h      9
-rw-r--r--  block/blk-iocost.c      4
-rw-r--r--  block/blk-iolatency.c   4
-rw-r--r--  block/blk-ioprio.c      4
-rw-r--r--  block/blk-throttle.c    4

8 files changed, 25 insertions, 25 deletions
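This patch switches blkcg_activate_policy() and blkcg_deactivate_policy() from taking a request_queue to taking the owning gendisk, and updates every in-tree caller. After the change, the declarations in block/blk-cgroup.h read:

	int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol);
	void blkcg_deactivate_policy(struct gendisk *disk,
				     const struct blkcg_policy *pol);

Both helpers still derive the request_queue internally via disk->queue, as the blk-cgroup.c hunks below show.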
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 1d4a3f15049b..032c14f0451a 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -1291,7 +1291,7 @@ struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
int ret;
- ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
+ ret = blkcg_activate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
if (ret)
return NULL;
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 5afa661fa2ea..777dcab73c8e 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -7146,7 +7146,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
bfqg_and_blkg_put(bfqd->root_group);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
- blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
+ blkcg_deactivate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
#else
spin_lock_irq(&bfqd->lock);
bfq_put_async_queues(bfqd, bfqd->root_group);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 168b2f803238..c20929bce812 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1390,14 +1390,14 @@ struct cgroup_subsys io_cgrp_subsys = {
EXPORT_SYMBOL_GPL(io_cgrp_subsys);
/**
- * blkcg_activate_policy - activate a blkcg policy on a request_queue
- * @q: request_queue of interest
+ * blkcg_activate_policy - activate a blkcg policy on a gendisk
+ * @disk: gendisk of interest
* @pol: blkcg policy to activate
*
- * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
+ * Activate @pol on @disk. Requires %GFP_KERNEL context. @disk goes through
* bypass mode to populate its blkgs with policy_data for @pol.
*
- * Activation happens with @q bypassed, so nobody would be accessing blkgs
+ * Activation happens with @disk bypassed, so nobody would be accessing blkgs
* from IO path. Update of each blkg is protected by both queue and blkcg
* locks so that holding either lock and testing blkcg_policy_enabled() is
* always enough for dereferencing policy data.
@@ -1405,9 +1405,9 @@ EXPORT_SYMBOL_GPL(io_cgrp_subsys);
* The caller is responsible for synchronizing [de]activations and policy
* [un]registerations. Returns 0 on success, -errno on failure.
*/
-int blkcg_activate_policy(struct request_queue *q,
- const struct blkcg_policy *pol)
+int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
{
+ struct request_queue *q = disk->queue;
struct blkg_policy_data *pd_prealloc = NULL;
struct blkcg_gq *blkg, *pinned_blkg = NULL;
int ret;
@@ -1508,16 +1508,17 @@ enomem:
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
/**
- * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
- * @q: request_queue of interest
+ * blkcg_deactivate_policy - deactivate a blkcg policy on a gendisk
+ * @disk: gendisk of interest
* @pol: blkcg policy to deactivate
*
- * Deactivate @pol on @q. Follows the same synchronization rules as
+ * Deactivate @pol on @disk. Follows the same synchronization rules as
* blkcg_activate_policy().
*/
-void blkcg_deactivate_policy(struct request_queue *q,
+void blkcg_deactivate_policy(struct gendisk *disk,
const struct blkcg_policy *pol)
{
+ struct request_queue *q = disk->queue;
struct blkcg_gq *blkg;
if (!blkcg_policy_enabled(q, pol))
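Per the kernel-doc above, a caller now activates or deactivates a policy on a gendisk and the helpers look up the queue themselves. A minimal sketch of the resulting init/exit shape, mirroring the blk-ioprio.c hunks further down; blkcg_policy_example and the example_* functions are illustrative names only, not part of this patch, and the sketch assumes block/blk-cgroup.h is included:

	/* Illustrative sketch, not part of this patch. */
	static struct blkcg_policy blkcg_policy_example;

	static int example_init(struct gendisk *disk)
	{
		/* The gendisk, not its request_queue, is handed over now. */
		return blkcg_activate_policy(disk, &blkcg_policy_example);
	}

	static void example_exit(struct gendisk *disk)
	{
		blkcg_deactivate_policy(disk, &blkcg_policy_example);
	}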
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 996572a9a0b7..27068faa2cd0 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -194,9 +194,8 @@ void blkcg_exit_disk(struct gendisk *disk);
/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
-int blkcg_activate_policy(struct request_queue *q,
- const struct blkcg_policy *pol);
-void blkcg_deactivate_policy(struct request_queue *q,
+int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol);
+void blkcg_deactivate_policy(struct gendisk *disk,
const struct blkcg_policy *pol);
const char *blkg_dev_name(struct blkcg_gq *blkg);
@@ -495,9 +494,9 @@ static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
-static inline int blkcg_activate_policy(struct request_queue *q,
+static inline int blkcg_activate_policy(struct gendisk *disk,
const struct blkcg_policy *pol) { return 0; }
-static inline void blkcg_deactivate_policy(struct request_queue *q,
+static inline void blkcg_deactivate_policy(struct gendisk *disk,
const struct blkcg_policy *pol) { }
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index a2e9bf30039b..078b77709519 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2821,7 +2821,7 @@ static void ioc_rqos_exit(struct rq_qos *rqos)
{
struct ioc *ioc = rqos_to_ioc(rqos);
- blkcg_deactivate_policy(rqos->disk->queue, &blkcg_policy_iocost);
+ blkcg_deactivate_policy(rqos->disk, &blkcg_policy_iocost);
spin_lock_irq(&ioc->lock);
ioc->running = IOC_STOP;
@@ -2893,7 +2893,7 @@ static int blk_iocost_init(struct gendisk *disk)
if (ret)
goto err_free_ioc;
- ret = blkcg_activate_policy(disk->queue, &blkcg_policy_iocost);
+ ret = blkcg_activate_policy(disk, &blkcg_policy_iocost);
if (ret)
goto err_del_qos;
return 0;
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 8e1e43bbde6f..39853fc5c2b0 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -646,7 +646,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
timer_shutdown_sync(&blkiolat->timer);
flush_work(&blkiolat->enable_work);
- blkcg_deactivate_policy(rqos->disk->queue, &blkcg_policy_iolatency);
+ blkcg_deactivate_policy(rqos->disk, &blkcg_policy_iolatency);
kfree(blkiolat);
}
@@ -768,7 +768,7 @@ int blk_iolatency_init(struct gendisk *disk)
&blkcg_iolatency_ops);
if (ret)
goto err_free;
- ret = blkcg_activate_policy(disk->queue, &blkcg_policy_iolatency);
+ ret = blkcg_activate_policy(disk, &blkcg_policy_iolatency);
if (ret)
goto err_qos_del;
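For the rq_qos based policies (iocost and iolatency above), the exit path already holds the gendisk through rqos->disk, so the extra ->queue hop simply drops out. A sketch of that shape, again with illustrative names (blkcg_policy_example, example_rqos_exit) that are not part of this patch:

	/* Illustrative sketch mirroring ioc_rqos_exit()/blkcg_iolatency_exit() above;
	 * assumes block/blk-cgroup.h and block/blk-rq-qos.h are included.
	 */
	static struct blkcg_policy blkcg_policy_example;

	static void example_rqos_exit(struct rq_qos *rqos)
	{
		/* rqos->disk is passed straight through. */
		blkcg_deactivate_policy(rqos->disk, &blkcg_policy_example);
	}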
diff --git a/block/blk-ioprio.c b/block/blk-ioprio.c
index 8bb6b8eba4ce..8194826cc824 100644
--- a/block/blk-ioprio.c
+++ b/block/blk-ioprio.c
@@ -204,12 +204,12 @@ void blkcg_set_ioprio(struct bio *bio)
void blk_ioprio_exit(struct gendisk *disk)
{
- blkcg_deactivate_policy(disk->queue, &ioprio_policy);
+ blkcg_deactivate_policy(disk, &ioprio_policy);
}
int blk_ioprio_init(struct gendisk *disk)
{
- return blkcg_activate_policy(disk->queue, &ioprio_policy);
+ return blkcg_activate_policy(disk, &ioprio_policy);
}
static int __init ioprio_init(void)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f802d8f90994..efc0a9092c69 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2395,7 +2395,7 @@ int blk_throtl_init(struct gendisk *disk)
td->low_downgrade_time = jiffies;
/* activate policy */
- ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
+ ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
if (ret) {
free_percpu(td->latency_buckets[READ]);
free_percpu(td->latency_buckets[WRITE]);
@@ -2411,7 +2411,7 @@ void blk_throtl_exit(struct gendisk *disk)
BUG_ON(!q->td);
del_timer_sync(&q->td->service_queue.pending_timer);
throtl_shutdown_wq(q);
- blkcg_deactivate_policy(q, &blkcg_policy_throtl);
+ blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
free_percpu(q->td->latency_buckets[READ]);
free_percpu(q->td->latency_buckets[WRITE]);
kfree(q->td);
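Finally, blk_iocost_init() and blk_throtl_init() above show the activate-last ordering: per-policy setup happens first, activation happens last, and a failure only has to unwind the local setup. A hedged sketch of that ordering; struct example_data, example_policy_init() and blkcg_policy_example are hypothetical names introduced only for illustration:

	/* Illustrative sketch of the activate-last/unwind-on-failure shape;
	 * assumes <linux/slab.h> and block/blk-cgroup.h are included.
	 */
	struct example_data {
		int dummy;	/* placeholder for per-policy state */
	};

	static struct blkcg_policy blkcg_policy_example;

	static int example_policy_init(struct gendisk *disk, struct example_data **edp)
	{
		struct example_data *ed;
		int ret;

		ed = kzalloc(sizeof(*ed), GFP_KERNEL);
		if (!ed)
			return -ENOMEM;

		/* Activate last so a failure only needs to free local state. */
		ret = blkcg_activate_policy(disk, &blkcg_policy_example);
		if (ret) {
			kfree(ed);
			return ret;
		}

		*edp = ed;
		return 0;
	}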