author    Linus Torvalds <torvalds@linux-foundation.org>  2020-02-06 06:15:23 +0000
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-02-06 06:15:23 +0000
commit    ed535f2c9e00eafdeb57d6310b7c8c5a009a9262 (patch)
tree      588d440a180c93dfb420b2a22b842a4001691192 /block
parent    0384066381ed5572cf1f57f8d01eaccd3f6d4785 (diff)
parent    b74e58cd472cb782d34ecfad553c12c66eb02b6b (diff)
Merge tag 'block-5.6-2020-02-05' of git://git.kernel.dk/linux-block
Pull more block updates from Jens Axboe:
 "Some later arrivals, but all fixes at this point:

  - bcache fix series (Coly)

  - Series of BFQ fixes (Paolo)

  - NVMe pull request from Keith with a few minor NVMe fixes

  - Various little tweaks"

* tag 'block-5.6-2020-02-05' of git://git.kernel.dk/linux-block: (23 commits)
  nvmet: update AEN list and array at one place
  nvmet: Fix controller use after free
  nvmet: Fix error print message at nvmet_install_queue function
  brd: check and limit max_part par
  nvme-pci: remove nvmeq->tags
  nvmet: fix dsm failure when payload does not match sgl descriptor
  nvmet: Pass lockdep expression to RCU lists
  block, bfq: clarify the goal of bfq_split_bfqq()
  block, bfq: get a ref to a group when adding it to a service tree
  block, bfq: remove ifdefs from around gets/puts of bfq groups
  block, bfq: extend incomplete name of field on_st
  block, bfq: get extra ref to prevent a queue from being freed during a group move
  block, bfq: do not insert oom queue into position tree
  block, bfq: do not plug I/O for bfq_queues with no proc refs
  bcache: check return value of prio_read()
  bcache: fix incorrect data type usage in btree_flush_write()
  bcache: add readahead cache policy options via sysfs interface
  bcache: explicity type cast in bset_bkey_last()
  bcache: fix memory corruption in bch_cache_accounting_clear()
  xen/blkfront: limit allocated memory size to actual use case
  ...
Diffstat (limited to 'block')
-rw-r--r--  block/bfq-cgroup.c   16
-rw-r--r--  block/bfq-iosched.c  26
-rw-r--r--  block/bfq-iosched.h   4
-rw-r--r--  block/bfq-wf2q.c     23
4 files changed, 54 insertions, 15 deletions
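Several of the BFQ patches in this series rely on the same idea: take an extra reference on a bfq_queue before an operation that may drop what would otherwise be its last reference, then release that extra reference afterwards (see the bfq_bfqq_move() hunk below). The following is a minimal, self-contained sketch of that pattern; the struct and function names are illustrative, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

struct queue {
	int ref;			/* reference count */
};

static void queue_put(struct queue *q)
{
	if (--q->ref == 0) {
		printf("freeing queue\n");
		free(q);
	}
}

/* Stands in for bfq_deactivate_bfqq(): may drop a reference as a side effect. */
static void deactivate(struct queue *q)
{
	queue_put(q);
}

static void move(struct queue *q)
{
	/*
	 * Take an extra reference so the queue cannot be freed while
	 * deactivate() drops what might otherwise be the last reference.
	 */
	q->ref++;
	deactivate(q);
	/* ... re-parenting to the new group would happen here ... */
	queue_put(q);		/* release the extra reference taken above */
}

int main(void)
{
	struct queue *q = malloc(sizeof(*q));

	q->ref = 1;
	move(q);		/* q is freed before move() returns */
	return 0;
}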
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index e1419edde2ec..09b69a3ed490 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg)
kfree(bfqg);
}
-static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
bfqg_get(bfqg);
@@ -651,9 +651,15 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
false, BFQQE_PREEMPTED);
+ /*
+ * get extra reference to prevent bfqq from being freed in
+ * next possible deactivate
+ */
+ bfqq->ref++;
+
if (bfq_bfqq_busy(bfqq))
bfq_deactivate_bfqq(bfqd, bfqq, false, false);
- else if (entity->on_st)
+ else if (entity->on_st_or_in_serv)
bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
bfqg_and_blkg_put(bfqq_group(bfqq));
@@ -670,6 +676,8 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
bfq_schedule_dispatch(bfqd);
+ /* release extra ref taken above */
+ bfq_put_queue(bfqq);
}
/**
@@ -1398,6 +1406,10 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
return bfqq->bfqd->root_group;
}
+void bfqg_and_blkg_get(struct bfq_group *bfqg) {}
+
+void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
+
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
struct bfq_group *bfqg;
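The last hunk above adds empty bfqg_and_blkg_get()/bfqg_and_blkg_put() stubs for the !CONFIG_BFQ_GROUP_IOSCHED build, which is what allows the #ifdef guards to be dropped from the call sites in bfq-iosched.c further down. A hedged sketch of that stubs-instead-of-ifdefs pattern follows; the names and the GROUP_SCHED_ENABLED macro standing in for the Kconfig option are made up for illustration.

#include <stdio.h>

struct group {
	const char *name;
};

#ifdef GROUP_SCHED_ENABLED
static void group_get(struct group *g) { printf("get %s\n", g->name); }
static void group_put(struct group *g) { printf("put %s\n", g->name); }
#else
/* Empty stubs: call sites need no #ifdef around every get/put pair. */
static void group_get(struct group *g) { (void)g; }
static void group_put(struct group *g) { (void)g; }
#endif

int main(void)
{
	struct group g = { .name = "root_group" };

	/* The call sites read the same whether or not grouping is compiled in. */
	group_get(&g);
	/* ... work that relies on the group staying alive ... */
	group_put(&g);
	return 0;
}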
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 4686b68b48b4..8c436abfaf14 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -613,6 +613,10 @@ bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfqq->pos_root = NULL;
}
+ /* oom_bfqq does not participate in queue merging */
+ if (bfqq == &bfqd->oom_bfqq)
+ return;
+
/*
* bfqq cannot be merged any longer (see comments in
* bfq_setup_cooperator): no point in adding bfqq into the
@@ -1055,7 +1059,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
static int bfqq_process_refs(struct bfq_queue *bfqq)
{
- return bfqq->ref - bfqq->allocated - bfqq->entity.on_st -
+ return bfqq->ref - bfqq->allocated - bfqq->entity.on_st_or_in_serv -
(bfqq->weight_counter != NULL);
}
@@ -3443,6 +3447,10 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
+ /* No point in idling for bfqq if it won't get requests any longer */
+ if (unlikely(!bfqq_process_refs(bfqq)))
+ return false;
+
return (bfqq->wr_coeff > 1 &&
(bfqd->wr_busy_queues <
bfq_tot_busy_queues(bfqd) ||
@@ -4076,6 +4084,10 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
bfqq_sequential_and_IO_bound,
idling_boosts_thr;
+ /* No point in idling for bfqq if it won't get requests any longer */
+ if (unlikely(!bfqq_process_refs(bfqq)))
+ return false;
+
bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
@@ -4169,6 +4181,10 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
struct bfq_data *bfqd = bfqq->bfqd;
bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
+ /* No point in idling for bfqq if it won't get requests any longer */
+ if (unlikely(!bfqq_process_refs(bfqq)))
+ return false;
+
if (unlikely(bfqd->strict_guarantees))
return true;
@@ -4809,9 +4825,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
{
struct bfq_queue *item;
struct hlist_node *n;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_group *bfqg = bfqq_group(bfqq);
-#endif
if (bfqq->bfqd)
bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
@@ -4884,9 +4898,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
bfqq->bfqd->last_completed_rq_bfqq = NULL;
kmem_cache_free(bfq_pool, bfqq);
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
bfqg_and_blkg_put(bfqg);
-#endif
}
static void bfq_put_cooperator(struct bfq_queue *bfqq)
@@ -5967,6 +5979,8 @@ static void bfq_finish_requeue_request(struct request *rq)
}
/*
+ * Removes the association between the current task and bfqq, assuming
+ * that bic points to the bfq iocontext of the task.
* Returns NULL if a new bfqq should be allocated, or the old bfqq if this
* was the last process referring to that bfqq.
*/
@@ -6374,10 +6388,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
hrtimer_cancel(&bfqd->idle_slice_timer);
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
/* release oom-queue reference to root group */
bfqg_and_blkg_put(bfqd->root_group);
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
#else
spin_lock_irq(&bfqd->lock);
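Three hunks in bfq-iosched.c above add the same early guard to the idling heuristics: if bfqq_process_refs() returns zero, no process can send further I/O to the queue, so idling for it cannot pay off. Below is a simplified, self-contained sketch of that check, assuming (as the bfqq_process_refs() hunk suggests) that "process references" are total references minus the internal bookkeeping ones; the struct layout is illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct bfqq_like {
	int ref;		/* total references */
	int allocated;		/* references held by allocated requests */
	int on_tree;		/* reference held by a service tree / in-service state */
	int weight_counter;	/* reference held by a weight counter (0 or 1) */
};

/* Process references = total references minus internal bookkeeping references. */
static int process_refs(const struct bfqq_like *q)
{
	return q->ref - q->allocated - q->on_tree - q->weight_counter;
}

static bool better_to_idle(const struct bfqq_like *q)
{
	/* No process will queue more I/O here, so idling cannot help. */
	if (!process_refs(q))
		return false;

	/* ... the real throughput/service-guarantee heuristics would follow ... */
	return true;
}

int main(void)
{
	struct bfqq_like q = { .ref = 3, .allocated = 1, .on_tree = 1, .weight_counter = 1 };

	printf("idle? %d\n", better_to_idle(&q));	/* prints 0: no process refs left */
	q.ref++;					/* a process still holds a reference */
	printf("idle? %d\n", better_to_idle(&q));	/* prints 1 */
	return 0;
}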
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 8526f20c53bc..d1233af9c684 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -150,7 +150,7 @@ struct bfq_entity {
* Flag, true if the entity is on a tree (either the active or
* the idle one of its service_tree) or is in service.
*/
- bool on_st;
+ bool on_st_or_in_serv;
/* B-WF2Q+ start and finish timestamps [sectors/weight] */
u64 start, finish;
@@ -921,6 +921,7 @@ struct bfq_group {
#else
struct bfq_group {
+ struct bfq_entity entity;
struct bfq_sched_data sched_data;
struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
@@ -984,6 +985,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
+void bfqg_and_blkg_get(struct bfq_group *bfqg);
void bfqg_and_blkg_put(struct bfq_group *bfqg);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index ffe9ce9faa89..eb0e2a6daabe 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -533,7 +533,9 @@ static void bfq_get_entity(struct bfq_entity *entity)
bfqq->ref++;
bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
bfqq, bfqq->ref);
- }
+ } else
+ bfqg_and_blkg_get(container_of(entity, struct bfq_group,
+ entity));
}
/**
@@ -645,10 +647,16 @@ static void bfq_forget_entity(struct bfq_service_tree *st,
{
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
- entity->on_st = false;
+ entity->on_st_or_in_serv = false;
st->wsum -= entity->weight;
- if (bfqq && !is_in_service)
+ if (is_in_service)
+ return;
+
+ if (bfqq)
bfq_put_queue(bfqq);
+ else
+ bfqg_and_blkg_put(container_of(entity, struct bfq_group,
+ entity));
}
/**
@@ -999,7 +1007,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
*/
bfq_get_entity(entity);
- entity->on_st = true;
+ entity->on_st_or_in_serv = true;
}
#ifdef CONFIG_BFQ_GROUP_IOSCHED
@@ -1165,7 +1173,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
struct bfq_service_tree *st;
bool is_in_service;
- if (!entity->on_st) /* entity never activated, or already inactive */
+ if (!entity->on_st_or_in_serv) /*
+ * entity never activated, or
+ * already inactive
+ */
return false;
/*
@@ -1620,7 +1631,7 @@ bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
* service tree either, then release the service reference to
* the queue it represents (taken with bfq_get_entity).
*/
- if (!in_serv_entity->on_st) {
+ if (!in_serv_entity->on_st_or_in_serv) {
/*
* If no process is referencing in_serv_bfqq any
* longer, then the service reference may be the only