From 8c911f3d4c074a17955a1757c9d1d5a9a5209ca5 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 1 Jul 2020 11:06:21 +0200
Subject: writeback: remove struct bdi_writeback_congested

We never set any congested bits in the group writeback instances of it.
And for the simpler bdi-wide case a simple scalar field is all that is
needed.

Signed-off-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 mm/backing-dev.c | 149 ++++---------------------------------------------------
 1 file changed, 9 insertions(+), 140 deletions(-)

(limited to 'mm')
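[ Note: this view is limited to 'mm', so the companion change to the
  writeback structures in include/linux/backing-dev-defs.h is not shown
  here.  As a rough, trimmed sketch of that side of the series (field
  names as in the surrounding tree; not the verbatim header diff):

	/* Before: one refcounted object per (bdi, blkcg) pair, kept on
	 * an rb-tree and looked up by wb_congested_get_create() below. */
	struct bdi_writeback_congested {
		unsigned long state;		/* WB_[a]sync_congested flags */
		refcount_t refcnt;		/* nr of attached wb's and blkg */
		struct backing_dev_info *__bdi;	/* NULL once the bdi is gone */
		int blkcg_id;			/* ID of the associated blkcg */
		struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
	};

	/* After: since only the bdi-wide instance ever has bits set, a
	 * plain scalar embedded in struct bdi_writeback suffices. */
	struct bdi_writeback {
		/* ... */
		unsigned long congested;	/* WB_[a]sync_congested flags */
		/* ... */
	};

  With no per-cgroup user of the bits, the rb-tree lookup, the
  refcounting and the teardown paths below all become dead code. ]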
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 3ebe5144a102..8e8b00627bb2 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -281,7 +281,7 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
 #define INIT_BW		(100 << (20 - PAGE_SHIFT))
 
 static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
-		   int blkcg_id, gfp_t gfp)
+		   gfp_t gfp)
 {
 	int i, err;
 
@@ -308,15 +308,9 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
 	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
 	wb->dirty_sleep = jiffies;
 
-	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
-	if (!wb->congested) {
-		err = -ENOMEM;
-		goto out_put_bdi;
-	}
-
 	err = fprop_local_init_percpu(&wb->completions, gfp);
 	if (err)
-		goto out_put_cong;
+		goto out_put_bdi;
 
 	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
 		err = percpu_counter_init(&wb->stat[i], 0, gfp);
@@ -330,8 +324,6 @@ out_destroy_stat:
 	while (i--)
 		percpu_counter_destroy(&wb->stat[i]);
 	fprop_local_destroy_percpu(&wb->completions);
-out_put_cong:
-	wb_congested_put(wb->congested);
 out_put_bdi:
 	if (wb != &bdi->wb)
 		bdi_put(bdi);
@@ -374,7 +366,6 @@ static void wb_exit(struct bdi_writeback *wb)
 		percpu_counter_destroy(&wb->stat[i]);
 
 	fprop_local_destroy_percpu(&wb->completions);
-	wb_congested_put(wb->congested);
 	if (wb != &wb->bdi->wb)
 		bdi_put(wb->bdi);
 }
@@ -384,99 +375,12 @@ static void wb_exit(struct bdi_writeback *wb)
 #include <linux/memcontrol.h>
 
 /*
- * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
- * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
- * protected.
+ * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, and memcg->cgwb_list.
+ * bdi->cgwb_tree is also RCU protected.
  */
 static DEFINE_SPINLOCK(cgwb_lock);
 static struct workqueue_struct *cgwb_release_wq;
 
-/**
- * wb_congested_get_create - get or create a wb_congested
- * @bdi: associated bdi
- * @blkcg_id: ID of the associated blkcg
- * @gfp: allocation mask
- *
- * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
- * The returned wb_congested has its reference count incremented.  Returns
- * NULL on failure.
- */
-struct bdi_writeback_congested *
-wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
-{
-	struct bdi_writeback_congested *new_congested = NULL, *congested;
-	struct rb_node **node, *parent;
-	unsigned long flags;
-retry:
-	spin_lock_irqsave(&cgwb_lock, flags);
-
-	node = &bdi->cgwb_congested_tree.rb_node;
-	parent = NULL;
-
-	while (*node != NULL) {
-		parent = *node;
-		congested = rb_entry(parent, struct bdi_writeback_congested,
-				     rb_node);
-		if (congested->blkcg_id < blkcg_id)
-			node = &parent->rb_left;
-		else if (congested->blkcg_id > blkcg_id)
-			node = &parent->rb_right;
-		else
-			goto found;
-	}
-
-	if (new_congested) {
-		/* !found and storage for new one already allocated, insert */
-		congested = new_congested;
-		rb_link_node(&congested->rb_node, parent, node);
-		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
-		spin_unlock_irqrestore(&cgwb_lock, flags);
-		return congested;
-	}
-
-	spin_unlock_irqrestore(&cgwb_lock, flags);
-
-	/* allocate storage for new one and retry */
-	new_congested = kzalloc(sizeof(*new_congested), gfp);
-	if (!new_congested)
-		return NULL;
-
-	refcount_set(&new_congested->refcnt, 1);
-	new_congested->__bdi = bdi;
-	new_congested->blkcg_id = blkcg_id;
-	goto retry;
-
-found:
-	refcount_inc(&congested->refcnt);
-	spin_unlock_irqrestore(&cgwb_lock, flags);
-	kfree(new_congested);
-	return congested;
-}
-
-/**
- * wb_congested_put - put a wb_congested
- * @congested: wb_congested to put
- *
- * Put @congested and destroy it if the refcnt reaches zero.
- */
-void wb_congested_put(struct bdi_writeback_congested *congested)
-{
-	unsigned long flags;
-
-	if (!refcount_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags))
-		return;
-
-	/* bdi might already have been destroyed leaving @congested unlinked */
-	if (congested->__bdi) {
-		rb_erase(&congested->rb_node,
-			 &congested->__bdi->cgwb_congested_tree);
-		congested->__bdi = NULL;
-	}
-
-	spin_unlock_irqrestore(&cgwb_lock, flags);
-	kfree(congested);
-}
-
 static void cgwb_release_workfn(struct work_struct *work)
 {
 	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
@@ -558,7 +462,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
 		goto out_put;
 	}
 
-	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
+	ret = wb_init(wb, bdi, gfp);
 	if (ret)
 		goto err_free;
 
@@ -696,11 +600,10 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 	int ret;
 
 	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
-	bdi->cgwb_congested_tree = RB_ROOT;
 	mutex_init(&bdi->cgwb_release_mutex);
 	init_rwsem(&bdi->wb_switch_rwsem);
 
-	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
+	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
 	if (!ret) {
 		bdi->wb.memcg_css = &root_mem_cgroup->css;
 		bdi->wb.blkcg_css = blkcg_root_css;
@@ -769,21 +672,6 @@ void wb_blkcg_offline(struct blkcg *blkcg)
 	spin_unlock_irq(&cgwb_lock);
 }
 
-static void cgwb_bdi_exit(struct backing_dev_info *bdi)
-{
-	struct rb_node *rbn;
-
-	spin_lock_irq(&cgwb_lock);
-	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
-		struct bdi_writeback_congested *congested =
-			rb_entry(rbn, struct bdi_writeback_congested, rb_node);
-
-		rb_erase(rbn, &bdi->cgwb_congested_tree);
-		congested->__bdi = NULL;	/* mark @congested unlinked */
-	}
-	spin_unlock_irq(&cgwb_lock);
-}
-
 static void cgwb_bdi_register(struct backing_dev_info *bdi)
 {
 	spin_lock_irq(&cgwb_lock);
@@ -810,29 +698,11 @@ subsys_initcall(cgwb_init);
 
 static int cgwb_bdi_init(struct backing_dev_info *bdi)
 {
-	int err;
-
-	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
-	if (!bdi->wb_congested)
-		return -ENOMEM;
-
-	refcount_set(&bdi->wb_congested->refcnt, 1);
-
-	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
-	if (err) {
-		wb_congested_put(bdi->wb_congested);
-		return err;
-	}
-	return 0;
+	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
 }
 
 static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
 {
 }
 
-static void cgwb_bdi_exit(struct backing_dev_info *bdi)
-{
-	wb_congested_put(bdi->wb_congested);
-}
-
 static void cgwb_bdi_register(struct backing_dev_info *bdi)
 {
 	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
@@ -1023,7 +893,6 @@ static void release_bdi(struct kref *ref)
 	bdi_unregister(bdi);
 	WARN_ON_ONCE(bdi->dev);
 	wb_exit(&bdi->wb);
-	cgwb_bdi_exit(bdi);
 	kfree(bdi);
 }
 
@@ -1053,7 +922,7 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 	enum wb_congested_state bit;
 
 	bit = sync ? WB_sync_congested : WB_async_congested;
-	if (test_and_clear_bit(bit, &bdi->wb.congested->state))
+	if (test_and_clear_bit(bit, &bdi->wb.congested))
 		atomic_dec(&nr_wb_congested[sync]);
 	smp_mb__after_atomic();
 	if (waitqueue_active(wqh))
@@ -1066,7 +935,7 @@ void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 	enum wb_congested_state bit;
 
 	bit = sync ? WB_sync_congested : WB_async_congested;
-	if (!test_and_set_bit(bit, &bdi->wb.congested->state))
+	if (!test_and_set_bit(bit, &bdi->wb.congested))
 		atomic_inc(&nr_wb_congested[sync]);
 }
 EXPORT_SYMBOL(set_bdi_congested);
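[ Note: the reader side of the flag lives in include/linux/backing-dev.h,
  also outside this 'mm'-only view.  After the change it can test the
  embedded scalar directly; roughly (a sketch, not the verbatim header
  change):

	static inline bool wb_congested(struct bdi_writeback *wb,
					int cong_bits)
	{
		/* was: return wb->congested->state & cong_bits; */
		return wb->congested & cong_bits;
	}
  ]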