Diffstat (limited to 'fs/btrfs/delayed-ref.c')
| -rw-r--r-- | fs/btrfs/delayed-ref.c | 45 |
1 files changed, 34 insertions, 11 deletions
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 481802efaa14..e8bc37453336 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -228,7 +228,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
 
 	if (!num_bytes)
 		return 0;
-	ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes, flush);
+	ret = btrfs_reserve_metadata_bytes(space_info, num_bytes, flush);
 	if (ret)
 		return ret;
 
@@ -798,9 +798,13 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
 }
 
 /*
- * helper function to actually insert a head node into the rbtree.
- * this does all the dirty work in terms of maintaining the correct
- * overall modification count.
+ * Helper function to actually insert a head node into the xarray. This does all
+ * the dirty work in terms of maintaining the correct overall modification
+ * count.
+ *
+ * The caller is responsible for calling kfree() on @qrecord. More specifically,
+ * if this function reports that it did not insert it as noted in
+ * @qrecord_inserted_ret, then it's safe to call kfree() on it.
  *
  * Returns an error pointer in case of an error.
  */
@@ -814,7 +818,14 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
 	struct btrfs_delayed_ref_head *existing;
 	struct btrfs_delayed_ref_root *delayed_refs;
 	const unsigned long index = (head_ref->bytenr >> fs_info->sectorsize_bits);
-	bool qrecord_inserted = false;
+
+	/*
+	 * If 'qrecord_inserted_ret' is provided, then the first thing we need
+	 * to do is to initialize it to false just in case we have an exit
+	 * before trying to insert the record.
+	 */
+	if (qrecord_inserted_ret)
+		*qrecord_inserted_ret = false;
 
 	delayed_refs = &trans->transaction->delayed_refs;
 	lockdep_assert_held(&delayed_refs->lock);
@@ -833,6 +844,12 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
 
 	/* Record qgroup extent info if provided */
 	if (qrecord) {
+		/*
+		 * Setting 'qrecord' but not 'qrecord_inserted_ret' will likely
+		 * result in a memory leakage.
+		 */
+		ASSERT(qrecord_inserted_ret != NULL);
+
 		int ret;
 
 		ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord,
@@ -840,12 +857,10 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
 		if (ret) {
 			/* Clean up if insertion fails or item exists. */
 			xa_release(&delayed_refs->dirty_extents, index);
-			/* Caller responsible for freeing qrecord on error. */
 			if (ret < 0)
 				return ERR_PTR(ret);
-			kfree(qrecord);
-		} else {
-			qrecord_inserted = true;
+		} else if (qrecord_inserted_ret) {
+			*qrecord_inserted_ret = true;
 		}
 	}
 
@@ -888,8 +903,6 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
 		delayed_refs->num_heads++;
 		delayed_refs->num_heads_ready++;
 	}
-	if (qrecord_inserted_ret)
-		*qrecord_inserted_ret = qrecord_inserted;
 
 	return head_ref;
 }
@@ -1049,6 +1062,14 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
 			xa_release(&delayed_refs->head_refs, index);
 			spin_unlock(&delayed_refs->lock);
 			ret = PTR_ERR(new_head_ref);
+
+			/*
+			 * It's only safe to call kfree() on 'qrecord' if
+			 * add_delayed_ref_head() has _not_ inserted it for
+			 * tracing. Otherwise we need to handle this here.
+			 */
+			if (!qrecord_reserved || qrecord_inserted)
+				goto free_head_ref;
 			goto free_record;
 		}
 		head_ref = new_head_ref;
@@ -1071,6 +1092,8 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
 	if (qrecord_inserted)
 		return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);
+
+	kfree(record);
 	return 0;
 
 free_record:
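The change shifts ownership of the qgroup record: add_delayed_ref_head() no longer frees @qrecord itself, it only reports through @qrecord_inserted_ret whether the record was inserted, and the caller frees the record when it was not. Below is a minimal, self-contained C sketch of that ownership rule; the types and try_insert_record() helper are simplified stand-ins, not the real btrfs structures or API.

```c
/*
 * Sketch of the qrecord ownership rule: the callee reports via an out
 * parameter whether it took ownership, and the caller frees otherwise.
 * All names here are hypothetical stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

struct qrecord {
	unsigned long long bytenr;
};

/* Pretend tracking structure with a single slot. */
struct tracker {
	struct qrecord *slot;
};

/*
 * Hypothetical analogue of the patched helper: returns 0 on success or a
 * negative value on error, and sets *inserted_ret to true only if it
 * actually took ownership of @rec.
 */
static int try_insert_record(struct tracker *t, struct qrecord *rec,
			     bool *inserted_ret)
{
	/* Initialize the out parameter first, like the patched helper does. */
	if (inserted_ret)
		*inserted_ret = false;

	if (t->slot != NULL)
		return 0;	/* Already tracked: record was not inserted. */

	t->slot = rec;		/* Ownership transferred to the tracker. */
	if (inserted_ret)
		*inserted_ret = true;
	return 0;
}

int main(void)
{
	struct tracker t = { .slot = NULL };
	struct qrecord *rec = malloc(sizeof(*rec));
	bool inserted = false;

	if (!rec)
		return 1;
	rec->bytenr = 4096;

	if (try_insert_record(&t, rec, &inserted) < 0 || !inserted) {
		/* Caller still owns the record, so free it here. */
		free(rec);
	}
	/* If it was inserted, the tracker owns it and frees it on teardown. */
	free(t.slot);
	printf("inserted=%d\n", inserted);
	return 0;
}
```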
