author    Dave Chinner <dchinner@redhat.com>  2022-07-07 18:52:59 +1000
committer Dave Chinner <david@fromorbit.com>  2022-07-07 18:52:59 +1000
commit    df7a4a2134b0a201c93e96efe4bb2be747f9da9f
tree      3a6cc963af087878f3bdaad68fb747aca4f1a6fe
parent    1dd2a2c18e314ad89200f8296c86dd4ecd53dea6
xfs: convert CIL busy extents to per-cpu
To get them out from under the CIL lock.

This is an unordered list, so we can simply punt it to per-cpu lists
during transaction commits and reaggregate it back into a single list
during the CIL push work.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
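For readers following the pattern rather than the XFS internals, here is a
minimal userspace sketch of what the patch does: transaction commits splice
their busy extents onto a per-CPU list without touching a shared lock, and
the push side splices every per-CPU list back into one context list. This is
an illustrative analogue only; NR_CPUS, the hand-rolled list helpers, and the
commit_punt()/push_aggregate() functions are assumptions of the sketch,
standing in for the kernel's list_head API, get_cpu_ptr()/per_cpu_ptr(), and
the code in the diff below.

/*
 * Minimal userspace sketch of the per-cpu punt/reaggregate pattern,
 * assuming a fixed CPU count and a hand-rolled list_head analogue.
 * Build: cc -o pcp_sketch pcp_sketch.c
 */
#include <stdio.h>
#include <stddef.h>

#define NR_CPUS 4                       /* assumption: fixed for the sketch */

struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *l)
{
        l->next = l->prev = l;
}

static int list_empty(const struct list_head *l)
{
        return l->next == l;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

/*
 * Move all entries of @src onto @dst and reinitialise @src. The kernel's
 * list_splice_init() splices at the head rather than the tail; the
 * difference is irrelevant here because the busy extent list is unordered.
 */
static void list_splice_init(struct list_head *src, struct list_head *dst)
{
        if (list_empty(src))
                return;
        src->next->prev = dst->prev;
        dst->prev->next = src->next;
        src->prev->next = dst;
        dst->prev = src->prev;
        INIT_LIST_HEAD(src);
}

struct busy_extent {
        struct list_head list;
        int id;
};

/* one unordered busy-extent list per CPU, no shared lock needed */
static struct list_head pcp_busy[NR_CPUS];

/* commit path: punt the transaction's busy extents to this CPU's list */
static void commit_punt(int cpu, struct list_head *t_busy)
{
        list_splice_init(t_busy, &pcp_busy[cpu]);
}

/* push path: reaggregate every per-cpu list into the context's list */
static void push_aggregate(struct list_head *ctx_busy)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                list_splice_init(&pcp_busy[cpu], ctx_busy);
}

int main(void)
{
        struct busy_extent ext[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };
        struct list_head t_busy, ctx_busy;

        INIT_LIST_HEAD(&t_busy);
        INIT_LIST_HEAD(&ctx_busy);
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                INIT_LIST_HEAD(&pcp_busy[cpu]);

        for (int i = 0; i < 3; i++)
                list_add_tail(&ext[i].list, &t_busy);

        commit_punt(2, &t_busy);        /* transaction committed on CPU 2 */
        push_aggregate(&ctx_busy);      /* CIL push gathers everything back */

        for (struct list_head *p = ctx_busy.next; p != &ctx_busy; p = p->next) {
                struct busy_extent *be = (struct busy_extent *)
                                ((char *)p - offsetof(struct busy_extent, list));
                printf("busy extent %d\n", be->id);
        }
        return 0;
}

Note that the kernel side initialises the per-CPU list heads with
for_each_possible_cpu() rather than just the online CPUs, and the hotplug
dead callback (xlog_cil_pcp_dead() in the diff below) splices a departing
CPU's busy extents back into the current context, so the unordered list never
loses entries across CPU hotplug.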
Diffstat (limited to 'fs/xfs/xfs_log_cil.c')
 fs/xfs/xfs_log_cil.c | 26 ++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index e38e10082da2..f02a75d5a03e 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -128,6 +128,11 @@ xlog_cil_push_pcp_aggregate(
                 ctx->ticket->t_curr_res += cilpcp->space_reserved;
                 cilpcp->space_reserved = 0;
 
+                if (!list_empty(&cilpcp->busy_extents)) {
+                        list_splice_init(&cilpcp->busy_extents,
+                                        &ctx->busy_extents);
+                }
+
                 /*
                  * We're in the middle of switching cil contexts. Reset the
                  * counter we use to detect when the current context is nearing
@@ -634,6 +639,9 @@ xlog_cil_insert_items(
         } else {
                 cilpcp->space_used += len;
         }
+        /* attach the transaction to the CIL if it has any busy extents */
+        if (!list_empty(&tp->t_busy))
+                list_splice_init(&tp->t_busy, &cilpcp->busy_extents);
         put_cpu_ptr(cilpcp);
 
         /*
@@ -656,9 +664,6 @@ xlog_cil_insert_items(
                 list_move_tail(&lip->li_cil, &cil->xc_cil);
         }
 
-        /* attach the transaction to the CIL if it has any busy extents */
-        if (!list_empty(&tp->t_busy))
-                list_splice_init(&tp->t_busy, &ctx->busy_extents);
         spin_unlock(&cil->xc_cil_lock);
 
         /*
@@ -1756,6 +1761,8 @@ xlog_cil_pcp_dead(
         ctx->ticket->t_curr_res += cilpcp->space_reserved;
         cilpcp->space_reserved = 0;
 
+        if (!list_empty(&cilpcp->busy_extents))
+                list_splice_init(&cilpcp->busy_extents, &ctx->busy_extents);
         atomic_add(cilpcp->space_used, &ctx->space_used);
         cilpcp->space_used = 0;
         up_write(&cil->xc_ctx_lock);
@@ -1766,10 +1773,12 @@ xlog_cil_pcp_dead(
  */
 int
 xlog_cil_init(
-        struct xlog     *log)
+        struct xlog             *log)
 {
-        struct xfs_cil  *cil;
-        struct xfs_cil_ctx *ctx;
+        struct xfs_cil          *cil;
+        struct xfs_cil_ctx      *ctx;
+        struct xlog_cil_pcp     *cilpcp;
+        int                     cpu;
 
         cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
         if (!cil)
@@ -1789,6 +1798,11 @@ xlog_cil_init(
         if (!cil->xc_pcp)
                 goto out_destroy_wq;
 
+        for_each_possible_cpu(cpu) {
+                cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
+                INIT_LIST_HEAD(&cilpcp->busy_extents);
+        }
+
         INIT_LIST_HEAD(&cil->xc_cil);
         INIT_LIST_HEAD(&cil->xc_committing);
         spin_lock_init(&cil->xc_cil_lock);