From: Dave Chinner <dchinner@xxxxxxxxxx>
Subject: [PATCH] xfs: convert CIL busy extents to per-cpu

To get them out from under the CIL lock.

This is an unordered list, so we can simply punt it to per-cpu lists
during transaction commits and reaggregate it back into a single list
during the CIL push work.

Signed-off-by: Dave Chinner <dchinner@xxxxxxxxxx>
---
 fs/xfs/xfs_log_cil.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index a2f93bd7644b..7428b98c8279 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -501,6 +501,9 @@ xlog_cil_insert_items(
 		atomic_add(cilpcp->space_used, &ctx->space_used);
 		cilpcp->space_used = 0;
 	}
+	/* attach the transaction to the CIL if it has any busy extents */
+	if (!list_empty(&tp->t_busy))
+		list_splice_init(&tp->t_busy, &cilpcp->busy_extents);
 	put_cpu_ptr(cilpcp);
 
 	/*
@@ -540,9 +543,6 @@ xlog_cil_insert_items(
 		list_move_tail(&lip->li_cil, &cil->xc_cil);
 	}
 
-	/* attach the transaction to the CIL if it has any busy extents */
-	if (!list_empty(&tp->t_busy))
-		list_splice_init(&tp->t_busy, &ctx->busy_extents);
 	spin_unlock(&cil->xc_cil_lock);
 
 	if (tp->t_ticket->t_curr_res < 0)
@@ -802,7 +802,10 @@ xlog_cil_push_work(
 		ctx->ticket->t_curr_res += cilpcp->space_reserved;
 		cilpcp->space_used = 0;
 		cilpcp->space_reserved = 0;
-
+		if (!list_empty(&cilpcp->busy_extents)) {
+			list_splice_init(&cilpcp->busy_extents,
+					&ctx->busy_extents);
+		}
 	}
 
 	spin_lock(&cil->xc_push_lock);
@@ -1459,17 +1462,24 @@ static void __percpu *
 xlog_cil_pcp_alloc(
 	struct xfs_cil		*cil)
 {
+	void __percpu		*pcptr;
 	struct xlog_cil_pcp	*cilpcp;
+	int			cpu;
 
-	cilpcp = alloc_percpu(struct xlog_cil_pcp);
-	if (!cilpcp)
+	pcptr = alloc_percpu(struct xlog_cil_pcp);
+	if (!pcptr)
 		return NULL;
 
+	for_each_possible_cpu(cpu) {
+		cilpcp = per_cpu_ptr(pcptr, cpu);
+		INIT_LIST_HEAD(&cilpcp->busy_extents);
+	}
+
 	if (xlog_cil_pcp_hpadd(cil) < 0) {
-		free_percpu(cilpcp);
+		free_percpu(pcptr);
 		return NULL;
 	}
-	return cilpcp;
+	return pcptr;
 }
 
 static void
-- 
2.28.0