During shmem_split_large_entry(), large swap entries cover n slots and
an order-0 folio needs to be inserted.

Instead of splitting all n slots, only the 1 slot covered by the folio
needs to be split and the remaining n-1 swap entries can be retained
with orders ranging from 0 to n-1.  This method only requires
(n/XA_CHUNK_SHIFT) new xa_nodes instead of
(n % XA_CHUNK_SHIFT) * (n/XA_CHUNK_SHIFT) new xa_nodes, compared to the
original xas_split_alloc() + xas_split() approach.

For example, to split an order-9 large swap entry (assuming
XA_CHUNK_SHIFT is 6), 1 xa_node is needed instead of 8.

xas_try_split_min_order() is used to reduce the number of calls to
xas_try_split() during the split.

Signed-off-by: Zi Yan <ziy@xxxxxxxxxx>
---
 mm/shmem.c | 43 ++++++++++++++++---------------------------
 1 file changed, 16 insertions(+), 27 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 671f63063fd4..b35ba250c53d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2162,14 +2162,14 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
 {
 	struct address_space *mapping = inode->i_mapping;
 	XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
-	void *alloced_shadow = NULL;
-	int alloced_order = 0, i;
+	int split_order = 0;
+	int i;
 
 	/* Convert user data gfp flags to xarray node gfp flags */
 	gfp &= GFP_RECLAIM_MASK;
 
 	for (;;) {
-		int order = -1, split_order = 0;
+		int order = -1;
 		void *old = NULL;
 
 		xas_lock_irq(&xas);
@@ -2181,20 +2181,21 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
 
 		order = xas_get_order(&xas);
 
-		/* Swap entry may have changed before we re-acquire the lock */
-		if (alloced_order &&
-		    (old != alloced_shadow || order != alloced_order)) {
-			xas_destroy(&xas);
-			alloced_order = 0;
-		}
-
 		/* Try to split large swap entry in pagecache */
 		if (order > 0) {
-			if (!alloced_order) {
-				split_order = order;
-				goto unlock;
+			int cur_order = order;
+
+			split_order = xas_try_split_min_order(cur_order);
+
+			while (cur_order > 0) {
+				xas_set_order(&xas, index, split_order);
+				xas_try_split(&xas, old, cur_order, GFP_NOWAIT);
+				if (xas_error(&xas))
+					goto unlock;
+				cur_order = split_order;
+				split_order =
+					xas_try_split_min_order(split_order);
 			}
-			xas_split(&xas, old, order);
 
 			/*
 			 * Re-set the swap entry after splitting, and the swap
@@ -2213,26 +2214,14 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
 unlock:
 		xas_unlock_irq(&xas);
 
-		/* split needed, alloc here and retry. */
-		if (split_order) {
-			xas_split_alloc(&xas, old, split_order, gfp);
-			if (xas_error(&xas))
-				goto error;
-			alloced_shadow = old;
-			alloced_order = split_order;
-			xas_reset(&xas);
-			continue;
-		}
-
 		if (!xas_nomem(&xas, gfp))
 			break;
 	}
 
-error:
 	if (xas_error(&xas))
 		return xas_error(&xas);
 
-	return alloced_order;
+	return split_order;
 }
 
 /*
-- 
2.47.2
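
For illustration only (not part of the patch): a minimal userspace sketch
of the order walk the new loop performs. min_split_order() below is a
local stand-in, written on the assumption that xas_try_split_min_order()
rounds an order down to the next xa_node boundary (a multiple of
XA_CHUNK_SHIFT), or steps down by one when the order already sits on a
boundary; the XA_CHUNK_SHIFT value and the order-9 starting point are
taken from the example in the commit message.

#include <stdio.h>

#define XA_CHUNK_SHIFT	6	/* value assumed in the example above */

/* Local stand-in modelling the assumed xas_try_split_min_order() behavior. */
static unsigned int min_split_order(unsigned int order)
{
	if (order % XA_CHUNK_SHIFT == 0)
		return order ? order - 1 : 0;
	return order - (order % XA_CHUNK_SHIFT);
}

int main(void)
{
	unsigned int cur_order = 9;	/* order-9 large swap entry */
	unsigned int split_order = min_split_order(cur_order);
	unsigned int calls = 0;

	/* Mirrors the new "while (cur_order > 0)" loop in the patch. */
	while (cur_order > 0) {
		printf("split order-%u entry down to order-%u\n",
		       cur_order, split_order);
		calls++;
		cur_order = split_order;
		split_order = min_split_order(split_order);
	}

	printf("%u xas_try_split() calls in total\n", calls);
	return 0;
}

Under these assumptions the walk goes 9 -> 6 -> 5 -> 0, i.e. three
xas_try_split() calls, with only the 6 -> 5 step needing a new xa_node,
which lines up with the "1 xa_node instead of 8" figure above.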