On 12/4/20 3:52 AM, Zhaoyang Huang wrote:
> The scenario in which "Free swap = -4kB" happens on my system is caused
> by several get_swap_pages() calls racing with each other while
> show_swap_cache_info() runs simultaneously. There is no need to add a lock
> in get_swap_page_of_type() as we remove the "Presub/PosAdd" pattern there.
>
> ProcessA                      ProcessB                      ProcessC
> ngoals = 1                    ngoals = 1
> avail = nr_swap_pages(1)
>                               avail = nr_swap_pages(1)
> nr_swap_pages(1) -= ngoals
>                               nr_swap_pages(0) -= ngoals
>                                                             nr_swap_pages = -1
>
> Signed-off-by: Zhaoyang Huang <zhaoyang.huang@xxxxxxxxxx>

Better now.

Acked-by: Vlastimil Babka <vbabka@xxxxxxx>

> ---
> change in v2: fix unpaired spin_lock
> ---
> ---
>  mm/swapfile.c | 11 ++++++-----
>  1 file changed, 6 insertions(+), 5 deletions(-)
>
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index cf63b5f..1212f17 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -974,9 +974,13 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
>  	/* Only single cluster request supported */
>  	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
>
> +	spin_lock(&swap_avail_lock);
> +
>  	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
> -	if (avail_pgs <= 0)
> +	if (avail_pgs <= 0) {
> +		spin_unlock(&swap_avail_lock);
>  		goto noswap;
> +	}
>
>  	if (n_goal > SWAP_BATCH)
>  		n_goal = SWAP_BATCH;
> @@ -986,8 +990,6 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
>
>  	atomic_long_sub(n_goal * size, &nr_swap_pages);
>
> -	spin_lock(&swap_avail_lock);
> -
>  start_over:
>  	node = numa_node_id();
>  	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
> @@ -1061,14 +1063,13 @@ swp_entry_t get_swap_page_of_type(int type)
>
>  	spin_lock(&si->lock);
>  	if (si->flags & SWP_WRITEOK) {
> -		atomic_long_dec(&nr_swap_pages);
>  		/* This is called for allocating swap entry, not cache */
>  		offset = scan_swap_map(si, 1);
>  		if (offset) {
> +			atomic_long_dec(&nr_swap_pages);
>  			spin_unlock(&si->lock);
>  			return swp_entry(type, offset);
>  		}
> -		atomic_long_inc(&nr_swap_pages);
>  	}
>  	spin_unlock(&si->lock);
>  fail:
>
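
For anyone following along, a minimal userspace sketch of the race described in
the changelog (not part of the patch, and not kernel code): the names
nr_swap_pages and swap_avail_lock are reused only as an analogy, using pthreads
and C11 atomics to show why the check and the subtraction must happen under one
lock.

/*
 * Userspace analogue of the race above: two "allocators" each see one
 * free page and both subtract, leaving the counter at -1, the kind of
 * transient value that show_swap_cache_info() reports as
 * "Free swap = -4kB". Serializing the check and the subtraction with a
 * mutex (as the patch does with swap_avail_lock) prevents it.
 *
 * Build with: gcc -pthread race_demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_swap_pages = 1;	/* one free "page" */
static pthread_mutex_t swap_avail_lock = PTHREAD_MUTEX_INITIALIZER;

static void *alloc_one(void *use_lock)
{
	if (use_lock)
		pthread_mutex_lock(&swap_avail_lock);

	long avail = atomic_load(&nr_swap_pages);	/* check ...         */
	if (avail > 0)
		atomic_fetch_sub(&nr_swap_pages, 1);	/* ... then subtract */

	if (use_lock)
		pthread_mutex_unlock(&swap_avail_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	/* Unlocked: both threads can observe avail == 1 and both subtract. */
	pthread_create(&a, NULL, alloc_one, (void *)0);
	pthread_create(&b, NULL, alloc_one, (void *)0);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("unlocked: nr_swap_pages = %ld\n", atomic_load(&nr_swap_pages));

	/* Locked: whichever thread takes the mutex second sees 0 and backs
	 * off, so the counter never goes negative. */
	atomic_store(&nr_swap_pages, 1);
	pthread_create(&a, NULL, alloc_one, (void *)1);
	pthread_create(&b, NULL, alloc_one, (void *)1);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("locked:   nr_swap_pages = %ld\n", atomic_load(&nr_swap_pages));

	return 0;
}

The get_swap_page_of_type() half of the patch gets the same effect without a
lock: by deferring atomic_long_dec() until scan_swap_map() has actually
succeeded, it never publishes a value it might have to add back, so no
transient negative is visible to readers of nr_swap_pages.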