Add a hook when a new mapping is added to an order-0 mlocked folio to
check if the mapcount overflowed beyond the allocated 20 bits into the
mlock_count. This is useful as an alarm to tell whether this happens
frequently enough to cause a problem. We do so by checking whether the
lower 20 bits of the mapcount are all zeros after the increment.

For file-backed folios, we do not hold the folio lock while adding a new
mapping, so there's a chance that two mappings are added in quick
succession such that the warning doesn't fire. Don't sweat it.

Signed-off-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
---
 include/linux/mm.h |  4 ++++
 mm/mlock.c         | 13 +++++++++++++
 mm/rmap.c          |  2 ++
 3 files changed, 19 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index b341477a83e8..917f81996e22 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1050,6 +1050,7 @@ unsigned long vmalloc_to_pfn(const void *addr);
 extern bool is_vmalloc_addr(const void *x);
 extern int is_vmalloc_or_module_addr(const void *x);
 extern int folio_mlocked_mapcount(struct folio *folio);
+extern void folio_mlock_map_check(struct folio *folio);
 extern void folio_mlock_unmap_check(struct folio *folio);
 #else
 static inline bool is_vmalloc_addr(const void *x)
@@ -1064,6 +1065,9 @@ static inline int folio_mlocked_mapcount(struct folio *folio)
 {
 	return 0;
 }
+static inline void folio_mlock_map_check(struct folio *folio)
+{
+}
 static inline void folio_mlock_unmap_check(struct folio *folio)
 {
 }
diff --git a/mm/mlock.c b/mm/mlock.c
index 8261df11d6a6..f8b3fb1b2986 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -140,6 +140,19 @@ static int folio_mlock_count_dec(struct folio *folio)
 	return mlock_count - 1;
 }
 
+/*
+ * Call after incrementing the mapcount. WARN_ON() if the mapcount overflows
+ * beyond the lower 20 bits for order-0 mlocked folios.
+ */
+void folio_mlock_map_check(struct folio *folio)
+{
+	int mapcount = atomic_read(&folio->_mapcount) + 1;
+
+	/* WARN if we overflow beyond the lower 20 bits */
+	if (unlikely(!folio_test_large(folio) && folio_test_mlocked(folio)))
+		WARN_ON((mapcount & MLOCK_MAPCOUNT_MASK) == 0);
+}
+
 /*
  * Call after decrementing the mapcount. If the mapcount previously overflowed
  * beyond the lower 20 bits for an order-0 mlocked folio, munlock() have
diff --git a/mm/rmap.c b/mm/rmap.c
index 02e558551f15..092529319782 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1228,6 +1228,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 			nr = atomic_inc_return_relaxed(mapped);
 			nr = (nr < COMPOUND_MAPPED);
 		}
+		folio_mlock_map_check(folio);
 	} else if (folio_test_pmd_mappable(folio)) {
 		/* That test is redundant: it's for safety or to optimize out */
 
@@ -1330,6 +1331,7 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
 			nr = atomic_inc_return_relaxed(mapped);
 			nr = (nr < COMPOUND_MAPPED);
 		}
+		folio_mlock_map_check(folio);
 	} else if (folio_test_pmd_mappable(folio)) {
 		/* That test is redundant: it's for safety or to optimize out */
 
-- 
2.41.0.162.gfafddb0af9-goog