Since we want to support delaying zswap initialization, we need to invoke
ops->init for any swap device that is already online when the backend is
registered.

This patch reverts commit f328c1d16e4c ("frontswap: simplify
frontswap_register_ops") and commit 633423a09cb5 ("mm: mark swap_lock and
swap_active_head static").

Signed-off-by: Liu Shixin <liushixin2@xxxxxxxxxx>
---
 include/linux/swapfile.h |  2 ++
 mm/frontswap.c           | 47 ++++++++++++++++++++++++++++++++++++++++
 mm/swapfile.c            |  4 ++--
 3 files changed, 51 insertions(+), 2 deletions(-)

diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 2fbcc9afd814..75fc069594a5 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -6,6 +6,8 @@
  * these were static in swapfile.c but frontswap.c needs them and we don't
  * want to expose them to the dozens of source files that include swap.h
  */
+extern spinlock_t swap_lock;
+extern struct plist_head swap_active_head;
 extern struct swap_info_struct *swap_info[];
 extern unsigned long generic_max_swapfile_size(void);
 /* Maximum swapfile size supported for the arch (not inclusive). */
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 620f95af81dd..449e6f499b88 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -96,11 +96,58 @@ static inline void inc_frontswap_invalidates(void) { }
  */
 int frontswap_register_ops(const struct frontswap_ops *ops)
 {
+	DECLARE_BITMAP(a, MAX_SWAPFILES);
+	DECLARE_BITMAP(b, MAX_SWAPFILES);
+	struct swap_info_struct *si;
+	unsigned int i;
+
 	if (frontswap_ops)
 		return -EINVAL;
 
+	bitmap_zero(a, MAX_SWAPFILES);
+	bitmap_zero(b, MAX_SWAPFILES);
+
+	spin_lock(&swap_lock);
+	plist_for_each_entry(si, &swap_active_head, list) {
+		if (!WARN_ON(!si->frontswap_map))
+			__set_bit(si->type, a);
+	}
+	spin_unlock(&swap_lock);
+
+	/* the new ops needs to know the currently active swap devices */
+	for_each_set_bit(i, a, MAX_SWAPFILES) {
+		pr_err("init frontswap_ops\n");
+		ops->init(i);
+	}
+
 	frontswap_ops = ops;
 	static_branch_inc(&frontswap_enabled_key);
+
+	spin_lock(&swap_lock);
+	plist_for_each_entry(si, &swap_active_head, list) {
+		if (si->frontswap_map)
+			__set_bit(si->type, b);
+	}
+	spin_unlock(&swap_lock);
+
+	/*
+	 * On the very unlikely chance that a swap device was added or
+	 * removed between setting the "a" list bits and the ops init
+	 * calls, we re-check and do init or invalidate for any changed
+	 * bits.
+	 */
+	if (unlikely(!bitmap_equal(a, b, MAX_SWAPFILES))) {
+		for (i = 0; i < MAX_SWAPFILES; i++) {
+			if (!test_bit(i, a) && test_bit(i, b)) {
+				pr_err("init frontswap_ops re\n");
+				ops->init(i);
+			} else if (test_bit(i, a) && !test_bit(i, b)) {
+				pr_err("inval frontswap_ops re\n");
+				ops->invalidate_area(i);
+			}
+		}
+	}
+
 	return 0;
 }
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 469d9af86be2..d383b282f269 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -51,7 +51,7 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
 					 unsigned char);
 static void free_swap_count_continuations(struct swap_info_struct *);
 
-static DEFINE_SPINLOCK(swap_lock);
+DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
 atomic_long_t nr_swap_pages;
 /*
@@ -77,7 +77,7 @@ static const char Unused_offset[] = "Unused swap offset entry ";
  * all active swap_info_structs
  * protected with swap_lock, and ordered by priority.
  */
-static PLIST_HEAD(swap_active_head);
+PLIST_HEAD(swap_active_head);
 
 /*
  * all available (active, not full) swap_info_structs
-- 
2.25.1
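
For reference only (not part of the patch), a minimal sketch of a backend that
relies on the restored behaviour. All demo_* names are hypothetical; the
frontswap_ops callback signatures are assumed to match the ones in the tree
this series is based on, and the declaration of frontswap_register_ops() is
assumed to be reachable via <linux/frontswap.h>. The point is only that
ops->init() now runs inside frontswap_register_ops() for every swap device
that was swapon'd before the backend registered:

#include <linux/frontswap.h>
#include <linux/module.h>

/* Hypothetical no-op backend used purely to illustrate the call flow. */
static void demo_init(unsigned type)
{
	/*
	 * With this patch, called here for each already-active swap device
	 * at registration time, and again for any later swapon.
	 */
	pr_info("demo frontswap: init swap type %u\n", type);
}

static int demo_store(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* reject everything; page goes to the real swap device */
}

static int demo_load(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* nothing was stored, so nothing to load */
}

static void demo_invalidate_page(unsigned type, pgoff_t offset) { }
static void demo_invalidate_area(unsigned type) { }

static const struct frontswap_ops demo_ops = {
	.init			= demo_init,
	.store			= demo_store,
	.load			= demo_load,
	.invalidate_page	= demo_invalidate_page,
	.invalidate_area	= demo_invalidate_area,
};

static int __init demo_frontswap_init(void)
{
	/*
	 * Before this patch, registering after swapon meant the backend never
	 * saw ->init() for the already-online device; now it does.
	 */
	return frontswap_register_ops(&demo_ops);
}
module_init(demo_frontswap_init);
MODULE_LICENSE("GPL");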