Since we are supposed to delay zswap initialization, we need to invoke ops->init for the swap device which is already online when registering the backend.