On Aug 4, 2022, at 1:39 PM, Peter Xu <peterx@xxxxxxxxxx> wrote:

> When page migration happens, we always ignore the young/dirty bit settings
> in the old pgtable, and marking the page as old in the new page table using
> either pte_mkold() or pmd_mkold(), and keeping the pte clean.
>
> That's fine from functional-wise, but that's not friendly to page reclaim
> because the moving page can be actively accessed within the procedure. Not
> to mention hardware setting the young bit can bring quite some overhead on
> some systems, e.g. x86_64 needs a few hundreds nanoseconds to set the bit.
> The same slowdown problem to dirty bits when the memory is first written
> after page migration happened.
>
> Actually we can easily remember the A/D bit configuration and recover the
> information after the page is migrated. To achieve it, define a new set of
> bits in the migration swap offset field to cache the A/D bits for old pte.
> Then when removing/recovering the migration entry, we can recover the A/D
> bits even if the page changed.
>
> One thing to mention is that here we used max_swapfile_size() to detect how
> many swp offset bits we have, and we'll only enable this feature if we know
> the swp offset can be big enough to store both the PFN value and the young
> bit. Otherwise the A/D bits are dropped like before.
>
> Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
> ---
>  include/linux/swapops.h | 91 +++++++++++++++++++++++++++++++++++++++++
>  mm/huge_memory.c        | 26 +++++++++++-
>  mm/migrate.c            |  6 ++-
>  mm/migrate_device.c     |  4 ++
>  mm/rmap.c               |  5 ++-
>  5 files changed, 128 insertions(+), 4 deletions(-)
>
> diff --git a/include/linux/swapops.h b/include/linux/swapops.h
> index 1d17e4bb3d2f..34aa448ac6ee 100644
> --- a/include/linux/swapops.h
> +++ b/include/linux/swapops.h
> @@ -8,6 +8,8 @@
>
> #ifdef CONFIG_MMU
>
> +#include <linux/swapfile.h>

Shouldn't the ifdef go into linux/swapfile.h if that's the right thing to do
to prevent others from mistakenly including it?

> +
> /*
>  * swapcache pages are stored in the swapper_space radix tree. We want to
>  * get good packing density in that tree, so the index should be dense in
> @@ -35,6 +37,24 @@
> #endif
> #define SWP_PFN_MASK	((1UL << SWP_PFN_BITS) - 1)
>
> +/**
> + * Migration swap entry specific bitfield definitions.
> + *
> + * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set
> + * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set
> + *
> + * Note: these bits will be stored in migration entries iff there're enough
> + * free bits in arch specific swp offset. By default we'll ignore A/D bits
> + * when migrating a page. Please refer to migration_entry_supports_ad()
> + * for more information.
> + */
> +#define SWP_MIG_YOUNG_BIT	(SWP_PFN_BITS)
> +#define SWP_MIG_DIRTY_BIT	(SWP_PFN_BITS + 1)
> +#define SWP_MIG_TOTAL_BITS	(SWP_PFN_BITS + 2)
> +
> +#define SWP_MIG_YOUNG	(1UL << SWP_MIG_YOUNG_BIT)
> +#define SWP_MIG_DIRTY	(1UL << SWP_MIG_DIRTY_BIT)

Any reason not to use BIT(x)? (See the BIT() sketch at the end of this mail.)

> +
> static inline bool is_pfn_swap_entry(swp_entry_t entry);
>
> /* Clear all flags but only keep swp_entry_t related information */
> @@ -265,6 +285,57 @@ static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
> 	return swp_entry(SWP_MIGRATION_WRITE, offset);
> }
>
> +/*
> + * Returns whether the host has large enough swap offset field to support
> + * carrying over pgtable A/D bits for page migrations. The result is
> + * pretty much arch specific.
> + */
> +static inline bool migration_entry_supports_ad(void)
> +{
> +	/*
> +	 * max_swapfile_size() returns the max supported swp-offset plus 1.
> +	 * We can support the migration A/D bits iff the pfn swap entry has
> +	 * the offset large enough to cover all of them (PFN, A & D bits).
> +	 */
> +#ifdef CONFIG_SWAP
> +	return max_swapfile_size() >= (1UL << SWP_MIG_TOTAL_BITS);

This is an actual function call (unless LTO has some trick). A bit of a
shame that it cannot at least be memoized. Or at least mark
max_swapfile_size() as __attribute_const__ so it would not be called twice
for make_migration_entry_young() and make_migration_entry_dirty().
(See the __attribute_const__ sketch at the end of this mail.)

> +#else
> +	return false;
> +#endif
> +}
> +
> +static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
> +{
> +	if (migration_entry_supports_ad())
> +		return swp_entry(swp_type(entry),
> +				 swp_offset(entry) | SWP_MIG_YOUNG);
> +	return entry;
> +}
> +
> +static inline bool is_migration_entry_young(swp_entry_t entry)
> +{
> +	if (migration_entry_supports_ad())
> +		return swp_offset(entry) & SWP_MIG_YOUNG;
> +	/* Keep the old behavior of aging page after migration */
> +	return false;
> +}
> +
> +static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
> +{
> +	if (migration_entry_supports_ad())
> +		return swp_entry(swp_type(entry),
> +				 swp_offset(entry) | SWP_MIG_DIRTY);
> +	return entry;
> +}
> +
> +static inline bool is_migration_entry_dirty(swp_entry_t entry)
> +{
> +	if (migration_entry_supports_ad())
> +		return swp_offset(entry) & SWP_MIG_YOUNG_BIT;

Shouldn't it be SWP_MIG_DIRTY?

> +	/* Keep the old behavior of clean page after migration */
> +	return false;
> +}
> +
> extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
> 				   spinlock_t *ptl);
> extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
> @@ -311,6 +382,26 @@ static inline int is_readable_migration_entry(swp_entry_t entry)
> 	return 0;
> }
>
> +static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
> +{
> +	return entry;
> +}
> +
> +static inline bool is_migration_entry_young(swp_entry_t entry)
> +{
> +	return false;
> +}
> +
> +static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
> +{
> +	return entry;
> +}
> +
> +static inline bool is_migration_entry_dirty(swp_entry_t entry)
> +{
> +	return false;
> +}
> +
> #endif

While at it, can you change to:

#endif /* CONFIG_MIGRATION */

[ these ifdefs burn my eyes ]

Other than that looks good.

Thanks,
Nadav
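
For illustration only, the BIT() suggestion above could look roughly like the
lines below. This is a sketch, not part of the patch, and it assumes BIT()
from <linux/bits.h> is usable from this header:

	#include <linux/bits.h>

	/* same values as the open-coded 1UL << shifts, via the common helper */
	#define SWP_MIG_YOUNG	BIT(SWP_MIG_YOUNG_BIT)
	#define SWP_MIG_DIRTY	BIT(SWP_MIG_DIRTY_BIT)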
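
Likewise, the __attribute_const__ idea above might amount to roughly the
declaration below. This is a sketch against the existing prototype in
linux/swapfile.h; whether the attribute is actually safe depends on
max_swapfile_size() always returning the same value once swap is set up:

	/* hypothetical annotation: lets the compiler fold repeated calls */
	extern unsigned long max_swapfile_size(void) __attribute_const__;

With that, back-to-back calls from make_migration_entry_young() and
make_migration_entry_dirty() in the same caller could be collapsed into one,
which is what the comment above is asking for; memoizing the result in a
boolean computed once at init time would be another way to get there.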