On Tue, Nov 03, 2020 at 02:08:16PM +0300, Kirill A. Shutemov wrote:
> On Sun, Nov 01, 2020 at 07:08:13PM +0200, Mike Rapoport wrote:
> > diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
> > index 46b1804c1ddf..054c8cce4236 100644
> > --- a/kernel/power/snapshot.c
> > +++ b/kernel/power/snapshot.c
> > @@ -76,6 +76,32 @@ static inline void hibernate_restore_protect_page(void *page_address) {}
> >  static inline void hibernate_restore_unprotect_page(void *page_address) {}
> >  #endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
> >  
> > +static inline void hibernate_map_page(struct page *page, int enable)
> > +{
> > +	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
> > +		unsigned long addr = (unsigned long)page_address(page);
> > +		int ret;
> > +
> > +		/*
> > +		 * This should not fail because remapping a page here means
> > +		 * that we only update protection bits in an existing PTE.
> > +		 * It is still worth to have WARN_ON() here if something
> > +		 * changes and this will no longer be the case.
> > +		 */
> > +		if (enable)
> > +			ret = set_direct_map_default_noflush(page);
> > +		else
> > +			ret = set_direct_map_invalid_noflush(page);
> > +
> > +		if (WARN_ON(ret))
> 
> _ONCE?

I've changed it to pr_warn() after David pointed out that people enable
panic_on_warn in production kernels.

> > +			return;
> > +
> > +		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
> > +	} else {
> > +		debug_pagealloc_map_pages(page, 1, enable);
> > +	}
> > +}
> > +
> >  static int swsusp_page_is_free(struct page *);
> >  static void swsusp_set_page_forbidden(struct page *);
> >  static void swsusp_unset_page_forbidden(struct page *);
> 
> -- 
> Kirill A. Shutemov

-- 
Sincerely yours,
Mike.
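
For reference, a minimal sketch of how the hunk might look with the
WARN_ON() swapped for pr_warn() as described above. This is not quoted
from a posted update; the warning message string is an assumption made
for illustration:

	static inline void hibernate_map_page(struct page *page, int enable)
	{
		if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
			unsigned long addr = (unsigned long)page_address(page);
			int ret;

			/*
			 * This should not fail because remapping a page here
			 * means that we only update protection bits in an
			 * existing PTE. It is still worth warning in case
			 * something changes and this no longer holds.
			 */
			if (enable)
				ret = set_direct_map_default_noflush(page);
			else
				ret = set_direct_map_invalid_noflush(page);

			if (ret) {
				/* message text is illustrative only */
				pr_warn("hibernate: failed to remap page\n");
				return;
			}

			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		} else {
			debug_pagealloc_map_pages(page, 1, enable);
		}
	}

Unlike WARN_ON(), pr_warn() only logs, so a failed remap cannot trigger
a panic on systems booted with panic_on_warn.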