This is like lock_page_killable() but for use by callers who know they
have a folio.  Convert __lock_page_killable() to be
__lock_folio_killable().  This saves one call to compound_head() per
contended call to lock_page_killable().

__lock_folio_killable() is 20 bytes smaller than __lock_page_killable()
was.  lock_page_maybe_drop_mmap() shrinks by 68 bytes and
__lock_page_or_retry() shrinks by 66 bytes.  That's a total of 154 bytes
of text saved.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/pagemap.h | 15 ++++++++++-----
 mm/filemap.c            | 17 +++++++++--------
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c96ba0dfe111..aa7f564e5ecf 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -694,7 +694,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 }
 
 void __lock_folio(struct folio *folio);
-extern int __lock_page_killable(struct page *page);
+int __lock_folio_killable(struct folio *folio);
 extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				unsigned int flags);
@@ -735,6 +735,14 @@ static inline void lock_page(struct page *page)
 		__lock_folio(folio);
 }
 
+static inline int lock_folio_killable(struct folio *folio)
+{
+	might_sleep();
+	if (!trylock_folio(folio))
+		return __lock_folio_killable(folio);
+	return 0;
+}
+
 /*
  * lock_page_killable is like lock_page but can be interrupted by fatal
  * signals.  It returns 0 if it locked the page and -EINTR if it was
@@ -742,10 +750,7 @@ static inline void lock_page(struct page *page)
  */
 static inline int lock_page_killable(struct page *page)
 {
-	might_sleep();
-	if (!trylock_page(page))
-		return __lock_page_killable(page);
-	return 0;
+	return lock_folio_killable(page_folio(page));
 }
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 99c05e2c0eea..7cac47db78a5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1546,14 +1546,13 @@ void __lock_folio(struct folio *folio)
 }
 EXPORT_SYMBOL(__lock_folio);
 
-int __lock_page_killable(struct page *__page)
+int __lock_folio_killable(struct folio *folio)
 {
-	struct page *page = compound_head(__page);
-	wait_queue_head_t *q = page_waitqueue(page);
-	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
+	wait_queue_head_t *q = page_waitqueue(&folio->page);
+	return wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_KILLABLE,
 					EXCLUSIVE);
 }
-EXPORT_SYMBOL_GPL(__lock_page_killable);
+EXPORT_SYMBOL_GPL(__lock_folio_killable);
 
 int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 {
@@ -1595,6 +1594,8 @@ int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			 unsigned int flags)
 {
+	struct folio *folio = page_folio(page);
+
 	if (fault_flag_allow_retry_first(flags)) {
 		/*
 		 * CAUTION! In this case, mmap_lock is not released
@@ -1613,13 +1614,13 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 	if (flags & FAULT_FLAG_KILLABLE) {
 		int ret;
 
-		ret = __lock_page_killable(page);
+		ret = __lock_folio_killable(folio);
 		if (ret) {
 			mmap_read_unlock(mm);
 			return 0;
 		}
 	} else {
-		__lock_folio(page_folio(page));
+		__lock_folio(folio);
 	}
 
 	return 1;
@@ -2781,7 +2782,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 		*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
 
 	if (vmf->flags & FAULT_FLAG_KILLABLE) {
-		if (__lock_page_killable(&folio->page)) {
+		if (__lock_folio_killable(folio)) {
 			/*
 			 * We didn't have the right flags to drop the mmap_lock,
 			 * but all fault_handlers only check for fatal signals
-- 
2.30.2
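
[Illustration only; not part of the patch.  A minimal sketch of how a
caller that already has a folio might use the new helper.  The function
example_touch_folio() is made up for this sketch, and unlock_folio() is
assumed to be available from earlier in this series.]

static int example_touch_folio(struct folio *folio)
{
	int err;

	/* Returns 0 once the folio is locked, -EINTR on a fatal signal. */
	err = lock_folio_killable(folio);
	if (err)
		return err;

	/* ... operate on the locked folio ... */

	unlock_folio(folio);
	return 0;
}

Because the caller passes the folio directly, the compound_head() call
that lock_page_killable() must perform via page_folio() is avoided.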