The quilt patch titled
     Subject: sched: remove wait bookmarks
has been removed from the -mm tree.  Its filename was
     sched-remove-wait-bookmarks.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Subject: sched: remove wait bookmarks
Date: Tue, 10 Oct 2023 04:58:29 +0100

There are no users of wait bookmarks left, so simplify the wait code by
removing them.

Link: https://lkml.kernel.org/r/20231010035829.544242-2-willy@xxxxxxxxxxxxx
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Acked-by: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Benjamin Segall <bsegall@xxxxxxxxxx>
Cc: Bin Lai <sclaibin@xxxxxxxxx>
Cc: Daniel Bristot de Oliveira <bristot@xxxxxxxxxx>
Cc: Dietmar Eggemann <dietmar.eggemann@xxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Juri Lelli <juri.lelli@xxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Steven Rostedt (Google) <rostedt@xxxxxxxxxxx>
Cc: Valentin Schneider <vschneid@xxxxxxxxxx>
Cc: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/wait.h |    9 ++----
 kernel/sched/wait.c  |   60 ++++++-----------------------------------
 2 files changed, 13 insertions(+), 56 deletions(-)

--- a/include/linux/wait.h~sched-remove-wait-bookmarks
+++ a/include/linux/wait.h
@@ -19,10 +19,9 @@ int default_wake_function(struct wait_qu
 /* wait_queue_entry::flags */
 #define WQ_FLAG_EXCLUSIVE   0x01
 #define WQ_FLAG_WOKEN       0x02
-#define WQ_FLAG_BOOKMARK    0x04
-#define WQ_FLAG_CUSTOM      0x08
-#define WQ_FLAG_DONE        0x10
-#define WQ_FLAG_PRIORITY    0x20
+#define WQ_FLAG_CUSTOM      0x04
+#define WQ_FLAG_DONE        0x08
+#define WQ_FLAG_PRIORITY    0x10
 
 /*
  * A single wait-queue entry structure:
@@ -212,8 +211,6 @@ __remove_wait_queue(struct wait_queue_he
 int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
 void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
-void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
-		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
--- a/kernel/sched/wait.c~sched-remove-wait-bookmarks
+++ a/kernel/sched/wait.c
@@ -58,13 +58,6 @@ void remove_wait_queue(struct wait_queue
 EXPORT_SYMBOL(remove_wait_queue);
 
 /*
- * Scan threshold to break wait queue walk.
- * This allows a waker to take a break from holding the
- * wait queue lock during the wait queue walk.
- */
-#define WAITQUEUE_WALK_BREAK_CNT 64
-
-/*
  * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
  * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
  * number) then we wake that number of exclusive tasks, and potentially all
@@ -78,21 +71,13 @@ EXPORT_SYMBOL(remove_wait_queue);
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
 static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
-			int nr_exclusive, int wake_flags, void *key,
-			wait_queue_entry_t *bookmark)
+			int nr_exclusive, int wake_flags, void *key)
 {
 	wait_queue_entry_t *curr, *next;
-	int cnt = 0;
 
 	lockdep_assert_held(&wq_head->lock);
 
-	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
-		curr = list_next_entry(bookmark, entry);
-
-		list_del(&bookmark->entry);
-		bookmark->flags = 0;
-	} else
-		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
+	curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
 
 	if (&curr->entry == &wq_head->head)
 		return nr_exclusive;
@@ -101,21 +86,11 @@ static int __wake_up_common(struct wait_
 		unsigned flags = curr->flags;
 		int ret;
 
-		if (flags & WQ_FLAG_BOOKMARK)
-			continue;
-
 		ret = curr->func(curr, mode, wake_flags, key);
 		if (ret < 0)
 			break;
 		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
 			break;
-
-		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
-				(&next->entry != &wq_head->head)) {
-			bookmark->flags = WQ_FLAG_BOOKMARK;
-			list_add_tail(&bookmark->entry, &next->entry);
-			break;
-		}
 	}
 
 	return nr_exclusive;
@@ -125,20 +100,12 @@ static int __wake_up_common_lock(struct
 			int nr_exclusive, int wake_flags, void *key)
 {
 	unsigned long flags;
-	wait_queue_entry_t bookmark;
-	int remaining = nr_exclusive;
+	int remaining;
 
-	bookmark.flags = 0;
-	bookmark.private = NULL;
-	bookmark.func = NULL;
-	INIT_LIST_HEAD(&bookmark.entry);
-
-	do {
-		spin_lock_irqsave(&wq_head->lock, flags);
-		remaining = __wake_up_common(wq_head, mode, remaining,
-						wake_flags, key, &bookmark);
-		spin_unlock_irqrestore(&wq_head->lock, flags);
-	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
+	spin_lock_irqsave(&wq_head->lock, flags);
+	remaining = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags,
+			key);
+	spin_unlock_irqrestore(&wq_head->lock, flags);
 
 	return nr_exclusive - remaining;
 }
@@ -171,23 +138,16 @@ void __wake_up_on_current_cpu(struct wai
  */
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
 {
-	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
+	__wake_up_common(wq_head, mode, nr, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked);
 
 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
 {
-	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
+	__wake_up_common(wq_head, mode, 1, 0, key);
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked_key);
 
-void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
-		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
-{
-	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
-}
-EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
-
 /**
  * __wake_up_sync_key - wake up threads blocked on a waitqueue.
  * @wq_head: the waitqueue
@@ -233,7 +193,7 @@ EXPORT_SYMBOL_GPL(__wake_up_sync_key);
 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
 				unsigned int mode, void *key)
 {
-	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
+	__wake_up_common(wq_head, mode, 1, WF_SYNC, key);
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);
_

Patches currently in -mm which might be from willy@xxxxxxxxxxxxx are

buffer-make-folio_create_empty_buffers-return-a-buffer_head.patch
mpage-convert-map_buffer_to_folio-to-folio_create_empty_buffers.patch
ext4-convert-to-folio_create_empty_buffers.patch
buffer-add-get_nth_bh.patch
gfs2-convert-inode-unstuffing-to-use-a-folio.patch
gfs2-convert-gfs2_getbuf-to-folios.patch
gfs2-convert-gfs2_getjdatabuf-to-use-a-folio.patch
gfs2-convert-gfs2_write_buf_to_page-to-use-a-folio.patch
nilfs2-convert-nilfs_mdt_freeze_buffer-to-use-a-folio.patch
nilfs2-convert-nilfs_grab_buffer-to-use-a-folio.patch
nilfs2-convert-nilfs_copy_page-to-nilfs_copy_folio.patch
nilfs2-convert-nilfs_mdt_forget_block-to-use-a-folio.patch
nilfs2-convert-nilfs_mdt_get_frozen_buffer-to-use-a-folio.patch
nilfs2-remove-nilfs_page_get_nth_block.patch
nilfs2-convert-nilfs_lookup_dirty_data_buffers-to-use-folio_create_empty_buffers.patch
ntfs-convert-ntfs_read_block-to-use-a-folio.patch
ntfs-convert-ntfs_writepage-to-use-a-folio.patch
ntfs-convert-ntfs_prepare_pages_for_non_resident_write-to-folios.patch
ntfs3-convert-ntfs_zero_range-to-use-a-folio.patch
ocfs2-convert-ocfs2_map_page_blocks-to-use-a-folio.patch
reiserfs-convert-writepage-to-use-a-folio.patch
ufs-add-ufs_get_locked_folio-and-ufs_put_locked_folio.patch
ufs-use-ufs_get_locked_folio-in-ufs_alloc_lastblock.patch
ufs-convert-ufs_change_blocknr-to-use-folios.patch
ufs-remove-ufs_get_locked_page.patch
buffer-remove-folio_create_empty_buffers.patch
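
[Editor's note] For readers following the change, below is a rough sketch of how the wakeup path reads once the bookmark plumbing is gone. It is reconstructed from the hunks above; the list_for_each_entry_safe_from() walk is unchanged surrounding context that the diff does not show, and indentation may differ slightly from the post-merge kernel/sched/wait.c, so treat it as an illustration rather than the verbatim source.

/*
 * Illustration (not verbatim source): the wakeup walk after this patch.
 * There is no bookmark entry to park in the queue and no
 * WAITQUEUE_WALK_BREAK_CNT restart loop, so the whole list is walked in a
 * single pass while the waitqueue lock is held.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_entry_t *curr, *next;

	lockdep_assert_held(&wq_head->lock);

	curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}

	return nr_exclusive;
}

static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	int remaining;

	/* Lock once and walk the whole queue; no drop/retake retry loop. */
	spin_lock_irqsave(&wq_head->lock, flags);
	remaining = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags,
			key);
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return nr_exclusive - remaining;
}

The visible behavioural consequence is that __wake_up_common_lock() no longer drops and retakes the waitqueue lock roughly every WAITQUEUE_WALK_BREAK_CNT (64) entries, since that break mechanism was implemented through the now-removed bookmark entries.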