Port the __I_LRU_ISOLATING mechanism to use the new var event mechanism.

Signed-off-by: Christian Brauner <brauner@xxxxxxxxxx>
---
 fs/inode.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/fs/inode.c b/fs/inode.c
index 37f20c7c2f72..8fb8e4f9acc3 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -511,24 +511,35 @@ static void inode_unpin_lru_isolating(struct inode *inode)
 	spin_lock(&inode->i_lock);
 	WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
 	inode->i_state &= ~I_LRU_ISOLATING;
-	smp_mb();
-	wake_up_bit(&inode->i_state, __I_LRU_ISOLATING);
+	/* Called with inode->i_lock which ensures memory ordering. */
+	inode_wake_up_bit(inode, __I_LRU_ISOLATING);
 	spin_unlock(&inode->i_lock);
 }
 
 static void inode_wait_for_lru_isolating(struct inode *inode)
 {
+	struct wait_bit_queue_entry wqe;
+	struct wait_queue_head *wq_head;
+
 	lockdep_assert_held(&inode->i_lock);
-	if (inode->i_state & I_LRU_ISOLATING) {
-		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LRU_ISOLATING);
-		wait_queue_head_t *wqh;
+	if (!(inode->i_state & I_LRU_ISOLATING))
+		return;
 
-		wqh = bit_waitqueue(&inode->i_state, __I_LRU_ISOLATING);
+	wq_head = inode_bit_waitqueue(&wqe, inode, __I_LRU_ISOLATING);
+	for (;;) {
+		prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
+		/*
+		 * Checking I_LRU_ISOLATING with inode->i_lock guarantees
+		 * memory ordering.
+		 */
+		if (!(inode->i_state & I_LRU_ISOLATING))
+			break;
 		spin_unlock(&inode->i_lock);
-		__wait_on_bit(wqh, &wq, bit_wait, TASK_UNINTERRUPTIBLE);
+		schedule();
 		spin_lock(&inode->i_lock);
-		WARN_ON(inode->i_state & I_LRU_ISOLATING);
 	}
+	finish_wait(wq_head, &wqe.wq_entry);
+	WARN_ON(inode->i_state & I_LRU_ISOLATING);
 }
 
 /**
-- 
2.43.0
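
Note for readers unfamiliar with the new helpers: below is a minimal sketch of what inode_bit_waitqueue() and inode_wake_up_bit() are assumed to look like, based on the var event infrastructure added earlier in this series. The inode_state_wait_address() helper is assumed to come from the prerequisite patch, so treat this purely as an illustration of how the wait entry and waitqueue are keyed off a per-bit address, not as the authoritative definitions.

	/*
	 * Sketch only: assumed shape of the helpers this patch relies on.
	 * Both derive a per-bit wait address from the inode and key the
	 * generic var event waitqueue (__var_waitqueue()/wake_up_var())
	 * off that address.
	 */
	static inline struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
								   struct inode *inode, u32 bit)
	{
		void *bit_address = inode_state_wait_address(inode, bit);

		/* Prepare a var event wait entry bound to this address. */
		init_wait_var_entry(wqe, bit_address, 0);
		return __var_waitqueue(bit_address);
	}

	static inline void inode_wake_up_bit(struct inode *inode, u32 bit)
	{
		/* Caller is responsible for the required memory ordering. */
		wake_up_var(inode_state_wait_address(inode, bit));
	}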