Similar to the lruvec lock, we use the same approach to make the split
queue lock safe when LRU pages are reparented.

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 mm/huge_memory.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9d8dfa82991a..12950d4988e6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -535,9 +535,22 @@ static struct deferred_split *split_queue_lock(struct page *head)
 {
 	struct deferred_split *queue;
 
+	rcu_read_lock();
+retry:
 	queue = page_split_queue(head);
 	spin_lock(&queue->split_queue_lock);
 
+	if (unlikely(split_queue_memcg(queue) != page_memcg(head))) {
+		spin_unlock(&queue->split_queue_lock);
+		goto retry;
+	}
+
+	/*
+	 * Preemption is disabled inside spin_lock(), which can serve
+	 * as an RCU read-side critical section.
+	 */
+	rcu_read_unlock();
+
 	return queue;
 }
 
@@ -546,9 +559,19 @@ split_queue_lock_irqsave(struct page *head, unsigned long *flags)
 {
 	struct deferred_split *queue;
 
+	rcu_read_lock();
+retry:
 	queue = page_split_queue(head);
 	spin_lock_irqsave(&queue->split_queue_lock, *flags);
 
+	if (unlikely(split_queue_memcg(queue) != page_memcg(head))) {
+		spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
+		goto retry;
+	}
+
+	/* See the comments in split_queue_lock(). */
+	rcu_read_unlock();
+
 	return queue;
 }
 
-- 
2.11.0
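
For readers less familiar with this pattern, the core idea is the same
lookup/lock/revalidate loop used for the lruvec lock: look up the queue
the page currently belongs to, take that queue's lock, and then recheck
that the page was not reparented in the meantime; if it was, drop the
stale lock and retry. Below is a minimal, self-contained userspace
sketch of that pattern, not the kernel code itself: the names (struct
object, object_queue_lock(), owner queue, etc.) are illustrative, RCU is
replaced by a plain atomic pointer load, and a pthread mutex stands in
for the spinlock.

	/*
	 * Simplified userspace model of the "lock, then revalidate" pattern.
	 * All names are illustrative; RCU is modeled with an atomic pointer.
	 */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct queue {
		pthread_mutex_t lock;
		int id;
	};

	struct object {
		_Atomic(struct queue *) queue;	/* may be switched concurrently */
	};

	/*
	 * Return the object's queue with its lock held.  If the object moved
	 * to another queue between the lookup and the lock acquisition, drop
	 * the stale lock and retry against the new queue.
	 */
	static struct queue *object_queue_lock(struct object *obj)
	{
		struct queue *q;

	retry:
		q = atomic_load(&obj->queue);
		pthread_mutex_lock(&q->lock);
		if (q != atomic_load(&obj->queue)) {
			/* The object was reparented; q no longer protects it. */
			pthread_mutex_unlock(&q->lock);
			goto retry;
		}
		return q;
	}

	int main(void)
	{
		struct queue q1 = { PTHREAD_MUTEX_INITIALIZER, 1 };
		struct object obj = { &q1 };
		struct queue *locked = object_queue_lock(&obj);

		printf("locked queue %d\n", locked->id);
		pthread_mutex_unlock(&locked->lock);
		return 0;
	}

In the sketch the atomic pointer only stands in for the page->memcg
association checked by split_queue_memcg() != page_memcg(); the point
being illustrated is just the retry loop: take the lock you looked up,
then verify the association is still current before trusting it.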