Similar to the lruvec lock, we use the same approach to make the
deferred split queue lock safe when LRU pages are reparented: the
queue is looked up under rcu_read_lock(), the lock is taken, and the
page's memcg is then re-checked under the lock, retrying if it changed
in between.

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 mm/huge_memory.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 275dbfc8b2ae..aa5d7b72d5fc 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -497,6 +497,8 @@ static struct deferred_split *lock_split_queue(struct page *page)
 	struct deferred_split *queue;
 	struct mem_cgroup *memcg;
 
+	rcu_read_lock();
+retry:
 	memcg = page_memcg(compound_head(page));
 	if (memcg)
 		queue = &memcg->deferred_split_queue;
@@ -504,6 +506,17 @@ static struct deferred_split *lock_split_queue(struct page *page)
 		queue = &NODE_DATA(page_to_nid(page))->deferred_split_queue;
 
 	spin_lock(&queue->split_queue_lock);
+	if (unlikely(memcg != page_memcg(page))) {
+		spin_unlock(&queue->split_queue_lock);
+		goto retry;
+	}
+
+	/*
+	 * Preemption is disabled by spin_lock(), which can serve as an
+	 * RCU read-side critical section.
+	 */
+	rcu_read_unlock();
+
 	return queue;
 }
 
@@ -513,6 +526,8 @@ static struct deferred_split *lock_split_queue_irqsave(struct page *page,
 	struct deferred_split *queue;
 	struct mem_cgroup *memcg;
 
+	rcu_read_lock();
+retry:
 	memcg = page_memcg(compound_head(page));
 	if (memcg)
 		queue = &memcg->deferred_split_queue;
@@ -520,6 +535,14 @@ static struct deferred_split *lock_split_queue_irqsave(struct page *page,
 		queue = &NODE_DATA(page_to_nid(page))->deferred_split_queue;
 
 	spin_lock_irqsave(&queue->split_queue_lock, *flags);
+	if (unlikely(memcg != page_memcg(page))) {
+		spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
+		goto retry;
+	}
+
+	/* See the comments in lock_split_queue(). */
+	rcu_read_unlock();
+
 	return queue;
 }
 #else
-- 
2.11.0
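
For reference, the lock-then-recheck retry pattern used above can be
shown standalone. The following is a minimal userspace sketch, not
kernel code: the names (struct owner, struct obj, lock_owner) are
hypothetical, and C11 atomics plus a pthread mutex stand in for RCU
and the spinlock. In the kernel, rcu_read_lock() additionally keeps
the looked-up memcg alive across the race window; the sketch sidesteps
that by assuming owners are never freed.

/* Sketch of the lock-then-recheck pattern; all names are hypothetical. */
#include <pthread.h>
#include <stdatomic.h>

struct owner {
	pthread_mutex_t lock;		/* stands in for split_queue_lock */
	/* ... per-owner state, e.g. a deferred-split list ... */
};

struct obj {
	_Atomic(struct owner *) owner;	/* may be switched concurrently */
};

/* Return with the lock of @o's *current* owner held. */
static struct owner *lock_owner(struct obj *o)
{
	struct owner *w;

retry:
	w = atomic_load_explicit(&o->owner, memory_order_acquire);
	pthread_mutex_lock(&w->lock);
	/*
	 * The owner may have changed between the load and the lock
	 * acquisition (the object was "reparented").  Re-check under
	 * the lock and retry so that the lock we return actually
	 * protects the object right now.
	 */
	if (w != atomic_load_explicit(&o->owner, memory_order_acquire)) {
		pthread_mutex_unlock(&w->lock);
		goto retry;
	}
	return w;
}

The re-check is conclusive only if the writer that switches obj->owner
does so while holding the old owner's lock, which is what the
reparenting path here is expected to do with the old queue's
split_queue_lock: once we hold the lock and the pointer still matches,
no reparent can be in flight.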