When building the kernel with sparse enabled ('C=1'), the following
warnings can be seen:

fs/proc/task_mmu.c:587:17: warning: context imbalance in 'smaps_pte_range' - unexpected unlock
fs/proc/task_mmu.c:1145:28: warning: context imbalance in 'clear_refs_pte_range' - unexpected unlock
fs/proc/task_mmu.c:1473:28: warning: context imbalance in 'pagemap_pmd_range' - unexpected unlock
fs/proc/task_mmu.c:1811:28: warning: context imbalance in 'gather_pte_stats' - unexpected unlock

Rework to add __acquire() and __release() annotations that tell sparse
the lock context is balanced.

Signed-off-by: Anders Roxell <anders.roxell@xxxxxxxxxx>
---
 fs/proc/task_mmu.c | 31 +++++++++++++++++++++++++++++++
 mm/huge_memory.c   |  4 +++-
 2 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ad667dbc96f5..6b702c030802 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -584,6 +584,13 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 
         ptl = pmd_trans_huge_lock(pmd, vma);
         if (ptl) {
+                /*
+                 * Fake out sparse.
+                 * The pmd lock is taken in pmd_trans_huge_lock() and if we
+                 * get in here we have to unlock. We tell sparse that
+                 * everything is as it should be with the __acquire() directive.
+                 */
+                __acquire(ptl);
                 smaps_pmd_entry(pmd, addr, walk);
                 spin_unlock(ptl);
                 goto out;
@@ -1127,6 +1134,14 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 
         ptl = pmd_trans_huge_lock(pmd, vma);
         if (ptl) {
+                /*
+                 * Fake out sparse.
+                 * The pmd lock is taken in pmd_trans_huge_lock() and if we
+                 * get in here we have to unlock. We tell sparse that
+                 * everything is as it should be with the __acquire() directive.
+                 */
+                __acquire(ptl);
+
                 if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                         clear_soft_dirty_pmd(vma, addr, pmd);
                         goto out;
@@ -1418,6 +1433,14 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
                 pmd_t pmd = *pmdp;
                 struct page *page = NULL;
 
+                /*
+                 * Fake out sparse.
+                 * The pmd lock is taken in pmd_trans_huge_lock() and if we
+                 * get in here we have to unlock. We tell sparse that
+                 * everything is as it should be with the __acquire() directive.
+                 */
+                __acquire(ptl);
+
                 if (vma->vm_flags & VM_SOFTDIRTY)
                         flags |= PM_SOFT_DIRTY;
 
@@ -1804,6 +1827,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
         if (ptl) {
                 struct page *page;
 
+                /*
+                 * Fake out sparse.
+                 * The pmd lock is taken in pmd_trans_huge_lock() and if we
+                 * get in here we have to unlock. We tell sparse that
+                 * everything is as it should be with the __acquire() directive.
+                 */
+                __acquire(ptl);
+
                 page = can_gather_numa_stats_pmd(*pmd, vma, addr);
                 if (page)
                         gather_stats(page, md, pmd_dirty(*pmd),
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e5ea5f775d5c..c873bf5947ae 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1833,8 +1833,10 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
         spinlock_t *ptl;
         ptl = pmd_lock(vma->vm_mm, pmd);
         if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
-                        pmd_devmap(*pmd)))
+                        pmd_devmap(*pmd))) {
+                __release(ptl); /* fake out sparse */
                 return ptl;
+        }
         spin_unlock(ptl);
         return NULL;
 }
-- 
2.33.0
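
For reference, below is a minimal standalone sketch (not part of the patch) of
the pattern being annotated: a helper that conditionally returns with a lock
held, as pmd_trans_huge_lock() does, and a caller that later unlocks it. The
__acquire()/__release() macro bodies mirror the kernel's __CHECKER__
definitions; everything else here (my_lock(), my_unlock(), cond_trylock(),
bump_counter(), the pthread mutex) is made up for illustration.

#include <pthread.h>

#ifdef __CHECKER__
# define __acquire(x)   __context__(x, 1)
# define __release(x)   __context__(x, -1)
#else
# define __acquire(x)   ((void)0)
# define __release(x)   ((void)0)
#endif

/* Annotated lock/unlock wrappers, standing in for spin_lock()/spin_unlock(). */
#define my_lock(l)      do { pthread_mutex_lock(l); __acquire(l); } while (0)
#define my_unlock(l)    do { __release(l); pthread_mutex_unlock(l); } while (0)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/*
 * Like pmd_trans_huge_lock(): returns with the lock held when the condition
 * is true, otherwise drops it and returns NULL.  Without the __release(),
 * sparse sees one exit path that still holds the lock and one that does not,
 * and reports a context imbalance.
 */
static pthread_mutex_t *cond_trylock(int cond)
{
        my_lock(&lock);
        if (cond) {
                __release(&lock);       /* fake out sparse, as in __pmd_trans_huge_lock() */
                return &lock;
        }
        my_unlock(&lock);
        return NULL;
}

static void bump_counter(int cond)
{
        pthread_mutex_t *held = cond_trylock(cond);

        if (held) {
                /*
                 * From sparse's point of view no lock is held here, so the
                 * my_unlock() below would look like an "unexpected unlock".
                 * The __acquire() restores the balance, as in the
                 * task_mmu.c hunks above.
                 */
                __acquire(held);
                counter++;
                my_unlock(held);
        }
}

int main(void)
{
        bump_counter(1);
        bump_counter(0);
        return 0;
}

This is the same two-sided fix the patch applies: __release() where the helper
returns with the lock still held, and __acquire() where the caller is about to
unlock it.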