Now that there are no remaining users of pte_offset_map_nolock(), remove it.

Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
---
 Documentation/mm/split_page_table_lock.rst |  3 ---
 include/linux/mm.h                         |  2 --
 mm/pgtable-generic.c                       | 21 ---------------------
 3 files changed, 26 deletions(-)

diff --git a/Documentation/mm/split_page_table_lock.rst b/Documentation/mm/split_page_table_lock.rst
index f54f717ae8bdf..596b425fb28e6 100644
--- a/Documentation/mm/split_page_table_lock.rst
+++ b/Documentation/mm/split_page_table_lock.rst
@@ -16,9 +16,6 @@ There are helpers to lock/unlock a table and other accessor functions:
  - pte_offset_map_lock()
 	maps PTE and takes PTE table lock, returns pointer to PTE with
 	pointer to its PTE table lock, or returns NULL if no PTE table;
- - pte_offset_map_nolock()
-	maps PTE, returns pointer to PTE with pointer to its PTE table
-	lock (not taken), or returns NULL if no PTE table;
  - pte_offset_map_readonly_nolock()
 	maps PTE, returns pointer to PTE with pointer to its PTE table
 	lock (not taken), or returns NULL if no PTE table;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1fe0ceabcaf39..f7c207c3ab701 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2952,8 +2952,6 @@ static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
 	return pte;
 }
 
-pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
-			unsigned long addr, spinlock_t **ptlp);
 pte_t *pte_offset_map_readonly_nolock(struct mm_struct *mm, pmd_t *pmd,
 			unsigned long addr, spinlock_t **ptlp);
 pte_t *pte_offset_map_maywrite_nolock(struct mm_struct *mm, pmd_t *pmd,
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 29d1fd6fd2963..9ba2e423bdb41 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -305,18 +305,6 @@ pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
 	return NULL;
 }
 
-pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
-			     unsigned long addr, spinlock_t **ptlp)
-{
-	pmd_t pmdval;
-	pte_t *pte;
-
-	pte = __pte_offset_map(pmd, addr, &pmdval);
-	if (likely(pte))
-		*ptlp = pte_lockptr(mm, &pmdval);
-	return pte;
-}
-
 pte_t *pte_offset_map_readonly_nolock(struct mm_struct *mm, pmd_t *pmd,
 			unsigned long addr, spinlock_t **ptlp)
 {
@@ -374,15 +362,6 @@ pte_t *pte_offset_map_maywrite_nolock(struct mm_struct *mm, pmd_t *pmd,
  * and disconnected table. Until pte_unmap(pte) unmaps and rcu_read_unlock()s
  * afterwards.
  *
- * pte_offset_map_nolock(mm, pmd, addr, ptlp), above, is like pte_offset_map();
- * but when successful, it also outputs a pointer to the spinlock in ptlp - as
- * pte_offset_map_lock() does, but in this case without locking it. This helps
- * the caller to avoid a later pte_lockptr(mm, *pmd), which might by that time
- * act on a changed *pmd: pte_offset_map_nolock() provides the correct spinlock
- * pointer for the page table that it returns. In principle, the caller should
- * recheck *pmd once the lock is taken; in practice, no callsite needs that -
- * either the mmap_lock for write, or pte_same() check on contents, is enough.
- *
  * pte_offset_map_readonly_nolock(mm, pmd, addr, ptlp), above, is like
  * pte_offset_map(); but when successful, it also outputs a pointer to the
  * spinlock in ptlp - as pte_offset_map_lock() does, but in this case without
-- 
2.20.1
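
For reference, a minimal sketch of the conversion pattern the removed comment describes, written against the remaining pte_offset_map_readonly_nolock() helper: map the PTE and learn its lock pointer without taking the lock, then take the lock and revalidate the entry with pte_same(). This is illustrative only and not part of the patch; lookup_one_pte() and its return values are hypothetical, and the other calls are standard kernel primitives.

/*
 * Illustrative sketch, not part of the patch: a caller that used to rely on
 * pte_offset_map_nolock() might use pte_offset_map_readonly_nolock() plus a
 * pte_same() recheck once the lock is taken, as the removed comment suggests.
 * lookup_one_pte() is a hypothetical function name.
 */
#include <linux/mm.h>

static int lookup_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte, entry;

	/* Map the PTE and get its lock pointer, without taking the lock. */
	pte = pte_offset_map_readonly_nolock(mm, pmd, addr, &ptl);
	if (!pte)
		return 0;			/* no PTE table here */

	entry = ptep_get(pte);			/* lockless snapshot */

	spin_lock(ptl);
	if (!pte_same(entry, ptep_get(pte))) {
		/* The entry changed under us; drop everything and retry. */
		spin_unlock(ptl);
		pte_unmap(pte);
		return -EAGAIN;
	}

	/* ... read-only examination of the now-stable entry goes here ... */

	spin_unlock(ptl);
	pte_unmap(pte);
	return 0;
}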