User bkpt will use the background page replacement approach to insert and
delete breakpoints. Since that approach is built on replace_page(),
replace_page() loses its static attribute and moves from mm/ksm.c to
mm/memory.c.

Signed-off-by: Srikar Dronamraju <srikar@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Ananth N Mavinakayanahalli <ananth@xxxxxxxxxx>
---
 include/linux/mm.h |    2 ++
 mm/ksm.c           |   62 ----------------------------------------------------
 mm/memory.c        |   62 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 64 insertions(+), 62 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 679300c..01a0740 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -984,6 +984,8 @@ void account_page_writeback(struct page *page);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
+int replace_page(struct vm_area_struct *vma, struct page *page,
+			struct page *kpage, pte_t orig_pte);
 
 /* Is the vma a continuation of the stack vma above it? */
 static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
diff --git a/mm/ksm.c b/mm/ksm.c
index c2b2a94..f46e20d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -765,68 +765,6 @@ out:
 	return err;
 }
 
-/**
- * replace_page - replace page in vma by new ksm page
- * @vma: vma that holds the pte pointing to page
- * @page: the page we are replacing by kpage
- * @kpage: the ksm page we replace page by
- * @orig_pte: the original value of the pte
- *
- * Returns 0 on success, -EFAULT on failure.
- */
-static int replace_page(struct vm_area_struct *vma, struct page *page,
-			struct page *kpage, pte_t orig_pte)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ptep;
-	spinlock_t *ptl;
-	unsigned long addr;
-	int err = -EFAULT;
-
-	addr = page_address_in_vma(page, vma);
-	if (addr == -EFAULT)
-		goto out;
-
-	pgd = pgd_offset(mm, addr);
-	if (!pgd_present(*pgd))
-		goto out;
-
-	pud = pud_offset(pgd, addr);
-	if (!pud_present(*pud))
-		goto out;
-
-	pmd = pmd_offset(pud, addr);
-	BUG_ON(pmd_trans_huge(*pmd));
-	if (!pmd_present(*pmd))
-		goto out;
-
-	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
-	if (!pte_same(*ptep, orig_pte)) {
-		pte_unmap_unlock(ptep, ptl);
-		goto out;
-	}
-
-	get_page(kpage);
-	page_add_anon_rmap(kpage, vma, addr);
-
-	flush_cache_page(vma, addr, pte_pfn(*ptep));
-	ptep_clear_flush(vma, addr, ptep);
-	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
-
-	page_remove_rmap(page);
-	if (!page_mapped(page))
-		try_to_free_swap(page);
-	put_page(page);
-
-	pte_unmap_unlock(ptep, ptl);
-	err = 0;
-out:
-	return err;
-}
-
 static int page_trans_compound_anon_split(struct page *page)
 {
 	int ret = 0;
diff --git a/mm/memory.c b/mm/memory.c
index 5823698..2a3021c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2669,6 +2669,68 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
+/**
+ * replace_page - replace page in vma by new ksm page
+ * @vma: vma that holds the pte pointing to page
+ * @page: the page we are replacing by kpage
+ * @kpage: the ksm page we replace page by
+ * @orig_pte: the original value of the pte
+ *
+ * Returns 0 on success, -EFAULT on failure.
+ */
+int replace_page(struct vm_area_struct *vma, struct page *page,
+			struct page *kpage, pte_t orig_pte)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep;
+	spinlock_t *ptl;
+	unsigned long addr;
+	int err = -EFAULT;
+
+	addr = page_address_in_vma(page, vma);
+	if (addr == -EFAULT)
+		goto out;
+
+	pgd = pgd_offset(mm, addr);
+	if (!pgd_present(*pgd))
+		goto out;
+
+	pud = pud_offset(pgd, addr);
+	if (!pud_present(*pud))
+		goto out;
+
+	pmd = pmd_offset(pud, addr);
+	BUG_ON(pmd_trans_huge(*pmd));
+	if (!pmd_present(*pmd))
+		goto out;
+
+	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	if (!pte_same(*ptep, orig_pte)) {
+		pte_unmap_unlock(ptep, ptl);
+		goto out;
+	}
+
+	get_page(kpage);
+	page_add_anon_rmap(kpage, vma, addr);
+
+	flush_cache_page(vma, addr, pte_pfn(*ptep));
+	ptep_clear_flush(vma, addr, ptep);
+	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
+
+	page_remove_rmap(page);
+	if (!page_mapped(page))
+		try_to_free_swap(page);
+	put_page(page);
+
+	pte_unmap_unlock(ptep, ptl);
+	err = 0;
+out:
+	return err;
+}
+
 int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 {
 	struct address_space *mapping = inode->i_mapping;
--
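For reviewers, here is a rough sketch (not part of the patch) of how a
user-breakpoint layer could drive the now-exported replace_page(): make a
private copy of the page that backs the probed address, patch the breakpoint
instruction into the copy, and swap the copy in via replace_page() instead of
writing through to the original page. write_opcode(), bkpt_insn and insn_size
are made-up names for illustration only; the eventual user bkpt code will
differ in details such as page locking and -EAGAIN retry handling. The sketch
assumes the caller holds mm->mmap_sem for read.

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/rmap.h>

/* Hypothetical helper: plant a breakpoint at vaddr via page replacement. */
static int write_opcode(struct mm_struct *mm, unsigned long vaddr,
			const void *bkpt_insn, size_t insn_size)
{
	struct vm_area_struct *vma;
	struct page *old_page, *new_page;
	pte_t *ptep, orig_pte;
	spinlock_t *ptl;
	void *kaddr;
	int ret;

	/* Pin the page currently mapped at vaddr (read-only, no COW fault). */
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
	if (ret <= 0)
		return -EFAULT;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	/* Build the replacement page: original contents plus breakpoint. */
	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	kaddr = kmap(new_page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), bkpt_insn, insn_size);
	kunmap(new_page);

	/* Snapshot the pte so replace_page() can detect concurrent changes. */
	ret = -EAGAIN;
	ptep = page_check_address(old_page, mm, vaddr, &ptl, 0);
	if (!ptep)
		goto put_new;
	orig_pte = *ptep;
	pte_unmap_unlock(ptep, ptl);

	/* Swap the mapping from old_page to new_page under the pte lock. */
	ret = replace_page(vma, old_page, new_page, orig_pte);

put_new:
	put_page(new_page);
put_old:
	put_page(old_page);
	return ret;
}

Removing the breakpoint would follow the same pattern in reverse: copy the
original instruction back into a fresh page and call replace_page() again.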