From: Arjun Roy <arjunroy@xxxxxxxxxx>

pte_index() is either defined as a macro (e.g. sparc64) or as an
inlined function (e.g. x86). vm_insert_pages() depends on pte_index
but it is not defined on all platforms (e.g. m68k).

To fix compilation of vm_insert_pages() on architectures not providing
pte_index(), we perform the following fix:

0. For platforms where it is meaningful, and defined as a macro, no
   change is needed.
1. For platforms where it is meaningful and defined as an inlined
   function, and we want to use it with vm_insert_pages(), we define
   a degenerate macro of the form: #define pte_index pte_index
2. vm_insert_pages() checks for the existence of a pte_index macro
   definition. If found, it implements a batched insert. If not found,
   it devolves to calling vm_insert_page() in a loop.

This patch implements step 2.

v3 of this patch fixes a compilation warning for an unused method.
v2 of this patch moved a macro definition to a more readable location.

Signed-off-by: Arjun Roy <arjunroy@xxxxxxxxxx>
---
 mm/memory.c | 41 ++++++++++++++++++++++++++++-------------
 1 file changed, 28 insertions(+), 13 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index d6f834f7d145..47b28fcc73c2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1460,18 +1460,6 @@ static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
 	return 0;
 }
 
-static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
-			unsigned long addr, struct page *page, pgprot_t prot)
-{
-	int err;
-
-	if (!page_count(page))
-		return -EINVAL;
-	err = validate_page_before_insert(page);
-	return err ? err : insert_page_into_pte_locked(
-		mm, pte_offset_map(pmd, addr), addr, page, prot);
-}
-
 /*
  * This is the old fallback for page remapping.
  *
@@ -1500,8 +1488,21 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	return retval;
 }
 
+#ifdef pte_index
+static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
+			unsigned long addr, struct page *page, pgprot_t prot)
+{
+	int err;
+
+	if (!page_count(page))
+		return -EINVAL;
+	err = validate_page_before_insert(page);
+	return err ? err : insert_page_into_pte_locked(
+		mm, pte_offset_map(pmd, addr), addr, page, prot);
+}
+
 /* insert_pages() amortizes the cost of spinlock operations
- * when inserting pages in a loop.
+ * when inserting pages in a loop. Arch *must* define pte_index.
  */
 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 			struct page **pages, unsigned long *num, pgprot_t prot)
@@ -1556,6 +1557,7 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 	*num = remaining_pages_total;
 	return ret;
 }
+#endif /* ifdef pte_index */
 
 /**
  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
@@ -1575,6 +1577,7 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 			struct page **pages, unsigned long *num)
 {
+#ifdef pte_index
 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
 
 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
@@ -1586,6 +1589,18 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 	}
 	/* Defer page refcount checking till we're about to map that page. */
 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
+#else
+	unsigned long idx = 0, pgcount = *num;
+	int err;
+
+	for (; idx < pgcount; ++idx) {
+		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
+		if (err)
+			break;
+	}
+	*num = pgcount - idx;
+	return err;
+#endif /* ifdef pte_index */
 }
 EXPORT_SYMBOL(vm_insert_pages);
 
-- 
2.25.1.481.gfbce0eb801-goog
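For illustration only (this sketch is not part of the patch), step 1 on an
architecture whose pte_index() is an inlined function amounts to adding the
degenerate macro next to the existing definition, typically in that
architecture's asm/pgtable.h. The function body below is an assumption for
the sketch, mirroring the usual (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)
form:

	/* Existing inlined definition, unchanged. */
	static inline unsigned long pte_index(unsigned long address)
	{
		return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	}

	/*
	 * Degenerate self-referential macro: lets "#ifdef pte_index" in
	 * mm/memory.c detect that this architecture provides pte_index(),
	 * so vm_insert_pages() takes the batched path instead of the
	 * vm_insert_page() loop.
	 */
	#define pte_index pte_index

Without such a define, vm_insert_pages() still works: the #else branch above
maps the pages one at a time with vm_insert_page() and reports the number of
pages left unmapped back through *num.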