mm/gup.c provides a kernel interface that accepts user addresses and
manipulates user pages directly (for example get_user_pages, which is
used by the futex syscall). Here we also need to handle the case of
tagged user pointers.

Untag addresses passed to this interface.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 mm/gup.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/mm/gup.c b/mm/gup.c
index 1b46e6e74881..4d820c4792d7 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -386,6 +386,8 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 	struct page *page;
 	struct mm_struct *mm = vma->vm_mm;
 
+	address = untagged_addr(address);
+
 	*page_mask = 0;
 
 	/* make this handle hugepd */
@@ -647,6 +649,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	if (!nr_pages)
 		return 0;
 
+	start = untagged_addr(start);
+
 	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
 
 	/*
@@ -801,6 +805,8 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 	struct vm_area_struct *vma;
 	int ret, major = 0;
 
+	address = untagged_addr(address);
+
 	if (unlocked)
 		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 
@@ -854,6 +860,8 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 	long ret, pages_done;
 	bool lock_dropped;
 
+	start = untagged_addr(start);
+
 	if (locked) {
 		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
 		BUG_ON(vmas);
@@ -1746,6 +1754,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	unsigned long flags;
 	int nr = 0;
 
+	start = untagged_addr(start);
+
 	start &= PAGE_MASK;
 	addr = start;
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
@@ -1798,6 +1808,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	unsigned long addr, len, end;
 	int nr = 0, ret = 0;
 
+	start = untagged_addr(start);
+
 	start &= PAGE_MASK;
 	addr = start;
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
-- 
2.16.2.395.g2e18187dfd-goog
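
Not part of the patch: the snippet below is a standalone userspace sketch of
what the added untagged_addr() calls are expected to do, assuming the arm64
Top Byte Ignore (TBI) convention used elsewhere in this series, i.e.
sign-extending the address from bit 55 so that a tag stored in bits 63:56 of
a user pointer is cleared while kernel addresses stay unchanged. The helper
name untag_sketch() and the example address are purely illustrative.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of untagging under ARM64 TBI: bit 55 is copied into bits 63:56,
 * which zeroes the tag byte for user addresses (where bit 55 is 0) and
 * leaves kernel addresses (top bits all ones) intact.
 */
static inline uint64_t untag_sketch(uint64_t addr)
{
        return (uint64_t)(((int64_t)(addr << 8)) >> 8);
}

int main(void)
{
        /* Hypothetical user pointer carrying tag 0x5f in the top byte. */
        uint64_t tagged = 0x5f00007fdeadb000ULL;

        printf("tagged:   0x%016" PRIx64 "\n", tagged);
        printf("untagged: 0x%016" PRIx64 "\n", untag_sketch(tagged));
        return 0;
}

This is why the calls are placed before any find_vma()/page-walk logic in the
hunks above: with the tag stripped, the rest of gup can keep treating the
value as an ordinary untagged user address.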