mm/gup.c provides a kernel interface that accepts user addresses and
manipulates user pages directly (for example get_user_pages, which is
used by the futex syscall). Here we also need to handle the case of
tagged user pointers, so untag the addresses passed to this interface.

Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
---
 mm/gup.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/mm/gup.c b/mm/gup.c
index 6afae32571ca..9c4afcf50dfa 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -386,6 +386,8 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 	struct page *page;
 	struct mm_struct *mm = vma->vm_mm;
 
+	address = untagged_addr(address);
+
 	*page_mask = 0;
 
 	/* make this handle hugepd */
@@ -647,6 +649,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	if (!nr_pages)
 		return 0;
 
+	start = untagged_addr(start);
+
 	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
 
 	/*
@@ -801,6 +805,8 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 	struct vm_area_struct *vma;
 	int ret, major = 0;
 
+	address = untagged_addr(address);
+
 	if (unlocked)
 		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 
@@ -854,6 +860,8 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 	long ret, pages_done;
 	bool lock_dropped;
 
+	start = untagged_addr(start);
+
 	if (locked) {
 		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
 		BUG_ON(vmas);
@@ -1749,6 +1757,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	unsigned long flags;
 	int nr = 0;
 
+	start = untagged_addr(start);
+
 	start &= PAGE_MASK;
 	addr = start;
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
@@ -1801,6 +1811,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	unsigned long addr, len, end;
 	int nr = 0, ret = 0;
 
+	start = untagged_addr(start);
+
 	start &= PAGE_MASK;
 	addr = start;
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
-- 
2.17.0.rc0.231.g781580f067-goog
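
For context, below is a minimal user-space sketch of what "untagging" an
address means under the arm64 Top Byte Ignore (TBI) feature, where hardware
ignores bits 63:56 of a user address and software may store a tag there.
The helper name untag_addr_sketch() and the plain top-byte mask are
illustrative assumptions for this note only; the in-kernel untagged_addr()
macro that the hunks above rely on is architecture-specific and also has to
behave correctly for untagged and kernel addresses.

/*
 * Illustrative sketch, not the kernel's actual untagged_addr() definition:
 * strip the tag from the top byte of a user address so that address lookups
 * (such as the VMA walks in mm/gup.c) see the canonical address.
 */
#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT	56
#define TAG_MASK	(0xffULL << TAG_SHIFT)

static inline uint64_t untag_addr_sketch(uint64_t addr)
{
	/* Clear bits 63:56 so the address becomes canonical again. */
	return addr & ~TAG_MASK;
}

int main(void)
{
	/* A user address carrying the tag 0x2f in its top byte. */
	uint64_t tagged = (0x2fULL << TAG_SHIFT) | 0x0000ffffa0001000ULL;

	printf("tagged:   0x%016llx\n", (unsigned long long)tagged);
	printf("untagged: 0x%016llx\n",
	       (unsigned long long)untag_addr_sketch(tagged));
	return 0;
}

The point of doing the untagging at these entry points is that the rest of
get_user_pages() and friends can keep comparing addresses against VMA ranges
without ever seeing a tag in the top byte.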