When specifying FAULT_FLAG_RETRY_NOWAIT as flag for gmap_fault(), the
gmap fault will be processed only if it can be resolved quickly and
without sleeping. This will be needed for pfault.

Refactor gmap_fault() to improve readability.

Signed-off-by: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
Acked-by: Alexander Gordeev <agordeev@xxxxxxxxxxxxx>
Reviewed-by: Heiko Carstens <hca@xxxxxxxxxxxxx>
---
 arch/s390/mm/gmap.c | 119 +++++++++++++++++++++++++++++++++++++-------
 1 file changed, 100 insertions(+), 19 deletions(-)

diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index eb0b51a36be0..f51ad948ba53 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -637,44 +637,125 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 }
 
 /**
- * gmap_fault - resolve a fault on a guest address
+ * fixup_user_fault_nowait - manually resolve a user page fault without waiting
+ * @mm: mm_struct of target mm
+ * @address: user address
+ * @fault_flags: flags to pass down to handle_mm_fault()
+ * @unlocked: did we unlock the mmap_lock while retrying
+ *
+ * This function behaves similarly to fixup_user_fault(), but it guarantees
+ * that the fault will be resolved without waiting. The function might drop
+ * and re-acquire the mm lock, in which case @unlocked will be set to true.
+ *
+ * The guarantee is that the fault is handled without waiting, but the
+ * function itself might sleep, due to the lock.
+ *
+ * Context: Needs to be called with mm->mmap_lock held in read mode, and will
+ * return with the lock held in read mode; @unlocked will indicate whether
+ * the lock has been dropped and re-acquired. This is the same behaviour as
+ * fixup_user_fault().
+ *
+ * Return: 0 on success, -EAGAIN if the fault cannot be resolved without
+ * waiting, -EFAULT if the fault cannot be resolved, -ENOMEM if out of
+ * memory.
+ */
+static int fixup_user_fault_nowait(struct mm_struct *mm, unsigned long address,
+				   unsigned int fault_flags, bool *unlocked)
+{
+	struct vm_area_struct *vma;
+	unsigned int test_flags;
+	vm_fault_t fault;
+	int rc;
+
+	fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
+	test_flags = fault_flags & FAULT_FLAG_WRITE ? VM_WRITE : VM_READ;
+
+	vma = find_vma(mm, address);
+	if (unlikely(!vma || address < vma->vm_start))
+		return -EFAULT;
+	if (unlikely(!(vma->vm_flags & test_flags)))
+		return -EFAULT;
+
+	fault = handle_mm_fault(vma, address, fault_flags, NULL);
+	/* the mm lock has been dropped, take it again */
+	if (fault & VM_FAULT_COMPLETED) {
+		*unlocked = true;
+		mmap_read_lock(mm);
+		return 0;
+	}
+	/* the mm lock has not been dropped */
+	if (fault & VM_FAULT_ERROR) {
+		rc = vm_fault_to_errno(fault, 0);
+		BUG_ON(!rc);
+		return rc;
+	}
+	/* the mm lock has not been dropped because of FAULT_FLAG_RETRY_NOWAIT */
+	if (fault & VM_FAULT_RETRY)
+		return -EAGAIN;
+	/* nothing needed to be done and the mm lock has not been dropped */
+	return 0;
+}
+
+/**
+ * __gmap_fault - resolve a fault on a guest address
  * @gmap: pointer to guest mapping meta data structure
  * @gaddr: guest address
  * @fault_flags: flags to pass down to handle_mm_fault()
  *
- * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
- * if the vm address is already mapped to a different guest segment.
+ * Context: Needs to be called with mm->mmap_lock held in read mode. Might
+ * drop and re-acquire the lock. Will always return with the lock held.
  */
-int gmap_fault(struct gmap *gmap, unsigned long gaddr,
-	       unsigned int fault_flags)
+static int __gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
 {
 	unsigned long vmaddr;
-	int rc;
 	bool unlocked;
-
-	mmap_read_lock(gmap->mm);
+	int rc = 0;
 
 retry:
 	unlocked = false;
+
 	vmaddr = __gmap_translate(gmap, gaddr);
-	if (IS_ERR_VALUE(vmaddr)) {
-		rc = vmaddr;
-		goto out_up;
-	}
-	if (fixup_user_fault(gmap->mm, vmaddr, fault_flags,
-			     &unlocked)) {
-		rc = -EFAULT;
-		goto out_up;
+	if (IS_ERR_VALUE(vmaddr))
+		return vmaddr;
+
+	if (fault_flags & FAULT_FLAG_RETRY_NOWAIT) {
+		rc = fixup_user_fault_nowait(gmap->mm, vmaddr, fault_flags, &unlocked);
+		if (rc)
+			return rc;
+	} else if (fixup_user_fault(gmap->mm, vmaddr, fault_flags, &unlocked)) {
+		return -EFAULT;
 	}
 	/*
 	 * In the case that fixup_user_fault unlocked the mmap_lock during
-	 * faultin redo __gmap_translate to not race with a map/unmap_segment.
+	 * fault-in, redo __gmap_translate() to avoid racing with a
+	 * map/unmap_segment.
+	 * In particular, __gmap_translate(), fixup_user_fault{,_nowait}(),
+	 * and __gmap_link() must all be called atomically in one go; if the
+	 * lock had been dropped in between, a retry is needed.
 	 */
 	if (unlocked)
 		goto retry;
 
-	rc = __gmap_link(gmap, gaddr, vmaddr);
-out_up:
+	return __gmap_link(gmap, gaddr, vmaddr);
+}
+
+/**
+ * gmap_fault - resolve a fault on a guest address
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: guest address
+ * @fault_flags: flags to pass down to handle_mm_fault()
+ *
+ * Returns 0 on success, -ENOMEM for out of memory conditions, -EFAULT if the
+ * vm address is already mapped to a different guest segment, and -EAGAIN if
+ * FAULT_FLAG_RETRY_NOWAIT was specified and the fault could not be processed
+ * immediately.
+ */
+int gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
+{
+	int rc;
+
+	mmap_read_lock(gmap->mm);
+	rc = __gmap_fault(gmap, gaddr, fault_flags);
 	mmap_read_unlock(gmap->mm);
 	return rc;
 }
-- 
2.47.0
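For illustration of the intended usage, here is a hypothetical caller
sketch (not part of this patch): a path that must not wait, such as the
planned pfault handling, would first try gmap_fault() with
FAULT_FLAG_RETRY_NOWAIT and fall back to a sleeping gmap_fault() call
only when that returns -EAGAIN. The helper name handle_guest_fault()
and the fallback policy are made up for the example; only gmap_fault(),
FAULT_FLAG_WRITE and FAULT_FLAG_RETRY_NOWAIT come from this patch.

/*
 * Hypothetical caller, for illustration only: attempt the fast,
 * non-waiting resolution first, then fall back to a fault that is
 * allowed to sleep.
 */
static int handle_guest_fault(struct gmap *gmap, unsigned long gaddr,
			      bool is_write)
{
	unsigned int flags = is_write ? FAULT_FLAG_WRITE : 0;
	int rc;

	/* Fast path: resolves the fault only if no waiting is needed. */
	rc = gmap_fault(gmap, gaddr, flags | FAULT_FLAG_RETRY_NOWAIT);
	if (rc != -EAGAIN)
		return rc;

	/*
	 * Slow path: the fault could not be processed immediately; e.g.
	 * after notifying the guest (pfault), resolve it with sleeping
	 * allowed.
	 */
	return gmap_fault(gmap, gaddr, flags);
}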