This is a note to let you know that I've just added the patch titled

    vdpa: Make use of PFN_PHYS/PFN_UP/PFN_DOWN helper macro

to the 5.10-stable tree which can be found at:

	http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     vdpa-make-use-of-pfn_phys-pfn_up-pfn_down-helper-mac.patch
and it can be found in the queue-5.10 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


commit e5e838cc00a8facc70cd18f2ba84495c3ebf81e2
Author: Cai Huoqing <cai.huoqing@xxxxxxxxx>
Date:   Mon Aug 2 09:37:17 2021 +0800

    vdpa: Make use of PFN_PHYS/PFN_UP/PFN_DOWN helper macro

    [ Upstream commit 729ce5a5bd6fda5eb2322a39db2287f1f26f92f3 ]

    it's a nice refactor to make use of PFN_PHYS/PFN_UP/PFN_DOWN helper macro

    Signed-off-by: Cai Huoqing <caihuoqing@xxxxxxxxx>
    Link: https://lore.kernel.org/r/20210802013717.851-1-caihuoqing@xxxxxxxxx
    Acked-by: Jason Wang <jasowang@xxxxxxxxxx>
    Signed-off-by: Michael S. Tsirkin <mst@xxxxxxxxxx>
    Stable-dep-of: 0823dc64586b ("vhost-vdpa: switch to use vmf_insert_pfn() in the fault handler")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 04578aa87e4da..f48047a1027a2 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -519,15 +519,15 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
 	unsigned long pfn, pinned;
 
 	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
-		pinned = map->size >> PAGE_SHIFT;
-		for (pfn = map->addr >> PAGE_SHIFT;
+		pinned = PFN_DOWN(map->size);
+		for (pfn = PFN_DOWN(map->addr);
 		     pinned > 0; pfn++, pinned--) {
 			page = pfn_to_page(pfn);
 			if (map->perm & VHOST_ACCESS_WO)
 				set_page_dirty_lock(page);
 			unpin_user_page(page);
 		}
-		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
 		vhost_iotlb_map_free(iotlb, map);
 	}
 }
@@ -589,7 +589,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
 	if (r)
 		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
 	else
-		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
 
 	return r;
 }
@@ -643,7 +643,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 	if (msg->perm & VHOST_ACCESS_WO)
 		gup_flags |= FOLL_WRITE;
 
-	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+	npages = PFN_UP(msg->size + (iova & ~PAGE_MASK));
 	if (!npages) {
 		ret = -EINVAL;
 		goto free;
@@ -651,7 +651,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 
 	mmap_read_lock(dev->mm);
 
-	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
 	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
 		ret = -ENOMEM;
 		goto unlock;
@@ -685,9 +685,9 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 
 		if (last_pfn && (this_pfn != last_pfn + 1)) {
 			/* Pin a contiguous chunk of memory */
-			csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+			csize = PFN_PHYS(last_pfn - map_pfn + 1);
 			ret = vhost_vdpa_map(v, iova, csize,
-					     map_pfn << PAGE_SHIFT,
+					     PFN_PHYS(map_pfn),
 					     msg->perm);
 			if (ret) {
 				/*
@@ -711,13 +711,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 			last_pfn = this_pfn;
 		}
 
-		cur_base += pinned << PAGE_SHIFT;
+		cur_base += PFN_PHYS(pinned);
 		npages -= pinned;
 	}
 
 	/* Pin the rest chunk */
-	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
-			     map_pfn << PAGE_SHIFT, msg->perm);
+	ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+			     PFN_PHYS(map_pfn), msg->perm);
 out:
 	if (ret) {
 		if (nchunks) {
@@ -961,7 +961,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
 	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
-			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
+			    PFN_DOWN(notify.addr), PAGE_SIZE,
 			    vma->vm_page_prot))
 		return VM_FAULT_SIGBUS;
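
For reference, PFN_UP, PFN_DOWN, and PFN_PHYS are defined in
include/linux/pfn.h and expand to exactly the open-coded shifts being
replaced, so the conversion above is behavior-preserving. The standalone
userspace C sketch below restates those definitions and asserts the
equivalences this patch relies on; PAGE_SHIFT is hardcoded to 12 (4 KiB
pages) here purely for illustration, whereas the kernel derives it per
architecture.

/*
 * Userspace sketch of the PFN helpers, as defined in include/linux/pfn.h,
 * checked against the open-coded shift expressions the patch replaces.
 * PAGE_SHIFT == 12 (4 KiB pages) is an assumption for this demo only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((uint64_t)(x) << PAGE_SHIFT)

int main(void)
{
	uint64_t size = 0x5123;		/* arbitrary, not page aligned */
	uint64_t iova = 0xdead0456;	/* arbitrary, unaligned IOVA */

	/* npages: PFN_UP(...) matches PAGE_ALIGN(...) >> PAGE_SHIFT */
	assert(PFN_UP(size + (iova & ~PAGE_MASK)) ==
	       (PAGE_ALIGN(size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT));

	/* pinned page count: PFN_DOWN(x) matches x >> PAGE_SHIFT */
	assert(PFN_DOWN(size) == size >> PAGE_SHIFT);

	/* chunk size in bytes: PFN_PHYS(npfns) matches npfns << PAGE_SHIFT */
	assert(PFN_PHYS(3) == (3ULL << PAGE_SHIFT));

	printf("all PFN helper equivalences hold\n");
	return 0;
}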