+ mm-mmu_notifier-convert-user-range-blockable-to-helper-function.patch added to -mm tree

The patch titled
     Subject: mm/mmu_notifier: convert user range->blockable to helper function
has been added to the -mm tree.  Its filename is
     mm-mmu_notifier-convert-user-range-blockable-to-helper-function.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-mmu_notifier-convert-user-range-blockable-to-helper-function.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-mmu_notifier-convert-user-range-blockable-to-helper-function.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days.

------------------------------------------------------
From: Jérôme Glisse <jglisse@xxxxxxxxxx>
Subject: mm/mmu_notifier: convert user range->blockable to helper function

Use the mmu_notifier_range_blockable() helper function instead of directly
dereferencing the range->blockable field.  This is done to make it easier
to change the mmu_notifier_range structure later on; a subsequent patch in
this series converts the ->blockable field into a flags field.
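
The helper is added by the preceding patch in this series
(mm-mmu_notifier-helper-to-test-if-a-range-invalidation-is-blockable.patch).
As a minimal sketch, assuming the struct layout before the later flags
conversion, it is just a wrapper around the field:

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	/* wraps the field so callers no longer depend on the struct layout */
	return range->blockable;
}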

This patch is the outcome of the following coccinelle patch:

%<-------------------------------------------------------------------
@@
identifier I1, FN;
@@
FN(..., struct mmu_notifier_range *I1, ...) {
<...
-I1->blockable
+mmu_notifier_range_blockable(I1)
...>
}
------------------------------------------------------------------->%

spatch --in-place --sp-file blockable.spatch --dir .
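
For illustration only, this is the rewrite the rule performs on a
hypothetical notifier callback (the function name below is made up and is
not part of this patch):

	/* before */
	static int example_invalidate_range_start(struct mmu_notifier *mn,
						  struct mmu_notifier_range *range)
	{
		if (!range->blockable)
			return -EAGAIN;
		/* ... tear down mappings for [range->start, range->end) ... */
		return 0;
	}

	/* after spatch: the direct field access becomes a helper call */
		if (!mmu_notifier_range_blockable(range))
			return -EAGAIN;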

Link: http://lkml.kernel.org/r/20190326164747.24405-3-jglisse@xxxxxxxxxx
Signed-off-by: Jérôme Glisse <jglisse@xxxxxxxxxx>
Cc: Christian König <christian.koenig@xxxxxxx>
Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
Cc: Jani Nikula <jani.nikula@xxxxxxxxxxxxxxx>
Cc: Rodrigo Vivi <rodrigo.vivi@xxxxxxxxx>
Cc: Jan Kara <jack@xxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Peter Xu <peterx@xxxxxxxxxx>
Cc: Felix Kuehling <Felix.Kuehling@xxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxxxxxx>
Cc: Ross Zwisler <zwisler@xxxxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Radim Krcmar <rkrcmar@xxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Christian Koenig <christian.koenig@xxxxxxx>
Cc: Ralph Campbell <rcampbell@xxxxxxxxxx>
Cc: John Hubbard <jhubbard@xxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c  |    8 ++++----
 drivers/gpu/drm/i915/i915_gem_userptr.c |    2 +-
 drivers/gpu/drm/radeon/radeon_mn.c      |    4 ++--
 drivers/infiniband/core/umem_odp.c      |    5 +++--
 drivers/xen/gntdev.c                    |    6 +++---
 mm/hmm.c                                |    6 +++---
 mm/mmu_notifier.c                       |    2 +-
 virt/kvm/kvm_main.c                     |    3 ++-
 8 files changed, 19 insertions(+), 17 deletions(-)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c~mm-mmu_notifier-convert-user-range-blockable-to-helper-function
+++ a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -256,14 +256,14 @@ static int amdgpu_mn_invalidate_range_st
 	/* TODO we should be able to split locking for interval tree and
 	 * amdgpu_mn_invalidate_node
 	 */
-	if (amdgpu_mn_read_lock(amn, range->blockable))
+	if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
 		return -EAGAIN;
 
 	it = interval_tree_iter_first(&amn->objects, range->start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 
-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			amdgpu_mn_read_unlock(amn);
 			return -EAGAIN;
 		}
@@ -299,7 +299,7 @@ static int amdgpu_mn_invalidate_range_st
 	/* notification is exclusive, but interval is inclusive */
 	end = range->end - 1;
 
-	if (amdgpu_mn_read_lock(amn, range->blockable))
+	if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
 		return -EAGAIN;
 
 	it = interval_tree_iter_first(&amn->objects, range->start, end);
@@ -307,7 +307,7 @@ static int amdgpu_mn_invalidate_range_st
 		struct amdgpu_mn_node *node;
 		struct amdgpu_bo *bo;
 
-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			amdgpu_mn_read_unlock(amn);
 			return -EAGAIN;
 		}
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c~mm-mmu_notifier-convert-user-range-blockable-to-helper-function
+++ a/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -122,7 +122,7 @@ userptr_mn_invalidate_range_start(struct
 	while (it) {
 		struct drm_i915_gem_object *obj;
 
-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			ret = -EAGAIN;
 			break;
 		}
--- a/drivers/gpu/drm/radeon/radeon_mn.c~mm-mmu_notifier-convert-user-range-blockable-to-helper-function
+++ a/drivers/gpu/drm/radeon/radeon_mn.c
@@ -133,7 +133,7 @@ static int radeon_mn_invalidate_range_st
 	/* TODO we should be able to split locking for interval tree and
 	 * the tear down.
 	 */
-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
 		mutex_lock(&rmn->lock);
 	else if (!mutex_trylock(&rmn->lock))
 		return -EAGAIN;
@@ -144,7 +144,7 @@ static int radeon_mn_invalidate_range_st
 		struct radeon_bo *bo;
 		long r;
 
-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			ret = -EAGAIN;
 			goto out_unlock;
 		}
--- a/drivers/infiniband/core/umem_odp.c~mm-mmu_notifier-convert-user-range-blockable-to-helper-function
+++ a/drivers/infiniband/core/umem_odp.c
@@ -152,7 +152,7 @@ static int ib_umem_notifier_invalidate_r
 	struct ib_ucontext_per_mm *per_mm =
 		container_of(mn, struct ib_ucontext_per_mm, mn);
 
-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
 		down_read(&per_mm->umem_rwsem);
 	else if (!down_read_trylock(&per_mm->umem_rwsem))
 		return -EAGAIN;
@@ -170,7 +170,8 @@ static int ib_umem_notifier_invalidate_r
 	return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
 					     range->end,
 					     invalidate_range_start_trampoline,
-					     range->blockable, NULL);
+					     mmu_notifier_range_blockable(range),
+					     NULL);
 }
 
 static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
--- a/drivers/xen/gntdev.c~mm-mmu_notifier-convert-user-range-blockable-to-helper-function
+++ a/drivers/xen/gntdev.c
@@ -526,20 +526,20 @@ static int mn_invl_range_start(struct mm
 	struct gntdev_grant_map *map;
 	int ret = 0;
 
-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
 		mutex_lock(&priv->lock);
 	else if (!mutex_trylock(&priv->lock))
 		return -EAGAIN;
 
 	list_for_each_entry(map, &priv->maps, next) {
 		ret = unmap_if_in_range(map, range->start, range->end,
-					range->blockable);
+					mmu_notifier_range_blockable(range));
 		if (ret)
 			goto out_unlock;
 	}
 	list_for_each_entry(map, &priv->freeable_maps, next) {
 		ret = unmap_if_in_range(map, range->start, range->end,
-					range->blockable);
+					mmu_notifier_range_blockable(range));
 		if (ret)
 			goto out_unlock;
 	}
--- a/mm/hmm.c~mm-mmu_notifier-convert-user-range-blockable-to-helper-function
+++ a/mm/hmm.c
@@ -205,9 +205,9 @@ static int hmm_invalidate_range_start(st
 	update.start = nrange->start;
 	update.end = nrange->end;
 	update.event = HMM_UPDATE_INVALIDATE;
-	update.blockable = nrange->blockable;
+	update.blockable = mmu_notifier_range_blockable(nrange);
 
-	if (nrange->blockable)
+	if (mmu_notifier_range_blockable(nrange))
 		mutex_lock(&hmm->lock);
 	else if (!mutex_trylock(&hmm->lock)) {
 		ret = -EAGAIN;
@@ -222,7 +222,7 @@ static int hmm_invalidate_range_start(st
 	}
 	mutex_unlock(&hmm->lock);
 
-	if (nrange->blockable)
+	if (mmu_notifier_range_blockable(nrange))
 		down_read(&hmm->mirrors_sem);
 	else if (!down_read_trylock(&hmm->mirrors_sem)) {
 		ret = -EAGAIN;
--- a/mm/mmu_notifier.c~mm-mmu_notifier-convert-user-range-blockable-to-helper-function
+++ a/mm/mmu_notifier.c
@@ -180,7 +180,7 @@ int __mmu_notifier_invalidate_range_star
 			if (_ret) {
 				pr_info("%pS callback failed with %d in %sblockable context.\n",
 					mn->ops->invalidate_range_start, _ret,
-					!range->blockable ? "non-" : "");
+					!mmu_notifier_range_blockable(range) ? "non-" : "");
 				ret = _ret;
 			}
 		}
--- a/virt/kvm/kvm_main.c~mm-mmu_notifier-convert-user-range-blockable-to-helper-function
+++ a/virt/kvm/kvm_main.c
@@ -391,7 +391,8 @@ static int kvm_mmu_notifier_invalidate_r
 	spin_unlock(&kvm->mmu_lock);
 
 	ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
-					range->end, range->blockable);
+					range->end,
+					mmu_notifier_range_blockable(range));
 
 	srcu_read_unlock(&kvm->srcu, idx);
 
_

Patches currently in -mm which might be from jglisse@xxxxxxxxxx are

mm-hmm-select-mmu-notifier-when-selecting-hmm-v2.patch
mm-hmm-use-reference-counting-for-hmm-struct-v3.patch
mm-hmm-do-not-erase-snapshot-when-a-range-is-invalidated.patch
mm-hmm-improve-and-rename-hmm_vma_get_pfns-to-hmm_range_snapshot-v2.patch
mm-hmm-improve-and-rename-hmm_vma_fault-to-hmm_range_fault-v3.patch
mm-hmm-improve-driver-api-to-work-and-wait-over-a-range-v3.patch
mm-hmm-add-default-fault-flags-to-avoid-the-need-to-pre-fill-pfns-arrays-v2.patch
mm-hmm-mirror-hugetlbfs-snapshoting-faulting-and-dma-mapping-v3.patch
mm-hmm-allow-to-mirror-vma-of-a-file-on-a-dax-backed-filesystem-v3.patch
mm-hmm-add-helpers-to-test-if-mm-is-still-alive-or-not.patch
mm-hmm-add-an-helper-function-that-fault-pages-and-map-them-to-a-device-v3.patch
mm-hmm-add-an-helper-function-that-fault-pages-and-map-them-to-a-device-v3-fix.patch
mm-hmm-convert-various-hmm_pfn_-to-device_entry-which-is-a-better-name.patch
mm-mmu_notifier-helper-to-test-if-a-range-invalidation-is-blockable.patch
mm-mmu_notifier-convert-user-range-blockable-to-helper-function.patch
mm-mmu_notifier-convert-mmu_notifier_range-blockable-to-a-flags.patch
mm-mmu_notifier-contextual-information-for-event-enums.patch
mm-mmu_notifier-contextual-information-for-event-triggering-invalidation-v2.patch
mm-mmu_notifier-use-correct-mmu_notifier-events-for-each-invalidation.patch
mm-mmu_notifier-pass-down-vma-and-reasons-why-mmu-notifier-is-happening-v2.patch
mm-mmu_notifier-mmu_notifier_range_update_to_read_only-helper.patch



