+ mm-hmm-invalidate-device-page-table-at-start-of-invalidation.patch added to -mm tree

The patch titled
     Subject: mm/hmm: invalidate device page table at start of invalidation
has been added to the -mm tree.  Its filename is
     mm-hmm-invalidate-device-page-table-at-start-of-invalidation.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-hmm-invalidate-device-page-table-at-start-of-invalidation.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-hmm-invalidate-device-page-table-at-start-of-invalidation.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Jérôme Glisse <jglisse@xxxxxxxxxx>
Subject: mm/hmm: invalidate device page table at start of invalidation

Invalidate the device page table at the start of invalidation, and
invalidate any in-progress CPU page table snapshotting at both the start
and the end of any invalidation.

This is helpful when the device needs to dirty pages because its page
table reports them as dirty.  Dirtying pages must happen in the start
mmu notifier callback and not in the end one.

Link: http://lkml.kernel.org/r/20181019160442.18723-7-jglisse@xxxxxxxxxx
Signed-off-by: Jérôme Glisse <jglisse@xxxxxxxxxx>
Cc: Ralph Campbell <rcampbell@xxxxxxxxxx>
Cc: John Hubbard <jhubbard@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/hmm.c |   27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

--- a/mm/hmm.c~mm-hmm-invalidate-device-page-table-at-start-of-invalidation
+++ a/mm/hmm.c
@@ -43,7 +43,6 @@ static const struct mmu_notifier_ops hmm
  *
  * @mm: mm struct this HMM struct is bound to
  * @lock: lock protecting ranges list
- * @sequence: we track updates to the CPU page table with a sequence number
  * @ranges: list of range being snapshotted
  * @mirrors: list of mirrors for this mm
  * @mmu_notifier: mmu notifier to track updates to CPU page table
@@ -52,7 +51,6 @@ static const struct mmu_notifier_ops hmm
 struct hmm {
 	struct mm_struct	*mm;
 	spinlock_t		lock;
-	atomic_t		sequence;
 	struct list_head	ranges;
 	struct list_head	mirrors;
 	struct mmu_notifier	mmu_notifier;
@@ -85,7 +83,6 @@ static struct hmm *hmm_register(struct m
 		return NULL;
 	INIT_LIST_HEAD(&hmm->mirrors);
 	init_rwsem(&hmm->mirrors_sem);
-	atomic_set(&hmm->sequence, 0);
 	hmm->mmu_notifier.ops = NULL;
 	INIT_LIST_HEAD(&hmm->ranges);
 	spin_lock_init(&hmm->lock);
@@ -126,7 +123,7 @@ void hmm_mm_destroy(struct mm_struct *mm
 	kfree(mm->hmm);
 }
 
-static int hmm_invalidate_range(struct hmm *hmm,
+static int hmm_invalidate_range(struct hmm *hmm, bool device,
 				const struct hmm_update *update)
 {
 	struct hmm_mirror *mirror;
@@ -147,6 +144,9 @@ static int hmm_invalidate_range(struct h
 	}
 	spin_unlock(&hmm->lock);
 
+	if (!device)
+		return 0;
+
 	down_read(&hmm->mirrors_sem);
 	list_for_each_entry(mirror, &hmm->mirrors, list) {
 		int ret;
@@ -189,18 +189,21 @@ static void hmm_release(struct mmu_notif
 }
 
 static int hmm_invalidate_range_start(struct mmu_notifier *mn,
-				       struct mm_struct *mm,
-				       unsigned long start,
-				       unsigned long end,
-				       bool blockable)
+				      struct mm_struct *mm,
+				      unsigned long start,
+				      unsigned long end,
+				      bool blockable)
 {
+	struct hmm_update update;
 	struct hmm *hmm = mm->hmm;
 
 	VM_BUG_ON(!hmm);
 
-	atomic_inc(&hmm->sequence);
-
-	return 0;
+	update.start = start;
+	update.end = end;
+	update.event = HMM_UPDATE_INVALIDATE;
+	update.blockable = blockable;
+	return hmm_invalidate_range(hmm, true, &update);
 }
 
 static void hmm_invalidate_range_end(struct mmu_notifier *mn,
@@ -217,7 +220,7 @@ static void hmm_invalidate_range_end(str
 	update.end = end;
 	update.event = HMM_UPDATE_INVALIDATE;
 	update.blockable = true;
-	hmm_invalidate_range(hmm, &update);
+	hmm_invalidate_range(hmm, false, &update);
 }
 
 static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
_
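
For illustration only (not part of the patch): a minimal sketch of how a
mirror driver's sync_cpu_device_pagetables() callback might transfer device
dirty bits now that it is invoked from the start notifier.  struct
example_device and the dev_pt_*() helpers are hypothetical driver-side
names; only set_page_dirty() and the hmm_update fields shown in the diff
above are real, and the -EAGAIN convention for a non-blockable update is an
assumption.

#include <linux/hmm.h>
#include <linux/mm.h>

/* Hypothetical driver-side callback; dev_pt_lookup(), dev_pt_entry_dirty(),
 * dev_pt_entry_page() and dev_pt_clear() are placeholder driver helpers. */
static int example_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					      const struct hmm_update *update)
{
	struct example_device *dev = container_of(mirror,
						  struct example_device,
						  mirror);
	unsigned long addr;

	if (update->event != HMM_UPDATE_INVALIDATE)
		return 0;
	if (!update->blockable)
		return -EAGAIN;	/* cannot sleep here, ask caller to retry */

	for (addr = update->start; addr < update->end; addr += PAGE_SIZE) {
		struct example_dev_pte *dpte = dev_pt_lookup(dev, addr);

		if (!dpte)
			continue;
		/*
		 * Because this now runs from the *start* mmu notifier
		 * callback, the CPU mapping is still in place, so the
		 * device dirty bit can be transferred with
		 * set_page_dirty() before the entry is torn down.
		 */
		if (dev_pt_entry_dirty(dpte))
			set_page_dirty(dev_pt_entry_page(dpte));
		dev_pt_clear(dev, dpte);
	}
	return 0;
}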

Patches currently in -mm which might be from jglisse@xxxxxxxxxx are

mm-hmm-fix-utf8.patch
mm-hmm-properly-handle-migration-pmd-v3.patch
mm-hmm-use-a-structure-for-update-callback-parameters-v2.patch
mm-hmm-invalidate-device-page-table-at-start-of-invalidation.patch
