[PATCH v3 4/4] Enabling Access bit when doing memory swapping

Enable the Accessed bit when doing memory swapping: when the hardware supports EPT Access and Dirty bits, pass VMX_EPT_ACCESS_BIT and VMX_EPT_DIRTY_BIT to kvm_mmu_set_mask_ptes(), and make the rmap aging code test and clear shadow_accessed_mask instead of the hard-coded PT_ACCESSED_MASK.

Signed-off-by: Haitao Shan <haitao.shan@xxxxxxxxx>
Signed-off-by: Xudong Hao <xudong.hao@xxxxxxxxx>
---
 arch/x86/kvm/mmu.c |   14 ++++++++------
 arch/x86/kvm/vmx.c |    6 ++++--
 2 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 72102e0..c2fef8e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1242,7 +1242,8 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
        int young = 0;

        /*
-        * Emulate the accessed bit for EPT, by checking if this page has
+        * In the absence of EPT Access and Dirty bit support, emulate
+        * the accessed bit for EPT by checking if this page has
         * an EPT mapping, and clearing it if it does. On the next access,
         * a new EPT mapping will be established.
         * This has some overhead, but not as much as the cost of swapping
@@ -1253,11 +1254,12 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,

        for (sptep = rmap_get_first(*rmapp, &iter); sptep;
             sptep = rmap_get_next(&iter)) {
-               BUG_ON(!(*sptep & PT_PRESENT_MASK));
+               BUG_ON(!is_shadow_present_pte(*sptep));

-               if (*sptep & PT_ACCESSED_MASK) {
+               if (*sptep & shadow_accessed_mask) {
                        young = 1;
-                       clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)sptep);
+                       clear_bit((ffs(shadow_accessed_mask) - 1),
+                                (unsigned long *)sptep);
                }
        }

@@ -1281,9 +1283,9 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,

        for (sptep = rmap_get_first(*rmapp, &iter); sptep;
             sptep = rmap_get_next(&iter)) {
-               BUG_ON(!(*sptep & PT_PRESENT_MASK));
+               BUG_ON(!is_shadow_present_pte(*sptep));

-               if (*sptep & PT_ACCESSED_MASK) {
+               if (*sptep & shadow_accessed_mask) {
                        young = 1;
                        break;
                }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 32eb588..ea6390e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7275,8 +7275,10 @@ static int __init vmx_init(void)
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);

        if (enable_ept) {
-               kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
-                               VMX_EPT_EXECUTABLE_MASK);
+               kvm_mmu_set_mask_ptes(0ull,
+                       (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
+                       (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
+                       0ull, VMX_EPT_EXECUTABLE_MASK);
                ept_set_mmio_spte_mask();
                kvm_enable_tdp();
        } else
