Xiao's "KVM: MMU: flush tlb if the spte can be locklessly modified" allows us to release mmu_lock before flushing TLBs. Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@xxxxxxxxxxxxx> Cc: Xiao Guangrong <xiaoguangrong@xxxxxxxxxxxxxxxxxx> --- Xiao can change the remaining mmu_lock to RCU's read-side lock: The grace period will be reasonably limited. arch/x86/kvm/mmu.c | 4 ++++ arch/x86/kvm/x86.c | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 5d9efb1..c6da9ba 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1249,6 +1249,8 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, { unsigned long *rmapp; + spin_lock(&kvm->mmu_lock); + while (mask) { rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), PT_PAGE_TABLE_LEVEL, slot); @@ -1257,6 +1259,8 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, /* clear the first set bit */ mask &= mask - 1; } + + spin_unlock(&kvm->mmu_lock); } static bool rmap_write_protect(struct kvm *kvm, u64 gfn) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e5ca72a..1d1f6df 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3543,8 +3543,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long); memset(dirty_bitmap_buffer, 0, n); - spin_lock(&kvm->mmu_lock); - for (i = 0; i < n / sizeof(long); i++) { unsigned long mask; gfn_t offset; @@ -3563,8 +3561,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) if (is_dirty) kvm_flush_remote_tlbs(kvm); - spin_unlock(&kvm->mmu_lock); - r = -EFAULT; if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) goto out; -- 1.7.9.5 -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html