To reduce MMU lock contention during dirty logging, perform all
permission relaxation operations under the read lock rather than the
write lock.

Signed-off-by: Jing Zhang <jingzhangos@xxxxxxxxxx>
---
 arch/arm64/kvm/mmu.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index cafd5813c949..15393cb61a3f 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1084,6 +1084,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	unsigned long vma_pagesize, fault_granule;
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
 	struct kvm_pgtable *pgt;
+	bool use_mmu_readlock = false;
 
 	fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
 	write_fault = kvm_is_write_fault(vcpu);
@@ -1212,7 +1213,19 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (exec_fault && device)
 		return -ENOEXEC;
 
-	write_lock(&kvm->mmu_lock);
+	if (fault_status == FSC_PERM && fault_granule == PAGE_SIZE
+	    && logging_active && write_fault)
+		use_mmu_readlock = true;
+	/*
+	 * To reduce MMU contention and enhance concurrency during dirty
+	 * logging, only acquire read lock for permission relaxation. This
+	 * fast path would greatly reduce the performance degradation of
+	 * guest workloads.
+	 */
+	if (use_mmu_readlock)
+		read_lock(&kvm->mmu_lock);
+	else
+		write_lock(&kvm->mmu_lock);
 	pgt = vcpu->arch.hw_mmu->pgt;
 	if (mmu_notifier_retry(kvm, mmu_seq))
 		goto out_unlock;
@@ -1271,7 +1284,10 @@
 	}
 
 out_unlock:
-	write_unlock(&kvm->mmu_lock);
+	if (use_mmu_readlock)
+		read_unlock(&kvm->mmu_lock);
+	else
+		write_unlock(&kvm->mmu_lock);
 	kvm_set_pfn_accessed(pfn);
 	kvm_release_pfn_clean(pfn);
 	return ret != -EAGAIN ? ret : 0;
-- 
2.34.1.703.g22d0c6ccf7-goog
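
For illustration only, the lock-selection condition added above can be modelled
as a small standalone C sketch. The helper name use_mmu_readlock() just mirrors
the new flag, and the FSC_PERM and PAGE_SIZE values below are stand-ins rather
than the kernel definitions; this is not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in values for the kernel's FSC_PERM and PAGE_SIZE; illustrative only. */
#define FSC_PERM	0x0c
#define PAGE_SIZE	4096UL

/*
 * Mirrors the condition added in user_mem_abort(): take the MMU read lock
 * only for a write permission fault on a single page while dirty logging
 * is active; every other fault keeps the write lock.
 */
static bool use_mmu_readlock(unsigned int fault_status, unsigned long fault_granule,
			     bool logging_active, bool write_fault)
{
	return fault_status == FSC_PERM && fault_granule == PAGE_SIZE &&
	       logging_active && write_fault;
}

int main(void)
{
	/* Write permission fault on a 4K page with dirty logging on: read lock (1). */
	printf("%d\n", use_mmu_readlock(FSC_PERM, PAGE_SIZE, true, true));
	/* Same fault with dirty logging off: fall back to the write lock (0). */
	printf("%d\n", use_mmu_readlock(FSC_PERM, PAGE_SIZE, false, true));
	return 0;
}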