Add a new statistic, max_mmu_rmap_size, which stores the maximum size of
the rmap for the VM.

Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/mmu/mmu.c          | 2 ++
 arch/x86/kvm/x86.c              | 1 +
 3 files changed, 4 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 974cbfb1eefe..d798650ad793 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1209,6 +1209,7 @@ struct kvm_vm_stat {
 	u64 lpages;
 	u64 nx_lpage_splits;
 	u64 max_mmu_page_hash_collisions;
+	u64 max_mmu_rmap_size;
 };
 
 struct kvm_vcpu_stat {
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b888385d1933..eb16c1dbbb32 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2696,6 +2696,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (is_shadow_present_pte(*sptep)) {
 		if (!was_rmapped) {
 			rmap_count = rmap_add(vcpu, sptep, gfn);
+			if (rmap_count > vcpu->kvm->stat.max_mmu_rmap_size)
+				vcpu->kvm->stat.max_mmu_rmap_size = rmap_count;
 			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 				rmap_recycle(vcpu, sptep, gfn);
 		}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8166ad113fb2..d83ccf35ce99 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -237,6 +237,7 @@ const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
 	STATS_DESC_ICOUNTER(VM, mmu_unsync),
 	STATS_DESC_ICOUNTER(VM, lpages),
 	STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
+	STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
 	STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
 };
 static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
-- 
2.31.1
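
Since the kvm_vm_stats_desc descriptors also surface each per-VM stat as a
debugfs file named after the struct field, the new counter can be read from
userspace once it lands. Below is a minimal sketch for trying it out; it
assumes debugfs is mounted at /sys/kernel/debug and that the VM's per-VM
directory name ("<pid>-<vm_fd>") is passed on the command line. Both are
assumptions about the test environment, not part of this patch.

/* Hypothetical userspace sketch: read max_mmu_rmap_size from debugfs.
 * Usage: ./read_rmap_stat <pid>-<vm_fd>
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	char path[256];
	unsigned long long max_rmap = 0;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>-<vm_fd>\n", argv[0]);
		return 1;
	}

	/* Per-VM stats show up as one debugfs file per descriptor;
	 * the file name matches the kvm_vm_stat field name. */
	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/kvm/%s/max_mmu_rmap_size", argv[1]);

	f = fopen(path, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%llu", &max_rmap) == 1)
		printf("max_mmu_rmap_size = %llu\n", max_rmap);
	fclose(f);
	return 0;
}

Sampling this while a workload populates shadow-MMU mappings shows how long
the longest rmap chain has grown, which is the behavior the stat is meant to
expose.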