Add a new statistic, max_mmu_rmap_size, which stores the maximum size of
the rmap seen for the VM.

Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/mmu/mmu.c          | 2 ++
 arch/x86/kvm/x86.c              | 1 +
 3 files changed, 4 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55efbacfc244..937bc8021f7a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1138,6 +1138,7 @@ struct kvm_vm_stat {
 	ulong lpages;
 	ulong nx_lpage_splits;
 	ulong max_mmu_page_hash_collisions;
+	ulong max_mmu_rmap_size;
 };
 
 struct kvm_vcpu_stat {
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 5e60b00e8e50..6dd338738118 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2602,6 +2602,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (is_shadow_present_pte(*sptep)) {
 		if (!was_rmapped) {
 			rmap_count = rmap_add(vcpu, sptep, gfn);
+			if (rmap_count > vcpu->kvm->stat.max_mmu_rmap_size)
+				vcpu->kvm->stat.max_mmu_rmap_size = rmap_count;
 			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 				rmap_recycle(vcpu, sptep, gfn);
 		}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bbc4e04e67ad..4aa3cc6ae5d4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -257,6 +257,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	VM_STAT("largepages", lpages, .mode = 0444),
 	VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
 	VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
+	VM_STAT("max_mmu_rmap_size", max_mmu_rmap_size),
 	{ NULL }
 };
 
-- 
2.31.1
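
[Not part of the patch: a minimal userspace sketch for sanity-checking the
new counter after applying this. It assumes debugfs is mounted at the
conventional /sys/kernel/debug, where the aggregate files for the other
VM_STAT entries in debugfs_entries also appear; reading them typically
requires root.]

/*
 * Sketch only: read the VM-wide aggregate of the new stat from debugfs.
 * Assumes a kernel with this patch applied and debugfs mounted at the
 * usual /sys/kernel/debug location.
 */
#include <stdio.h>

int main(void)
{
	unsigned long max_rmap;
	FILE *f = fopen("/sys/kernel/debug/kvm/max_mmu_rmap_size", "r");

	if (!f) {
		/* Likely causes: not root, debugfs not mounted, patch absent. */
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%lu", &max_rmap) == 1)
		printf("max_mmu_rmap_size: %lu\n", max_rmap);
	fclose(f);
	return 0;
}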