Signed-off-by: Ahmed Abd El Mawgood <ahmedsoliman@xxxxxxxxxxx> --- arch/x86/kvm/mmu.c | 7 +++++++ arch/x86/kvm/mmu.h | 1 + 2 files changed, 8 insertions(+) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index b67d743c33..a300e4acb8 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1024,6 +1024,13 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); } +gfn_t spte_to_gfn(u64 *spte) +{ + struct kvm_mmu_page *sp; + + sp = page_header(__pa(spte)); + return kvm_mmu_page_get_gfn(sp, spte - sp->spt); +} static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) { diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index c7b333147c..49d7f2f002 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -211,4 +211,5 @@ void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn); int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu); +gfn_t spte_to_gfn(u64 *spte); #endif -- 2.19.2