Re: [PATCH 06/11] kvm: x86: Map guest PCIDs to host PCIDs

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



----- junaids@xxxxxxxxxx wrote:

> When using shadow paging mode, map guest PCIDs 1:1 to host PCIDs
> instead
> of mapping them all to PCID 0.
> 
> Signed-off-by: Junaid Shahid <junaids@xxxxxxxxxx>
> ---
>  arch/x86/kvm/mmu.c |  7 +++++--
>  arch/x86/kvm/mmu.h | 14 ++++++++++++++
>  2 files changed, 19 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 611ecc37e5d2..d93eba7b8787 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -4037,6 +4037,8 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t old_cr3)
>  		swap(mmu->root_hpa, mmu->prev_root_hpa);
>  
>  		if (new_cr3 == mmu->prev_cr3 && VALID_PAGE(mmu->root_hpa)) {
> +			unsigned long pcid = kvm_get_active_pcid(vcpu);
> +
>  			/*
>  			 * It is possible that the cached previous root page is
>  			 * obsolete because of a change in the MMU
> @@ -4048,7 +4050,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t old_cr3)
>  			kvm_mmu_sync_roots(vcpu);
>  			__clear_sp_write_flooding_count(
>  						page_header(mmu->root_hpa));
> -			mmu->set_cr3(vcpu, mmu->root_hpa, false);
> +			mmu->set_cr3(vcpu, mmu->root_hpa | pcid, false);
>  			return true;
>  		}
>  	}
> @@ -4765,6 +4767,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
>  int kvm_mmu_load(struct kvm_vcpu *vcpu)
>  {
>  	int r;
> +	unsigned long pcid = kvm_get_active_pcid(vcpu);
>  
>  	r = mmu_topup_memory_caches(vcpu);
>  	if (r)
> @@ -4774,7 +4777,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
>  	if (r)
>  		goto out;
>  	/* set_cr3() should ensure TLB has been flushed */
> -	vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa, false);
> +	vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa | pcid, false);
>  out:
>  	return r;
>  }
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 5b408c0ad612..336fcff60091 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -85,6 +85,20 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
>  	return kvm_mmu_load(vcpu);
>  }
>  
> +static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
> +{
> +	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);
> +
> +	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
> +	       ? cr3 & X86_CR3_PCID_MASK
> +	       : 0;
> +}
> +
> +static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
> +{
> +	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
> +}
> +
>  /*
>   * Currently, we have two sorts of write-protection, a) the first one
>   * write-protects guest page to sync the guest modification, b) another one is
> -- 
> 2.17.0.441.gb46fe60e1d-goog

Reviewed-by: Liran Alon <liran.alon@xxxxxxxxxx>




[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]

  Powered by Linux