Re: [PATCH v3 08/10] KVM: x86/mmu: Split out TDP MMU page fault handling

On Wed, Sep 21, 2022 at 10:35:44AM -0700,
David Matlack <dmatlack@xxxxxxxxxx> wrote:

> Split out the page fault handling for the TDP MMU to a separate
> function.  This creates some duplicate code, but makes the TDP MMU fault
> handler simpler to read by eliminating branches and will enable future
> cleanups by allowing the TDP MMU and non-TDP MMU fault paths to diverge.
> 
> Only compile in the TDP MMU fault handler for 64-bit builds since
> kvm_tdp_mmu_map() does not exist in 32-bit builds.
> 
> No functional change intended.
> 
> Signed-off-by: David Matlack <dmatlack@xxxxxxxxxx>
> ---
>  arch/x86/kvm/mmu/mmu.c | 62 ++++++++++++++++++++++++++++++++----------
>  1 file changed, 48 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index dc203973de83..b36f351138f7 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -4238,7 +4238,6 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
>  
>  static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
>  {
> -	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
>  	int r;
>  
>  	if (page_fault_handle_page_track(vcpu, fault))
> @@ -4257,11 +4256,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
>  		return r;
>  
>  	r = RET_PF_RETRY;
> -
> -	if (is_tdp_mmu_fault)
> -		read_lock(&vcpu->kvm->mmu_lock);
> -	else
> -		write_lock(&vcpu->kvm->mmu_lock);
> +	write_lock(&vcpu->kvm->mmu_lock);
>  
>  	if (is_page_fault_stale(vcpu, fault))
>  		goto out_unlock;
> @@ -4270,16 +4265,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
>  	if (r)
>  		goto out_unlock;
>  
> -	if (is_tdp_mmu_fault)
> -		r = kvm_tdp_mmu_map(vcpu, fault);
> -	else
> -		r = __direct_map(vcpu, fault);
> +	r = __direct_map(vcpu, fault);
>  
>  out_unlock:
> -	if (is_tdp_mmu_fault)
> -		read_unlock(&vcpu->kvm->mmu_lock);
> -	else
> -		write_unlock(&vcpu->kvm->mmu_lock);
> +	write_unlock(&vcpu->kvm->mmu_lock);
>  	kvm_release_pfn_clean(fault->pfn);
>  	return r;
>  }
> @@ -4327,6 +4316,46 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
>  }
>  EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
>  
> +#ifdef CONFIG_X86_64
> +static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
> +				  struct kvm_page_fault *fault)
> +{
> +	int r;
> +
> +	if (page_fault_handle_page_track(vcpu, fault))
> +		return RET_PF_EMULATE;
> +
> +	r = fast_page_fault(vcpu, fault);
> +	if (r != RET_PF_INVALID)
> +		return r;
> +
> +	r = mmu_topup_memory_caches(vcpu, false);
> +	if (r)
> +		return r;
> +
> +	r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
> +	if (r != RET_PF_CONTINUE)
> +		return r;
> +
> +	r = RET_PF_RETRY;
> +	read_lock(&vcpu->kvm->mmu_lock);
> +
> +	if (is_page_fault_stale(vcpu, fault))
> +		goto out_unlock;
> +
> +	r = make_mmu_pages_available(vcpu);
> +	if (r)
> +		goto out_unlock;
> +
> +	r = kvm_tdp_mmu_map(vcpu, fault);
> +
> +out_unlock:
> +	read_unlock(&vcpu->kvm->mmu_lock);
> +	kvm_release_pfn_clean(fault->pfn);
> +	return r;
> +}
> +#endif
> +
>  int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
>  {
>  	/*
> @@ -4351,6 +4380,11 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
>  		}
>  	}
>  
> +#ifdef CONFIG_X86_64
> +	if (tdp_mmu_enabled)
> +		return kvm_tdp_mmu_page_fault(vcpu, fault);
> +#endif
> +
>  	return direct_page_fault(vcpu, fault);
>  }
>  
> -- 
> 2.37.3.998.g577e59143f-goog
> 

Reviewed-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
-- 
Isaku Yamahata <isaku.yamahata@xxxxxxxxx>