Re: KVM: x86: handle invalid root_hpa everywhere

On 03/01/2014 20:09, Marcelo Tosatti wrote:
> 
> Rom Freiman <rom@xxxxxxxxxxxxxxx> notes other code paths vulnerable to 
> the bug fixed by commit 989c6b34f6a9480e397b.
> 
> Signed-off-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 31a5702..e50425d 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2832,6 +2832,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
>  	bool ret = false;
>  	u64 spte = 0ull;
>  
> +	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
> +		return false;
> +
>  	if (!page_fault_can_be_fast(error_code))
>  		return false;
>  
> @@ -3227,6 +3230,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
>  	struct kvm_shadow_walk_iterator iterator;
>  	u64 spte = 0ull;
>  
> +	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
> +		return spte;
> +
>  	walk_shadow_page_lockless_begin(vcpu);
>  	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
>  		if (!is_shadow_present_pte(spte))
> @@ -4513,6 +4519,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
>  	u64 spte;
>  	int nr_sptes = 0;
>  
> +	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
> +		return nr_sptes;
> +
>  	walk_shadow_page_lockless_begin(vcpu);
>  	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
>  		sptes[iterator.level-1] = spte;
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index ad75d77..cba218a 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -569,6 +569,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
>  	if (FNAME(gpte_changed)(vcpu, gw, top_level))
>  		goto out_gpte_changed;
>  
> +	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
> +		goto out_gpte_changed;
> +
>  	for (shadow_walk_init(&it, vcpu, addr);
>  	     shadow_walk_okay(&it) && it.level > gw->level;
>  	     shadow_walk_next(&it)) {
> @@ -820,6 +823,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
>  	 */
>  	mmu_topup_memory_caches(vcpu);
>  
> +	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
> +		WARN_ON(1);
> +		return;
> +	}
> +
>  	spin_lock(&vcpu->kvm->mmu_lock);
>  	for_each_shadow_entry(vcpu, gva, iterator) {
>  		level = iterator.level;
> 
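
For reference, all five hunks apply the same early-out guard. A minimal
sketch of that pattern, assuming the VALID_PAGE()/INVALID_PAGE definitions
from arch/x86/kvm/mmu.h of this era (the walker() function itself is
hypothetical, for illustration only):

	/* From arch/x86/kvm/mmu.h: a root is invalid when all bits are set. */
	#define INVALID_PAGE	(~(hpa_t)0)
	#define VALID_PAGE(x)	((x) != INVALID_PAGE)

	static int walker(struct kvm_vcpu *vcpu, u64 addr)
	{
		/*
		 * root_hpa is reset to INVALID_PAGE when the roots are
		 * freed (e.g. by mmu_free_roots()), which can race with
		 * a lockless walk; bail out before touching the root.
		 */
		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
			return 0;

		/* ... the shadow page table walk is safe past this point ... */
		return 1;
	}

The fetch and invlpg paths goto/return instead of returning a value, but
the guard itself is identical.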

Applied to kvm/queue, thanks.

Paolo