Re: KVM: MMU: handle invalid root_hpa at __direct_map

Hi everybody,
Marcelo, your suggestion above should work together with the same
patch applied to __direct_map.

After running some tests on different kernels, the !VALID_PAGE case
occurs as follows:

Linux localhost.localdomain 3.12.5-200.fc19.x86_64 #1 SMP Tue Dec 17
22:21:14 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux:
INVALID PAGE kvm/arch/x86/kvm/paging_tmpl.h:FNAME(fetch):573: ffffffffffffffff

Linux localhost.localdomain
3.11.8-200.strato0002.fc19.strato.44671928e2e2.x86_64 #1 SMP Sun Jan 5
18:30:38 IST 2014 x86_64 x86_64 x86_64 GNU/Linux:
INVALID PAGE arch/x86/kvm/mmu.c:__direct_map:2695: ffffffffffffffff

So you should probably add both patches to KVM.
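
For context, the ffffffffffffffff printed in the log lines above is
simply INVALID_PAGE, the value root_hpa is reset to when the shadow
roots are freed, and it is exactly what the VALID_PAGE() checks in the
patches catch. A minimal sketch of the definitions involved
(paraphrased from the KVM headers of this era, so treat the exact
location and spelling as an assumption):

    /* hpa_t is a u64, so INVALID_PAGE is the all-ones value,
     * i.e. the ffffffffffffffff seen in the warnings above. */
    #define INVALID_PAGE   (((hpa_t)0) - 1)
    #define VALID_PAGE(x)  ((x) != INVALID_PAGE)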

Here is my complete patch for 3.11, if you like:

diff --git a/kvm/arch/x86/kvm/mmu.c b/kvm/arch/x86/kvm/mmu.c
index 9e9285a..c38e480 100644
--- a/kvm/arch/x86/kvm/mmu.c
+++ b/kvm/arch/x86/kvm/mmu.c
@@ -2691,6 +2691,11 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
        int emulate = 0;
        gfn_t pseudo_gfn;

+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+               pgprintk(KERN_WARNING "invalid page access %s:%s:%d: %llx\n", __FILE__, __func__, __LINE__, vcpu->arch.mmu.root_hpa);
+               return 0;
+       }
+
        for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
                if (iterator.level == level) {
                        mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
@@ -2861,6 +2866,11 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
        bool ret = false;
        u64 spte = 0ull;

+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+               pgprintk(KERN_WARNING "invalid page access %s:%s:%d: %llx\n", __FILE__, __func__, __LINE__, vcpu->arch.mmu.root_hpa);
+               return false;
+       }
+
        if (!page_fault_can_be_fast(vcpu, error_code))
                return false;

@@ -3255,6 +3265,11 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
        struct kvm_shadow_walk_iterator iterator;
        u64 spte = 0ull;

+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+               pgprintk(KERN_WARNING "invalid page access %s:%s:%d: %llx\n", __FILE__, __func__, __LINE__, vcpu->arch.mmu.root_hpa);
+               return spte;
+       }
+
        walk_shadow_page_lockless_begin(vcpu);
        for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
                if (!is_shadow_present_pte(spte))
@@ -4525,6 +4540,11 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
        u64 spte;
        int nr_sptes = 0;

+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+               pgprintk(KERN_WARNING "invalid page access %s:%s:%d: %llx\n", __FILE__, __func__, __LINE__, vcpu->arch.mmu.root_hpa);
+               return nr_sptes;
+       }
+
        walk_shadow_page_lockless_begin(vcpu);
        for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
                sptes[iterator.level-1] = spte;

diff --git a/kvm/arch/x86/kvm/paging_tmpl.h b/kvm/arch/x86/kvm/paging_tmpl.h
index 7769699..202a1fc 100644
--- a/kvm/arch/x86/kvm/paging_tmpl.h
+++ b/kvm/arch/x86/kvm/paging_tmpl.h
@@ -423,6 +423,11 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
        if (FNAME(gpte_changed)(vcpu, gw, top_level))
                goto out_gpte_changed;

+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+               pgprintk(KERN_WARNING "invalid page access %s:%s:%d: %llx\n", __FILE__, __func__, __LINE__, vcpu->arch.mmu.root_hpa);
+               goto out_gpte_changed;
+       }
+
        for (shadow_walk_init(&it, vcpu, addr);
             shadow_walk_okay(&it) && it.level > gw->level;
             shadow_walk_next(&it)) {
@@ -674,6 +679,12 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
         */
        mmu_topup_memory_caches(vcpu);

+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+               pgprintk(KERN_WARNING "invalid page access %s:%s:%d: %llx\n", __FILE__, __func__, __LINE__, vcpu->arch.mmu.root_hpa);
+               WARN_ON(1);
+               return;
+       }
+
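
One note on the pgprintk() calls in the patch above: pgprintk() is a
debug macro in mmu.c that compiles to a no-op unless MMU_DEBUG is
defined, so these warnings only appear on debug builds. Roughly
(paraphrased from mmu.c of this era; treat the exact form as an
assumption):

    #ifdef MMU_DEBUG
    /* dbg is a module parameter, so a debug build can still toggle
     * the tracing at runtime. */
    #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
    #else
    #define pgprintk(x...) do { } while (0)
    #endif

If the message should also show up on production kernels, calling
printk() directly would be the safer choice.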


Another issue I'm struggling with is the following bug:
https://bugzilla.redhat.com/show_bug.cgi?id=1052861

All I'm trying to do there is to run virt-v2v in a nested
environment. Maybe you have an idea?
It's also related to memory pressure on L2. Of course L0 does not
crash (I'm running it with the above patch), but L2 crashes during the
conversion process.

Thanks,
Rom

On Fri, Dec 27, 2013 at 9:43 PM, Marcelo Tosatti <mtosatti@xxxxxxxxxx> wrote:
> On Sun, Dec 22, 2013 at 12:56:49PM -0200, Marcelo Tosatti wrote:
>> On Sun, Dec 22, 2013 at 11:17:21AM +0200, Rom Freiman wrote:
>> > Hello everyone,
>> >
>> > I've been chasing this bug for a while.
>> >
>> > According to my research, this bug fix works fine on the
>> > 3.11.9-200.fc19.x86_64 kernel (I also arrived at an almost identical
>> > solution, which did resolve the crash).
>> >
>> > The problem is that this patch does not seem to work on 3.13.0-rc2+:
>> > the code flow looks different there, and it crashes in ept_page_fault
>> > without ever reaching __direct_map:
>>
>> Yep, similar problem. Care to send a patch against
>>
>> FNAME(page_fault), kvm_mmu_get_spte_hierarchy?
>>
>> Maybe there are more vulnerable sites; we should secure them all.
>
> Should these cover it all?
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 31a5702..e50425d 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2832,6 +2832,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
>         bool ret = false;
>         u64 spte = 0ull;
>
> +       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
> +               return false;
> +
>         if (!page_fault_can_be_fast(error_code))
>                 return false;
>
> @@ -3227,6 +3230,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
>         struct kvm_shadow_walk_iterator iterator;
>         u64 spte = 0ull;
>
> +       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
> +               return spte;
> +
>         walk_shadow_page_lockless_begin(vcpu);
>         for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
>                 if (!is_shadow_present_pte(spte))
> @@ -4513,6 +4519,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
>         u64 spte;
>         int nr_sptes = 0;
>
> +       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
> +               return nr_sptes;
> +
>         walk_shadow_page_lockless_begin(vcpu);
>         for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
>                 sptes[iterator.level-1] = spte;
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index ad75d77..cba218a 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -569,6 +569,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
>         if (FNAME(gpte_changed)(vcpu, gw, top_level))
>                 goto out_gpte_changed;
>
> +       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
> +               goto out_gpte_changed;
> +
>         for (shadow_walk_init(&it, vcpu, addr);
>              shadow_walk_okay(&it) && it.level > gw->level;
>              shadow_walk_next(&it)) {
> @@ -820,6 +823,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
>          */
>         mmu_topup_memory_caches(vcpu);
>
> +       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
> +               WARN_ON(1);
> +               return;
> +       }
> +
>         spin_lock(&vcpu->kvm->mmu_lock);
>         for_each_shadow_entry(vcpu, gva, iterator) {
>                 level = iterator.level;