On Thu, Jul 21, 2022, Lai Jiangshan wrote:
> On Wed, Jul 20, 2022 at 3:52 AM Sean Christopherson <seanjc@xxxxxxxxxx> wrote:
> >
> > > ---
> > >  arch/x86/kvm/mmu/mmu.c | 22 +++++++++++++---------
> > >  1 file changed, 13 insertions(+), 9 deletions(-)
> > >
> > > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > > index f35fd5c59c38..2446ede0b7b9 100644
> > > --- a/arch/x86/kvm/mmu/mmu.c
> > > +++ b/arch/x86/kvm/mmu/mmu.c
> > > @@ -1794,19 +1794,23 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
> > >  				return -ENOSPC;
> > >
> > >  			ret = __mmu_unsync_walk(child, pvec);
> > > -			if (!ret) {
> > > -				clear_unsync_child_bit(sp, i);
> > > -				continue;
> > > -			} else if (ret > 0) {
> > > -				nr_unsync_leaf += ret;
> > > -			} else
> > > +			if (ret < 0)
> > >  				return ret;
> > > -		} else if (child->unsync) {
> > > +			nr_unsync_leaf += ret;
> > > +		}
> > > +
> > > +		/*
> > > +		 * Clear unsync bit for @child directly if @child is fully
> > > +		 * walked and all the unsync shadow pages descended from
> > > +		 * @child (including itself) are added into @pvec, the caller
> > > +		 * must sync or zap all the unsync shadow pages in @pvec.
> > > +		 */
> > > +		clear_unsync_child_bit(sp, i);
> > > +		if (child->unsync) {
> > >  			nr_unsync_leaf++;
> > >  			if (mmu_pages_add(pvec, child, i))
> >
> > This ordering is wrong, no?  If the child itself is unsync and can't be added to
> > @pvec, i.e. fails here, then clearing its bit in unsync_child_bitmap is wrong.
>
> mmu_pages_add() can always successfully add the page to @pvec and
> the caller needs to guarantee there is enough room to do so.
>
> When it returns true, it means it will fail if you keep adding pages.

Oof, that's downright evil.

As prep work, can you fold in the attached patches earlier in this series?
Then this patch can yield:

	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
		struct kvm_mmu_page *child;
		u64 ent = sp->spt[i];

		if (!is_shadow_present_pte(ent) || is_large_pte(ent))
			goto clear_unsync_child;

		child = to_shadow_page(ent & SPTE_BASE_ADDR_MASK);
		if (!child->unsync && !child->unsync_children)
			goto clear_unsync_child;

		if (mmu_is_page_vec_full(pvec))
			return -ENOSPC;

		mmu_pages_add(pvec, child, i);

		if (child->unsync_children) {
			ret = __mmu_unsync_walk(child, pvec);
			if (!ret)
				goto clear_unsync_child;
			else if (ret > 0)
				nr_unsync_leaf += ret;
			else
				return ret;
		} else {
			nr_unsync_leaf++;
		}

clear_unsync_child:
		/*
		 * Clear the unsync info, the child is either already sync
		 * (bitmap is stale) or is guaranteed to be zapped/synced by
		 * the caller before mmu_lock is released.  Note, the caller is
		 * required to zap/sync all entries in @pvec even if an error
		 * is returned!
		 */
		clear_unsync_child_bit(sp, i);
	}
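[Editor's aside: the subtlety being debated above can be made concrete with a
minimal, self-contained sketch.  The names and types below are simplified
stand-ins, not the actual KVM code: the point is only that the pre-patch
mmu_pages_add() always stores the page, and its return value means "the vector
has just become full", not "this add failed".]

#include <stdbool.h>
#include <stdio.h>

#define PAGE_VEC_CAP 4			/* stand-in for KVM_PAGE_ARRAY_NR */

struct page_vec {
	int pages[PAGE_VEC_CAP];
	unsigned int nr;
};

/*
 * Sketch of the pre-patch contract: the page is ALWAYS stored (the caller
 * must have guaranteed there is room), and the return value answers "did
 * the vector just reach capacity?", i.e. "would the NEXT add overflow?".
 */
static bool page_vec_add(struct page_vec *pvec, int page)
{
	pvec->pages[pvec->nr] = page;
	pvec->nr++;
	return pvec->nr == PAGE_VEC_CAP;
}

int main(void)
{
	struct page_vec pvec = { .nr = 0 };
	int i;

	for (i = 0; i < PAGE_VEC_CAP; i++) {
		if (page_vec_add(&pvec, i)) {
			/*
			 * Reached on the final iteration: this page WAS added
			 * and still has to be processed, even though the
			 * "error" path is taken.  Reading the return value as
			 * "the add failed" is exactly the trap the review
			 * comment is about.
			 */
			printf("vector full after adding page %d (nr=%u)\n",
			       i, pvec.nr);
		}
	}
	return 0;
}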
>From f2968d1afb08708c8292808b88aa915ec714e154 Mon Sep 17 00:00:00 2001
From: Sean Christopherson <seanjc@xxxxxxxxxx>
Date: Thu, 21 Jul 2022 08:38:35 -0700
Subject: [PATCH 1/2] KVM: x86/mmu: Separate "page vec is full" from adding a
 page to the array

Move the check for a full "page vector" out of mmu_pages_add(); returning
true/false (effectively) looks a _lot_ like returning success/fail, which
is very misleading and will be even more misleading when a future patch
clears the unsync child bit as soon as a page is added to the vector (as
opposed to clearing the bit when the vector is processed by the caller).

Checking whether the vector is full after adding the previous page is
also sub-optimal, e.g. KVM unnecessarily returns an error if the vector
is full but there are no more unsync pages to process.  Separating the
check from the "add" will allow fixing this quirk in a future patch.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 52664c3caaab..ac60a52044ef 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1741,20 +1741,26 @@ struct kvm_mmu_pages {
 	unsigned int nr;
 };
 
-static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
+static bool mmu_is_page_vec_full(struct kvm_mmu_pages *pvec)
+{
+	return (pvec->nr == KVM_PAGE_ARRAY_NR);
+}
+
+static void mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
 			 int idx)
 {
 	int i;
 
-	if (sp->unsync)
-		for (i=0; i < pvec->nr; i++)
+	if (sp->unsync) {
+		for (i = 0; i < pvec->nr; i++) {
 			if (pvec->page[i].sp == sp)
-				return 0;
+				return;
+		}
+	}
 
 	pvec->page[pvec->nr].sp = sp;
 	pvec->page[pvec->nr].idx = idx;
 	pvec->nr++;
-	return (pvec->nr == KVM_PAGE_ARRAY_NR);
 }
 
 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
@@ -1781,7 +1787,9 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 		child = to_shadow_page(ent & SPTE_BASE_ADDR_MASK);
 
 		if (child->unsync_children) {
-			if (mmu_pages_add(pvec, child, i))
+			mmu_pages_add(pvec, child, i);
+
+			if (mmu_is_page_vec_full(pvec))
 				return -ENOSPC;
 
 			ret = __mmu_unsync_walk(child, pvec);
@@ -1794,7 +1802,9 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 				return ret;
 		} else if (child->unsync) {
 			nr_unsync_leaf++;
-			if (mmu_pages_add(pvec, child, i))
+			mmu_pages_add(pvec, child, i);
+
+			if (mmu_is_page_vec_full(pvec))
 				return -ENOSPC;
 		} else
 			clear_unsync_child_bit(sp, i);
-- 
2.37.1.359.gd136c6c3e2-goog
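[Editor's aside: a brief note on why the void return in the patch above is
workable.  The sketch below uses simplified, hypothetical types rather than
the real KVM structures: an unsync page that is already in the vector is
simply skipped (the same unsync page may be encountered more than once during
a walk), so the add itself genuinely cannot fail and all that remains for the
caller is the separate fullness check.]

#include <stdbool.h>

#define PAGE_VEC_CAP 4			/* stand-in for KVM_PAGE_ARRAY_NR */

struct shadow_page {
	bool unsync;
};

struct page_vec_entry {
	struct shadow_page *sp;
	int idx;
};

struct page_vec {
	struct page_vec_entry page[PAGE_VEC_CAP];
	unsigned int nr;
};

/* Separate predicate: "is the vector full?" */
static bool page_vec_is_full(const struct page_vec *pvec)
{
	return pvec->nr == PAGE_VEC_CAP;
}

/*
 * Mirrors the post-patch shape: no return value.  A duplicate unsync page is
 * skipped, everything else is appended unconditionally; the caller must have
 * checked for room via page_vec_is_full() if it intends to keep adding.
 */
static void page_vec_add(struct page_vec *pvec, struct shadow_page *sp, int idx)
{
	unsigned int i;

	if (sp->unsync) {
		for (i = 0; i < pvec->nr; i++) {
			if (pvec->page[i].sp == sp)
				return;
		}
	}

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
}

int main(void)
{
	struct page_vec pvec = { .nr = 0 };
	struct shadow_page child = { .unsync = true };

	page_vec_add(&pvec, &child, 0);
	page_vec_add(&pvec, &child, 1);	/* duplicate: skipped, nr stays 1 */

	return pvec.nr == 1 ? 0 : 1;
}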
>From c8b0d983791ef783165bbf2230ebc41145bf052e Mon Sep 17 00:00:00 2001
From: Sean Christopherson <seanjc@xxxxxxxxxx>
Date: Thu, 21 Jul 2022 08:49:37 -0700
Subject: [PATCH 2/2] KVM: x86/mmu: Check for full page vector _before_ adding
 a new page

Check for a full page vector _before_ adding a new page to the vector,
instead of checking after the page has already been added, i.e. bail if
and only if the vector is full _and_ another page needs to be added.
Previously, KVM would bail with -ENOSPC as soon as the vector became
full, even if there were no more unsync pages to process.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index ac60a52044ef..aca9a8e6c626 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1785,13 +1785,17 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 		}
 
 		child = to_shadow_page(ent & SPTE_BASE_ADDR_MASK);
+		if (!child->unsync && !child->unsync_children) {
+			clear_unsync_child_bit(sp, i);
+			continue;
+		}
+
+		if (mmu_is_page_vec_full(pvec))
+			return -ENOSPC;
+
+		mmu_pages_add(pvec, child, i);
 
 		if (child->unsync_children) {
-			mmu_pages_add(pvec, child, i);
-
-			if (mmu_is_page_vec_full(pvec))
-				return -ENOSPC;
-
 			ret = __mmu_unsync_walk(child, pvec);
 			if (!ret) {
 				clear_unsync_child_bit(sp, i);
@@ -1800,14 +1804,9 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 				nr_unsync_leaf += ret;
 			} else
 				return ret;
-		} else if (child->unsync) {
+		} else {
 			nr_unsync_leaf++;
-			mmu_pages_add(pvec, child, i);
-
-			if (mmu_is_page_vec_full(pvec))
-				return -ENOSPC;
-		} else
-			clear_unsync_child_bit(sp, i);
+		}
 	}
 
 	return nr_unsync_leaf;
-- 
2.37.1.359.gd136c6c3e2-goog
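[Editor's aside: to see the behavioral difference this reordering makes, here
is a hedged, standalone comparison, again with simplified, hypothetical types
rather than the real KVM structures and ignoring the recursion in the actual
unsync walk.  Filling the vector with exactly its capacity's worth of pages
spuriously fails when the fullness check follows the add, but succeeds when
the check precedes it, since -ENOSPC is then returned only when another page
actually needs to be added and there is no room.]

#include <assert.h>
#include <errno.h>
#include <stdbool.h>

#define PAGE_VEC_CAP 4			/* stand-in for KVM_PAGE_ARRAY_NR */

struct page_vec {
	int pages[PAGE_VEC_CAP];
	unsigned int nr;
};

static bool page_vec_is_full(const struct page_vec *pvec)
{
	return pvec->nr == PAGE_VEC_CAP;
}

static void page_vec_add(struct page_vec *pvec, int page)
{
	pvec->pages[pvec->nr++] = page;
}

/* Pre-patch ordering: add, then check whether the vector just filled up. */
static int walk_check_after_add(struct page_vec *pvec, int npages)
{
	for (int i = 0; i < npages; i++) {
		page_vec_add(pvec, i);
		if (page_vec_is_full(pvec))
			return -ENOSPC;	/* spurious on the very last page */
	}
	return pvec->nr;
}

/* Post-patch ordering: bail only if a page must be added and there is no room. */
static int walk_check_before_add(struct page_vec *pvec, int npages)
{
	for (int i = 0; i < npages; i++) {
		if (page_vec_is_full(pvec))
			return -ENOSPC;
		page_vec_add(pvec, i);
	}
	return pvec->nr;
}

int main(void)
{
	struct page_vec a = { .nr = 0 }, b = { .nr = 0 };

	/* Exactly-full case: the old ordering bails, the new ordering completes. */
	assert(walk_check_after_add(&a, PAGE_VEC_CAP) == -ENOSPC);
	assert(walk_check_before_add(&b, PAGE_VEC_CAP) == PAGE_VEC_CAP);
	return 0;
}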