On Mon, Dec 05, 2022 at 02:23:35PM +0000, "Wang, Wei W" <wei.w.wang@xxxxxxxxx> wrote:

> On Sunday, October 30, 2022 2:23 PM, Yamahata, Isaku wrote:
> > From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
> >
> > Some KVM MMU operations (dirty page logging, page migration, aging page)
> > aren't supported for private GFNs (yet) with the first generation of TDX.
> > Silently return on unsupported TDX KVM MMU operations.
> >
> > Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
> > ---
> >  arch/x86/kvm/mmu/mmu.c     |  3 ++
> >  arch/x86/kvm/mmu/tdp_mmu.c | 73 +++++++++++++++++++++++++++++++++++---
> >  arch/x86/kvm/x86.c         |  3 ++
> >  3 files changed, 74 insertions(+), 5 deletions(-)
> >
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index 02e7b5cf3231..efc3b3f2dd12 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -6588,6 +6588,9 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
> >  	for_each_rmap_spte(rmap_head, &iter, sptep) {
> >  		sp = sptep_to_sp(sptep);
> >
> > +		/* Private page dirty logging is not supported yet. */
> > +		KVM_BUG_ON(is_private_sptep(sptep), kvm);
> > +
> >  		/*
> >  		 * We cannot do huge page mapping for indirect shadow pages,
> >  		 * which are found on the last rmap (level = 1) when not using
> > diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> > index 0e053b96444a..4b207ce83ffe 100644
> > --- a/arch/x86/kvm/mmu/tdp_mmu.c
> > +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> > @@ -1469,7 +1469,8 @@ typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
> >
> >  static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
> >  						   struct kvm_gfn_range *range,
> > -						   tdp_handler_t handler)
> > +						   tdp_handler_t handler,
> > +						   bool only_shared)
> >  {
> >  	struct kvm_mmu_page *root;
> >  	struct tdp_iter iter;
> > @@ -1480,9 +1481,23 @@ static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
> >  	 * into this helper allow blocking; it'd be dead, wasteful code.
> >  	 */
> >  	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
> > +		gfn_t start;
> > +		gfn_t end;
> > +
> > +		if (only_shared && is_private_sp(root))
> > +			continue;
> > +
> >  		rcu_read_lock();
> >
> > -		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
> > +		/*
> > +		 * For TDX shared mapping, set GFN shared bit to the range,
> > +		 * so the handler() doesn't need to set it, to avoid duplicated
> > +		 * code in multiple handler()s.
> > +		 */
> > +		start = kvm_gfn_for_root(kvm, root, range->start);
> > +		end = kvm_gfn_for_root(kvm, root, range->end);
> > +
> > +		tdp_root_for_each_leaf_pte(iter, root, start, end)
> >  			ret |= handler(kvm, &iter, range);
> >
> >  		rcu_read_unlock();
> > @@ -1526,7 +1541,12 @@ static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
> >
> >  bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
> >  {
> > -	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
> > +	/*
> > +	 * First TDX generation doesn't support clearing A bit for private
> > +	 * mapping, since there's no secure EPT API to support it. However
> > +	 * it's a legitimate request for TDX guest.
> > +	 */
> > +	return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range, true);
> >  }
> >
> >  static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
> > @@ -1537,7 +1557,8 @@ static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
> >
> >  bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
> >  {
> > -	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
> > +	/* The first TDX generation doesn't support A bit. */
> > +	return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn, true);
> >  }
> >
> >  static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
> > @@ -1582,8 +1603,11 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
> >  	 * No need to handle the remote TLB flush under RCU protection, the
> >  	 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
> >  	 * shadow page. See the WARN on pfn_changed in __handle_changed_spte().
> > +	 *
> > +	 * .change_pte() callback should not happen for private page, because
> > +	 * for now TDX private pages are pinned during VM's life time.
> >  	 */
> > -	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
> > +	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn, true);
> >  }
> >
> >  /*
> > @@ -1637,6 +1661,14 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
> >
> >  	lockdep_assert_held_read(&kvm->mmu_lock);
> >
> > +	/*
> > +	 * Because first TDX generation doesn't support write protecting private
> > +	 * mappings and kvm_arch_dirty_log_supported(kvm) = false, it's a bug
> > +	 * to reach here for guest TD.
> > +	 */
> > +	if (WARN_ON_ONCE(!kvm_arch_dirty_log_supported(kvm)))
> > +		return false;
> > +
> >  	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
> >  		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
> >  			     slot->base_gfn + slot->npages, min_level);
> > @@ -1902,6 +1934,14 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
> >
> >  	lockdep_assert_held_read(&kvm->mmu_lock);
> >
> > +	/*
> > +	 * First TDX generation doesn't support clearing dirty bit,
> > +	 * since there's no secure EPT API to support it. It is a
> > +	 * bug to reach here for TDX guest.
> > +	 */
> > +	if (WARN_ON_ONCE(!kvm_arch_dirty_log_supported(kvm)))
> > +		return false;
> > +
>
> It might not be a good choice to intercept everywhere in kvm_mmu just as tdx
> doesn't support it. I'm thinking maybe we could do the check in tdx.c, which is
> much simpler. For example:
>
> @@ -2592,6 +2605,12 @@ static void tdx_handle_changed_private_spte(struct kvm *kvm,
>         lockdep_assert_held(&kvm->mmu_lock);
>
>         if (change->new.is_present) {
> +               /* Only flags change. This isn't supported currently. */
> +               KVM_BUG_ON(change->old.is_present, kvm);
>
> Then we can have kvm_arch_dirty_log_supported completely removed.
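
To spell out the shape of that proposal (only the two added lines are quoted
above), here is a self-contained sketch.  toy_spte, toy_spte_change and
toy_handle_changed_private_spte() are made-up stand-ins for the real KVM
structures, and assert() stands in for KVM_BUG_ON(); it illustrates the
design choice being debated (one check at the TDX-specific choke point
instead of a guard at every kvm_mmu entry point), not the actual
implementation:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the real KVM SPTE-change bookkeeping. */
struct toy_spte {
        bool is_present;
};

struct toy_spte_change {
        struct toy_spte old;
        struct toy_spte new;
};

static void toy_handle_changed_private_spte(const struct toy_spte_change *change)
{
        if (change->new.is_present) {
                /*
                 * Present -> present would mean only flags changed (write
                 * protection, dirty/accessed bits), which the first TDX
                 * generation can't do for private mappings, so flag it as
                 * a bug in this one central place.
                 */
                assert(!change->old.is_present);
                printf("map new private page\n");
        } else {
                printf("zap private page\n");
        }
}

int main(void)
{
        /* A legal transition: not-present -> present (new mapping). */
        struct toy_spte_change map = {
                .old = { .is_present = false },
                .new = { .is_present = true },
        };

        toy_handle_changed_private_spte(&map);
        return 0;
}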

Do you mean the WARN_ON_ONCE() calls?  If so, they can be removed from this
patch, because that code path should already be blocked by the
"if (!kvm_arch_dirty_log_supported(kvm))" check at the caller.
-- 
Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
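
As an aside, the only_shared dispatch the patch adds to
kvm_tdp_mmu_handle_gfn() can be modeled in isolation.  The sketch below is a
userspace toy, not kernel code: toy_root, toy_handle_gfn() and the SHARED_BIT
position are invented stand-ins for the real KVM types and for
kvm_gfn_for_root()'s shared-bit handling, but the control flow (skip private
roots for unsupported operations, stamp the shared bit into the GFN range for
shared roots) mirrors the hunks above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define SHARED_BIT (1ULL << 51)         /* illustrative bit position only */

struct toy_root {
        bool is_private;                /* private (secure-EPT) vs. shared root */
};

/* Mimics kvm_gfn_for_root(): stamp the shared bit for shared roots. */
static gfn_t toy_gfn_for_root(const struct toy_root *root, gfn_t gfn)
{
        return root->is_private ? gfn : gfn | SHARED_BIT;
}

typedef bool (*toy_handler_t)(gfn_t gfn);

static bool toy_handle_gfn(struct toy_root *roots, int nroots,
                           gfn_t start, gfn_t end,
                           toy_handler_t handler, bool only_shared)
{
        bool ret = false;
        int i;

        for (i = 0; i < nroots; i++) {
                struct toy_root *root = &roots[i];

                /* Operation unsupported on private mappings: skip the root. */
                if (only_shared && root->is_private)
                        continue;

                for (gfn_t gfn = toy_gfn_for_root(root, start);
                     gfn < toy_gfn_for_root(root, end); gfn++)
                        ret |= handler(gfn);
        }
        return ret;
}

static bool toy_age_gfn(gfn_t gfn)
{
        printf("aging gfn 0x%llx\n", (unsigned long long)gfn);
        return true;
}

int main(void)
{
        struct toy_root roots[] = {
                { .is_private = true },         /* skipped when only_shared */
                { .is_private = false },        /* visited, shared bit set */
        };

        /* Ages only the shared root's GFNs, like kvm_tdp_mmu_age_gfn_range(). */
        toy_handle_gfn(roots, 2, 0x10, 0x13, toy_age_gfn, true);
        return 0;
}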