Re: [PATCH Part2 v6 41/49] KVM: SVM: Add support to handle the RMP nested page fault

On Mon, Jun 20, 2022 at 4:13 PM Ashish Kalra <Ashish.Kalra@xxxxxxx> wrote:
>
> From: Brijesh Singh <brijesh.singh@xxxxxxx>
>
> When SEV-SNP is enabled in the guest, the hardware places restrictions on
> all memory accesses based on the contents of the RMP table. When the
> hardware encounters an RMP check failure caused by a guest memory access,
> it raises a #NPF. The error code contains additional information on the
> access type. See APM volume 2 for more details.
>
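
For reference, the RMP-related #NPF error code bits that this patch keys
off of can be decoded roughly as follows (a sketch using the
PFERR_GUEST_* masks this series relies on, not part of the patch; the
authoritative bit definitions are in APM volume 2):

	/*
	 * Illustrative decode of the SNP #NPF error code. Assumes the
	 * PFERR_GUEST_* masks defined by this series; for sketching
	 * only, the exact bit positions come from the APM.
	 */
	static void decode_snp_npf(u64 error_code)
	{
		if (error_code & PFERR_GUEST_RMP_MASK)
			pr_debug("#NPF caused by an RMP check failure\n");
		if (error_code & PFERR_GUEST_ENC_MASK)
			pr_debug("access was to private (encrypted) memory\n");
		if (error_code & PFERR_GUEST_SIZEM_MASK)
			pr_debug("RMP/NPT page size mismatch\n");
	}
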
> Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
> ---
>  arch/x86/kvm/svm/sev.c | 76 ++++++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/svm/svm.c | 14 +++++---
>  2 files changed, 86 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 4ed90331bca0..7fc0fad87054 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -4009,3 +4009,79 @@ void sev_post_unmap_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn)
>
>         spin_unlock(&sev->psc_lock);
>  }
> +
> +void handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
> +{
> +       int rmp_level, npt_level, rc, assigned;
> +       struct kvm *kvm = vcpu->kvm;
> +       gfn_t gfn = gpa_to_gfn(gpa);
> +       bool need_psc = false;
> +       enum psc_op psc_op;
> +       kvm_pfn_t pfn;
> +       bool private;
> +
> +       write_lock(&kvm->mmu_lock);
> +
> +       if (unlikely(!kvm_mmu_get_tdp_walk(vcpu, gpa, &pfn, &npt_level)))
> +               goto unlock;
> +
> +       assigned = snp_lookup_rmpentry(pfn, &rmp_level);
> +       if (unlikely(assigned < 0))
> +               goto unlock;
> +
> +       private = !!(error_code & PFERR_GUEST_ENC_MASK);
> +
> +       /*
> +        * If the fault was due to a size mismatch, or the NPT and RMP
> +        * page levels are not in sync, then use PSMASH to split the RMP
> +        * entry into 4K entries.
> +        */
> +       if ((error_code & PFERR_GUEST_SIZEM_MASK) ||
> +           (npt_level == PG_LEVEL_4K && rmp_level == PG_LEVEL_2M && private)) {
> +               rc = snp_rmptable_psmash(kvm, pfn);


Regarding this case:
  RMP level is 4K
  page table level is 2M

Does that combination also raise a page fault with the size-mismatch
bit set? If so, we shouldn't attempt PSMASH, because the RMP entry is
already 4K.

We see these errors in our tests, and I suspect they occur because the
RMP size is already 4K:

[ 1848.752952] psmash failed, gpa 0x191560000 pfn 0x536cd60 rc 7
[ 2922.879635] psmash failed, gpa 0x102830000 pfn 0x37c8230 rc 7
[ 3010.983090] psmash failed, gpa 0x104220000 pfn 0x6cf1e20 rc 7
[ 3170.792050] psmash failed, gpa 0x108a80000 pfn 0x20e0080 rc 7
[ 3345.955147] psmash failed, gpa 0x11b480000 pfn 0x1545e480 rc 7

Shouldn't we use AND instead of OR in the if statement?

if ((error_code & PFERR_GUEST_SIZEM_MASK) && ...
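
Concretely, one reading of that suggestion (a sketch, not a tested
change): attempt PSMASH only for size-mismatch faults where the RMP
entry is still 2M, so PSMASH is never issued against an entry that is
already 4K.

	/*
	 * Sketch of the suggested condition: require both the
	 * size-mismatch error bit and a 2M RMP entry backing a 4K
	 * NPT mapping before trying to split the entry with PSMASH.
	 */
	if ((error_code & PFERR_GUEST_SIZEM_MASK) &&
	    npt_level == PG_LEVEL_4K && rmp_level == PG_LEVEL_2M && private) {
		rc = snp_rmptable_psmash(kvm, pfn);
		if (rc)
			pr_err_ratelimited("psmash failed, gpa 0x%llx pfn 0x%llx rc %d\n",
					   gpa, pfn, rc);
		goto out;
	}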

> +               if (rc)
> +                       pr_err_ratelimited("psmash failed, gpa 0x%llx pfn 0x%llx rc %d\n",
> +                                          gpa, pfn, rc);
> +               goto out;
> +       }
> +
> +       /*
> +        * If it's a private access, and the page is not assigned in the
> +        * RMP table, create a new private RMP entry. This can happen if
> +        * the guest did not use the PSC VMGEXIT to transition the page
> +        * state before the access.
> +        */
> +       if (!assigned && private) {
> +               need_psc = true;
> +               psc_op = SNP_PAGE_STATE_PRIVATE;
> +               goto out;
> +       }
> +
> +       /*
> +        * If it's a shared access, but the page is private in the RMP table
> +        * then make the page shared in the RMP table. This can happen if
> +        * the guest did not use the PSC VMGEXIT to transition the page
> +        * state before the access.
> +        */
> +       if (assigned && !private) {
> +               need_psc = true;
> +               psc_op = SNP_PAGE_STATE_SHARED;
> +       }
> +
> +out:
> +       write_unlock(&kvm->mmu_lock);
> +
> +       if (need_psc)
> +               rc = __snp_handle_page_state_change(vcpu, psc_op, gpa, PG_LEVEL_4K);
> +
> +       /*
> +        * The fault handler has updated the RMP pagesize, zap the existing
> +        * rmaps for large entry ranges so that nested page table gets rebuilt
> +        * with the updated RMP pagesize.
> +        */
> +       gfn = gpa_to_gfn(gpa) & ~(KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) - 1);
> +       kvm_zap_gfn_range(kvm, gfn, gfn + PTRS_PER_PMD);
> +       return;
> +
> +unlock:
> +       write_unlock(&kvm->mmu_lock);
> +}
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 1c8e035ba011..7742bc986afc 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -1866,15 +1866,21 @@ static int pf_interception(struct kvm_vcpu *vcpu)
>  static int npf_interception(struct kvm_vcpu *vcpu)
>  {
>         struct vcpu_svm *svm = to_svm(vcpu);
> +       int rc;
>
>         u64 fault_address = svm->vmcb->control.exit_info_2;
>         u64 error_code = svm->vmcb->control.exit_info_1;
>
>         trace_kvm_page_fault(fault_address, error_code);
> -       return kvm_mmu_page_fault(vcpu, fault_address, error_code,
> -                       static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
> -                       svm->vmcb->control.insn_bytes : NULL,
> -                       svm->vmcb->control.insn_len);
> +       rc = kvm_mmu_page_fault(vcpu, fault_address, error_code,
> +                               static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
> +                               svm->vmcb->control.insn_bytes : NULL,
> +                               svm->vmcb->control.insn_len);
> +
> +       if (error_code & PFERR_GUEST_RMP_MASK)
> +               handle_rmp_page_fault(vcpu, fault_address, error_code);
> +
> +       return rc;
>  }
>
>  static int db_interception(struct kvm_vcpu *vcpu)
> --
> 2.25.1
>
>


