Re: [PATCH v9 19/43] x86/mm: Add support to validate memory when changing C-bit

On Fri, Jan 28, 2022 at 11:17:40AM -0600, Brijesh Singh wrote:
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index 5ee0fbd98d0d..b7ae741a8c66 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -655,6 +655,173 @@ void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op
>  		WARN(1, "invalid memory op %d\n", op);
>  }
>  
> +static int vmgexit_psc(struct snp_psc_desc *desc)
> +{
> +	int cur_entry, end_entry, ret = 0;
> +	struct snp_psc_desc *data;
> +	struct ghcb_state state;
> +	struct es_em_ctxt ctxt;
> +	unsigned long flags;
> +	struct ghcb *ghcb;
> +
> +	/*
> +	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
> +	 * a per-CPU GHCB.
> +	 */
> +	local_irq_save(flags);
> +

I know the guest will terminate anyway, but still...

> +	ghcb = __sev_get_ghcb(&state);
> +	if (unlikely(!ghcb))
> +		return 1;

This needs to be (btw, do you really need the unlikely()?)

	if (!ghcb) {
		ret = 1;
		goto out_unlock;
	}

and at the end you have

out:
	__sev_put_ghcb(&state);

out_unlock:
	local_irq_restore(flags);
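
I.e., roughly this (a sketch of the suggested control flow only; the PSC
loop body itself is elided and stays as in the posted patch):

static int vmgexit_psc(struct snp_psc_desc *desc)
{
	int cur_entry, end_entry, ret = 0;
	struct snp_psc_desc *data;
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	unsigned long flags;
	struct ghcb *ghcb;

	/* __sev_get_ghcb() uses a per-CPU GHCB, so IRQs must stay disabled. */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);
	if (!ghcb) {
		ret = 1;
		goto out_unlock;
	}

	/*
	 * ... PSC loop as in the posted patch: copy *desc into the shared
	 * buffer, issue the page state change VMGEXIT, retry per entry;
	 * any failure does "ret = 1; goto out;" ...
	 */

out:
	__sev_put_ghcb(&state);

out_unlock:
	local_irq_restore(flags);

	return ret;
}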

...

> +static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
> +{
> +	unsigned long vaddr_end, next_vaddr;
> +	struct snp_psc_desc *desc;
> +
> +	desc = kmalloc(sizeof(*desc), GFP_KERNEL_ACCOUNT);
> +	if (!desc)
> +		panic("SEV-SNP: failed to allocate memory for PSC descriptor\n");
> +
> +	vaddr = vaddr & PAGE_MASK;
> +	vaddr_end = vaddr + (npages << PAGE_SHIFT);
> +
> +	while (vaddr < vaddr_end) {
> +		/*
> +		 * Calculate the last vaddr that can be fit in one

"... that fits in one ... "

and then the comment *fits* :) on a single line too:

		/* Calculate the last vaddr that fits in one struct snp_psc_desc. */

> +		 * struct snp_psc_desc.
> +		 */
> +		next_vaddr = min_t(unsigned long, vaddr_end,
> +				   (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);
> +
> +		__set_pages_state(desc, vaddr, next_vaddr, op);
> +
> +		vaddr = next_vaddr;
> +	}
> +
> +	kfree(desc);
> +}
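
As an aside, each struct snp_psc_desc covers at most VMGEXIT_PSC_MAX_ENTRY
pages, so the loop above advances in VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE
chunks. A small standalone sketch of just that chunking arithmetic (the
value 253 for VMGEXIT_PSC_MAX_ENTRY is an assumption here, taken from
sev-common.h; the address and page count are made up):

#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))
/* Assumption for the sketch: 253 entries per descriptor. */
#define VMGEXIT_PSC_MAX_ENTRY	253

int main(void)
{
	unsigned long vaddr = 0xffff888000001000UL;
	unsigned int npages = 1000;	/* more than one descriptor's worth */
	unsigned long vaddr_end, next_vaddr;

	vaddr &= PAGE_MASK;
	vaddr_end = vaddr + ((unsigned long)npages << PAGE_SHIFT);

	while (vaddr < vaddr_end) {
		/* Last vaddr that fits in one struct snp_psc_desc. */
		next_vaddr = vaddr + VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE;
		if (next_vaddr > vaddr_end)
			next_vaddr = vaddr_end;

		printf("PSC chunk: %#lx - %#lx (%lu pages)\n",
		       vaddr, next_vaddr, (next_vaddr - vaddr) >> PAGE_SHIFT);

		vaddr = next_vaddr;
	}

	return 0;
}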

...

> diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
> index b4072115c8ef..1bc15b9d15f3 100644
> --- a/arch/x86/mm/pat/set_memory.c
> +++ b/arch/x86/mm/pat/set_memory.c
> @@ -32,6 +32,7 @@
>  #include <asm/set_memory.h>
>  #include <asm/hyperv-tlfs.h>
>  #include <asm/mshyperv.h>
> +#include <asm/sev.h>
>  
>  #include "../mm_internal.h"
>  
> @@ -2012,8 +2013,22 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
>  	 */
>  	cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
>  
> +	/*
> +	 * To maintain the security guarantees of SEV-SNP guest invalidate the
> +	 * memory before clearing the encryption attribute.
> +	 */
> +	if (!enc)
> +		snp_set_memory_shared(addr, numpages);
> +
>  	ret = __change_page_attr_set_clr(&cpa, 1);
>  
> +	/*
> +	 * Now that memory is mapped encrypted in the page table, validate it
> +	 * so that is consistent with the above page state.

" ... so that it is consistent... "

> +	 */
> +	if (!ret && enc)
> +		snp_set_memory_private(addr, numpages);
> +
>  	/*
>  	 * After changing the encryption attribute, we need to flush TLBs again
>  	 * in case any speculative TLB caching occurred (but no need to flush
> -- 
> 2.25.1
> 
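To spell out the ordering the two hunks above establish, condensed to the
relevant steps of __set_memory_enc_pgtable() (a sketch, not the full
function):

	/* 1) Flush caches/TLB while the old mapping is still in place. */
	cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));

	/* 2) Private -> shared: invalidate before the C-bit is cleared. */
	if (!enc)
		snp_set_memory_shared(addr, numpages);

	/* 3) Flip the C-bit in the page tables. */
	ret = __change_page_attr_set_clr(&cpa, 1);

	/* 4) Shared -> private: validate once the C-bit is set again. */
	if (!ret && enc)
		snp_set_memory_private(addr, numpages);

	/* 5) Flush TLBs again after the attribute change. */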

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette


