Re: [PATCH Part2 RFC v3 06/37] x86/sev: Add helper functions for RMPUPDATE and PSMASH instruction

* Brijesh Singh (brijesh.singh@xxxxxxx) wrote:
> The RMPUPDATE instruction writes a new RMP entry in the RMP Table. The
> hypervisor will use the instruction to add pages to the RMP table. See
> APM3 for details on the instruction operations.
> 
> The PSMASH instruction expands a 2MB RMP entry into a corresponding set of
> contiguous 4KB-page RMP entries. The hypervisor will use this instruction
> to adjust the RMP entry without invalidating the previous RMP entry.
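
For anyone following along: the calling sequence implied here is that the
hypervisor installs the 2MB RMP entry with rmpupdate() first and only
later splits it with psmash().  A rough, untested sketch of a caller
(snp_assign_then_split() is hypothetical, and RMP_PG_SIZE_2M is assumed
from earlier in the series):

	/*
	 * Illustrative only: assign a 2MB page to a guest, then expand
	 * it into 512 contiguous 4KB RMP entries.
	 */
	static int snp_assign_then_split(struct page *page, u64 gpa, u32 asid)
	{
		struct rmpupdate val = {
			.gpa      = gpa,
			.assigned = 1,
			.pagesize = RMP_PG_SIZE_2M,	/* assumed constant */
			.asid     = asid,
		};
		int ret;

		ret = rmpupdate(page, &val);	/* write the 2MB RMP entry */
		if (ret)
			return ret;

		return psmash(page);		/* split into 4KB entries */
	}
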
> 
> Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
> ---
>  arch/x86/kernel/sev.c | 42 ++++++++++++++++++++++++++++++++++++++++++
>  include/linux/sev.h   | 20 ++++++++++++++++++++
>  2 files changed, 62 insertions(+)
> 
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index 51676ab1a321..9727df945fb1 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -2226,3 +2226,45 @@ struct rmpentry *snp_lookup_page_in_rmptable(struct page *page, int *level)
>  	return entry;
>  }
>  EXPORT_SYMBOL_GPL(snp_lookup_page_in_rmptable);
> +
> +int psmash(struct page *page)
> +{
> +	unsigned long spa = page_to_pfn(page) << PAGE_SHIFT;
> +	int ret;
> +
> +	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
> +		return -ENXIO;
> +
> +	/* Retry if another processor is modifying the RMP entry. */
> +	do {
> +		/* Binutils version 2.36 supports the PSMASH mnemonic. */
> +		asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
> +			      : "=a"(ret)
> +			      : "a"(spa)
> +			      : "memory", "cc");
> +	} while (ret == FAIL_INUSE);
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(psmash);
> +
> +int rmpupdate(struct page *page, struct rmpupdate *val)
> +{
> +	unsigned long spa = page_to_pfn(page) << PAGE_SHIFT;
> +	int ret;
> +
> +	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
> +		return -ENXIO;
> +
> +	/* Retry if another processor is modifying the RMP entry. */
> +	do {
> +		/* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
> +		asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
> +			     : "=a"(ret)
> +			     : "a"(spa), "c"((unsigned long)val)
> +			     : "memory", "cc");
> +	} while (ret == FAIL_INUSE);
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(rmpupdate);
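
It may also be worth noting in the commit message that the reverse
operation - returning a guest-assigned page to the hypervisor - appears
to be just an rmpupdate() with a zeroed entry (assigned == 0).  Sketch
only (snp_reclaim_page() is hypothetical):

	static int snp_reclaim_page(struct page *page)
	{
		/* A zeroed entry marks the page hypervisor-owned again. */
		struct rmpupdate val = {};

		return rmpupdate(page, &val);
	}
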
> diff --git a/include/linux/sev.h b/include/linux/sev.h
> index 83c89e999999..bcd4d75d87c8 100644
> --- a/include/linux/sev.h
> +++ b/include/linux/sev.h
> @@ -39,13 +39,33 @@ struct __packed rmpentry {
>  
>  #define RMP_TO_X86_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
>  
> +struct rmpupdate {
> +	u64 gpa;
> +	u8 assigned;
> +	u8 pagesize;
> +	u8 immutable;
> +	u8 rsvd;
> +	u32 asid;
> +} __packed;
> +
> +
> +/*
> + * psmash() and rmpupdate() return FAIL_INUSE when another processor is
> + * modifying the RMP entry.
> + */
> +#define FAIL_INUSE              3

Perhaps SEV_FAIL_INUSE ?

(Given that there's already a whole bunch of FAIL_* macros in
general)
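
i.e. something like:

	#define SEV_FAIL_INUSE	3

with the retry loops above then reading:

	} while (ret == SEV_FAIL_INUSE);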

Dave

>  #ifdef CONFIG_AMD_MEM_ENCRYPT
>  struct rmpentry *snp_lookup_page_in_rmptable(struct page *page, int *level);
> +int psmash(struct page *page);
> +int rmpupdate(struct page *page, struct rmpupdate *e);
>  #else
>  static inline struct rmpentry *snp_lookup_page_in_rmptable(struct page *page, int *level)
>  {
>  	return NULL;
>  }
> +static inline int psmash(struct page *page) { return -ENXIO; }
> +static inline int rmpupdate(struct page *page, struct rmpupdate *e) { return -ENXIO; }
>  
>  #endif /* CONFIG_AMD_MEM_ENCRYPT */
>  #endif /* __LINUX_SEV_H */
> -- 
> 2.17.1
> 
> 
-- 
Dr. David Alan Gilbert / dgilbert@xxxxxxxxxx / Manchester, UK