On 8/7/20 7:37 PM, Cfir Cohen wrote:
> The LAUNCH_SECRET command performs encryption of the
> launch secret memory contents. Mark pinned pages as
> dirty, before unpinning them.
> This matches the logic in sev_launch_update_data().
>
> Signed-off-by: Cfir Cohen <cfir@xxxxxxxxxx>
> ---
> Changelog since v1:
> - Updated commit message.
>
>  arch/x86/kvm/svm/sev.c | 15 ++++++++++++++-
>  1 file changed, 14 insertions(+), 1 deletion(-)

Reviewed-by: Brijesh Singh <brijesh.singh@xxxxxxx>

>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 5573a97f1520..37c47d26b9f7 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -850,7 +850,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
>  	struct kvm_sev_launch_secret params;
>  	struct page **pages;
>  	void *blob, *hdr;
> -	unsigned long n;
> +	unsigned long n, i;
>  	int ret, offset;
>  
>  	if (!sev_guest(kvm))
> @@ -863,6 +863,14 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
>  	if (!pages)
>  		return -ENOMEM;
>  
> +	/*
> +	 * The LAUNCH_SECRET command will perform in-place encryption of the
> +	 * memory content (i.e it will write the same memory region with C=1).
> +	 * It's possible that the cache may contain the data with C=0, i.e.,
> +	 * unencrypted so invalidate it first.
> +	 */
> +	sev_clflush_pages(pages, n);
> +
>  	/*
>  	 * The secret must be copied into contiguous memory region, lets verify
>  	 * that userspace memory pages are contiguous before we issue command.
> @@ -908,6 +916,11 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
>  e_free:
>  	kfree(data);
>  e_unpin_memory:
> +	/* content of memory is updated, mark pages dirty */
> +	for (i = 0; i < n; i++) {
> +		set_page_dirty_lock(pages[i]);
> +		mark_page_accessed(pages[i]);
> +	}
>  	sev_unpin_memory(kvm, pages, n);
>  	return ret;
> }
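
For anyone following along, a minimal sketch of the pattern the e_unpin_memory
hunk adds: once the PSP has rewritten the guest pages in place with C=1, the
pinned pages are marked dirty and accessed before they are unpinned, so the MM
layer knows the content changed behind its back. The helper name below is
hypothetical (the patch open-codes the loop); set_page_dirty_lock() and
mark_page_accessed() are the actual kernel calls used in the diff.

    #include <linux/mm.h>	/* set_page_dirty_lock() */
    #include <linux/swap.h>	/* mark_page_accessed() */

    /*
     * Hypothetical helper for illustration only; in the patch this loop
     * runs right before sev_unpin_memory().
     */
    static void sev_mark_pages_dirty(struct page **pages, unsigned long npages)
    {
    	unsigned long i;

    	for (i = 0; i < npages; i++) {
    		/* The firmware wrote the page contents in place. */
    		set_page_dirty_lock(pages[i]);
    		/* Refresh the page's LRU state, matching sev_launch_update_data(). */
    		mark_page_accessed(pages[i]);
    	}
    }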