Some hardware implementations may enforce cache coherency across encryption domains. In such cases, it is not necessary to flush encrypted pages out of the cache. Signed-off-by: Krish Sadhukhan <krish.sadhukhan@xxxxxxxxxx> --- arch/x86/kvm/svm/sev.c | 3 ++- arch/x86/mm/pat/set_memory.c | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 402dc4234e39..8aa2209f2637 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -384,7 +384,8 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages) uint8_t *page_virtual; unsigned long i; - if (npages == 0 || pages == NULL) + if (this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY) || npages == 0 || + pages == NULL) return; for (i = 0; i < npages; i++) { diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index d1b2a889f035..5e2c618cbe84 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -1999,7 +1999,8 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) /* * Before changing the encryption attribute, we need to flush caches. */ - cpa_flush(&cpa, 1); + if (!this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY)) + cpa_flush(&cpa, 1); ret = __change_page_attr_set_clr(&cpa, 1); @@ -2010,7 +2011,8 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) * flushing gets optimized in the cpa_flush() path use the same logic * as above. */ - cpa_flush(&cpa, 0); + if (!this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY)) + cpa_flush(&cpa, 0); return ret; } -- 2.18.4