As with allocation of an encrypted page, we need to flush the cache before
returning the page to the free pool. Failing to do so may lead to data
corruption.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
---
 arch/x86/mm/mktme.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/arch/x86/mm/mktme.c b/arch/x86/mm/mktme.c
index 1129ad25b22a..ef0eb1eb8d6e 100644
--- a/arch/x86/mm/mktme.c
+++ b/arch/x86/mm/mktme.c
@@ -45,6 +45,19 @@ void prep_encrypt_page(struct page *page, gfp_t gfp, unsigned int order)
 	WARN_ONCE(gfp & __GFP_ZERO, "__GFP_ZERO is useless for encrypted pages");
 }
 
+void free_encrypt_page(struct page *page, int keyid, unsigned int order)
+{
+	int i;
+	void *v;
+
+	for (i = 0; i < (1 << order); i++) {
+		v = kmap_atomic_keyid(page + i, keyid);
+		/* See comment in prep_encrypt_page() */
+		clflush_cache_range(v, PAGE_SIZE);
+		kunmap_atomic(v);
+	}
+}
+
 struct page *__alloc_zeroed_encrypted_user_highpage(gfp_t gfp,
 		struct vm_area_struct *vma, unsigned long vaddr)
 {
-- 
2.16.1
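
For context, here is a minimal sketch (not part of this patch) of how a free
path might invoke free_encrypt_page(). The hook name and the page_keyid()
helper are assumptions for illustration only:

/*
 * Illustrative caller only, not part of this patch. page_keyid() is
 * assumed to return the KeyID the page is mapped with, 0 meaning the
 * page is not encrypted and needs no extra cache flush before it is
 * returned to the free lists.
 */
static inline void arch_free_encrypted_page(struct page *page,
		unsigned int order)
{
	int keyid = page_keyid(page);

	if (keyid)
		free_encrypt_page(page, keyid, order);
}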