This is a note to let you know that I've just added the patch titled

    KVM: s390: pv: properly handle page flags for protected guests

to the 5.10-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     kvm-s390-pv-properly-handle-page-flags-for-protected.patch
and it can be found in the queue-5.10 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 118aed88a4b89ddc321e735e50c4836e20fffe3b
Author: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
Date:   Mon Sep 20 15:24:54 2021 +0200

    KVM: s390: pv: properly handle page flags for protected guests

    [ Upstream commit 380d97bd02fca7b9b41aec2d1c767874d602bc78 ]

    Introduce variants of the convert and destroy page functions that also
    clear the PG_arch_1 bit used to mark them as secure pages.

    The PG_arch_1 flag is always allowed to overindicate; using the new
    functions introduced here allows to reduce the extent of overindication
    and thus improve performance.

    These new functions can only be called on pages for which a reference
    is already being held.

    Signed-off-by: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
    Reviewed-by: Janosch Frank <frankja@xxxxxxxxxxxxx>
    Reviewed-by: Christian Borntraeger <borntraeger@xxxxxxxxxx>
    Link: https://lore.kernel.org/r/20210920132502.36111-7-imbrenda@xxxxxxxxxxxxx
    Signed-off-by: Christian Borntraeger <borntraeger@xxxxxxxxxx>
    Stable-dep-of: 3f29f6537f54 ("s390/uv: Don't call folio_wait_writeback() without a folio reference")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 2338345912a31..e967cc2a01f7e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1080,8 +1080,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 	pte_t res;
 
 	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
+	/* At this point the reference through the mapping is still present */
 	if (mm_is_protected(mm) && pte_present(res))
-		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
 	return res;
 }
 
@@ -1097,8 +1098,9 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 	pte_t res;
 
 	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
+	/* At this point the reference through the mapping is still present */
 	if (mm_is_protected(vma->vm_mm) && pte_present(res))
-		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
 	return res;
 }
 
@@ -1122,8 +1124,9 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	} else {
 		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
 	}
+	/* At this point the reference through the mapping is still present */
 	if (mm_is_protected(mm) && pte_present(res))
-		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
 	return res;
 }
 
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 12c5f006c1364..bbd51aa94d05c 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -351,8 +351,9 @@ static inline int is_prot_virt_host(void)
 }
 
 int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
-int uv_destroy_page(unsigned long paddr);
+int uv_destroy_owned_page(unsigned long paddr);
 int uv_convert_from_secure(unsigned long paddr);
+int uv_convert_owned_from_secure(unsigned long paddr);
 int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
 
 void setup_uv(void);
@@ -362,7 +363,7 @@ void adjust_to_uv_max(unsigned long *vmax);
 static inline void setup_uv(void) {}
 static inline void adjust_to_uv_max(unsigned long *vmax) {}
 
-static inline int uv_destroy_page(unsigned long paddr)
+static inline int uv_destroy_owned_page(unsigned long paddr)
 {
 	return 0;
 }
@@ -371,6 +372,11 @@ static inline int uv_convert_from_secure(unsigned long paddr)
 {
 	return 0;
 }
+
+static inline int uv_convert_owned_from_secure(unsigned long paddr)
+{
+	return 0;
+}
 #endif
 
 #if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 422765c81dd69..5173c24a02637 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -121,7 +121,7 @@ static int uv_pin_shared(unsigned long paddr)
  *
  * @paddr: Absolute host address of page to be destroyed
  */
-int uv_destroy_page(unsigned long paddr)
+static int uv_destroy_page(unsigned long paddr)
 {
 	struct uv_cb_cfs uvcb = {
 		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
@@ -141,6 +141,22 @@ int uv_destroy_page(unsigned long paddr)
 	return 0;
 }
 
+/*
+ * The caller must already hold a reference to the page
+ */
+int uv_destroy_owned_page(unsigned long paddr)
+{
+	struct page *page = phys_to_page(paddr);
+	int rc;
+
+	get_page(page);
+	rc = uv_destroy_page(paddr);
+	if (!rc)
+		clear_bit(PG_arch_1, &page->flags);
+	put_page(page);
+	return rc;
+}
+
 /*
  * Requests the Ultravisor to encrypt a guest page and make it
  * accessible to the host for paging (export).
@@ -160,6 +176,22 @@ int uv_convert_from_secure(unsigned long paddr)
 	return 0;
 }
 
+/*
+ * The caller must already hold a reference to the page
+ */
+int uv_convert_owned_from_secure(unsigned long paddr)
+{
+	struct page *page = phys_to_page(paddr);
+	int rc;
+
+	get_page(page);
+	rc = uv_convert_from_secure(paddr);
+	if (!rc)
+		clear_bit(PG_arch_1, &page->flags);
+	put_page(page);
+	return rc;
+}
+
 /*
  * Calculate the expected ref_count for a page that would otherwise have no
  * further pins. This was cribbed from similar functions in other places in
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index ad4bae2465b19..46daa9115a192 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2693,8 +2693,10 @@ static int __s390_reset_acc(pte_t *ptep, unsigned long addr,
 {
 	pte_t pte = READ_ONCE(*ptep);
 
+	/* There is a reference through the mapping */
 	if (pte_present(pte))
-		WARN_ON_ONCE(uv_destroy_page(pte_val(pte) & PAGE_MASK));
+		WARN_ON_ONCE(uv_destroy_owned_page(pte_val(pte) & PAGE_MASK));
+
 	return 0;
 }
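
For quick reference, the core of the change is the "owned" wrapper pattern
added to arch/s390/kernel/uv.c and shown in full in the diff above: take a
temporary reference on the page, perform the Ultravisor operation, clear
PG_arch_1 only on success, then drop the reference. Condensed, for the
convert case (the destroy variant is identical apart from the wrapped call):

	/* The caller must already hold a reference to the page */
	int uv_convert_owned_from_secure(unsigned long paddr)
	{
		struct page *page = phys_to_page(paddr);
		int rc;

		get_page(page);		/* keep the page alive across the UV call */
		rc = uv_convert_from_secure(paddr);
		if (!rc)		/* success: the page is no longer secure */
			clear_bit(PG_arch_1, &page->flags);
		put_page(page);
		return rc;
	}

The page-table callers (ptep_get_and_clear(), ptep_clear_flush(),
ptep_get_and_clear_full()) and __s390_reset_acc() satisfy the
"reference already held" requirement because the mapping being cleared
still holds a reference at that point, as the comments added by the
patch note.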