When a task does fork(), its shadow stack (SHSTK) must be duplicated
for the child.  This patch implements a flow similar to copy-on-write
of an anonymous page, but for SHSTK.

A SHSTK PTE must be RO and dirty.  This dirty bit requirement is used
to effect the copying.  In copy_one_pte(), clear the dirty bit from a
SHSTK PTE to cause a page fault upon the next SHSTK access.  At that
time, fix the PTE and copy/re-use the page.

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@xxxxxxxxx>
---
 arch/x86/mm/pgtable.c         | 15 +++++++++++++++
 include/asm-generic/pgtable.h |  8 ++++++++
 mm/memory.c                   |  7 ++++++-
 3 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 1f67b1e15bf6..c2d754a780b3 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -891,3 +891,18 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 
 #endif /* CONFIG_X86_64 */
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
+#ifdef CONFIG_X86_INTEL_SHADOW_STACK_USER
+inline pte_t pte_set_vma_features(pte_t pte, struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_SHSTK)
+		return pte_mkdirty_shstk(pte);
+	else
+		return pte;
+}
+
+inline bool arch_copy_pte_mapping(vm_flags_t vm_flags)
+{
+	return (vm_flags & VM_SHSTK);
+}
+#endif /* CONFIG_X86_INTEL_SHADOW_STACK_USER */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 75d9d68a6de7..ffcc0be7cadc 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1188,4 +1188,12 @@ static inline bool arch_has_pfn_modify_check(void)
 #define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
 #endif
 
+#ifndef CONFIG_ARCH_HAS_SHSTK
+#define pte_set_vma_features(pte, vma) pte
+#define arch_copy_pte_mapping(vma_flags) false
+#else
+pte_t pte_set_vma_features(pte_t pte, struct vm_area_struct *vma);
+bool arch_copy_pte_mapping(vm_flags_t vm_flags);
+#endif
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/mm/memory.c b/mm/memory.c
index ddf20bd0c317..51c97294f00f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -777,7 +777,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * If it's a COW mapping, write protect it both
 	 * in the parent and the child
 	 */
-	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
+	if ((is_cow_mapping(vm_flags) && pte_write(pte)) ||
+	    arch_copy_pte_mapping(vm_flags)) {
 		ptep_set_wrprotect(src_mm, addr, src_pte);
 		pte = pte_wrprotect(pte);
 	}
@@ -2312,6 +2313,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 	entry = pte_mkyoung(vmf->orig_pte);
 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	entry = pte_set_vma_features(entry, vma);
 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
 		update_mmu_cache(vma, vmf->address, vmf->pte);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2387,6 +2389,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 	entry = mk_pte(new_page, vma->vm_page_prot);
 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	entry = pte_set_vma_features(entry, vma);
 	/*
 	 * Clear the pte entry and flush it first, before updating the
 	 * pte with the new entry. This will avoid a race condition
@@ -2910,6 +2913,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	pte = mk_pte(page, vma->vm_page_prot);
 	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
+		pte = pte_set_vma_features(pte, vma);
 		vmf->flags &= ~FAULT_FLAG_WRITE;
 		ret |= VM_FAULT_WRITE;
 		exclusive = RMAP_EXCLUSIVE;
@@ -3052,6 +3056,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
+	entry = pte_set_vma_features(entry, vma);
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 			&vmf->ptl);
-- 
2.17.1
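
Side note for reviewers (not part of the patch): below is a minimal, stand-alone
user-space sketch of the PTE state machine the changelog describes -- a shadow
stack PTE is encoded as read-only + dirty, fork() clears the dirty bit so the
next shadow stack access faults, and the fault handler copies or re-uses the
page and restores the RO + dirty encoding.  Every name in it (mock_pte,
MOCK_DIRTY, mock_fork_pte(), mock_shstk_fault(), the toy refcount array) is a
hypothetical stand-in for the real PTE bits and for the new
pte_set_vma_features()/arch_copy_pte_mapping() hooks; it only illustrates the
transitions, it is not kernel code.

/*
 * Toy model of the SHSTK copy-on-access flow.  All names are hypothetical
 * stand-ins; only the state transitions mirror the changelog above.
 */
#include <stdbool.h>
#include <stdio.h>

#define MOCK_WRITE 0x1			/* "hardware" writable bit */
#define MOCK_DIRTY 0x2			/* "hardware" dirty bit */

static int refcount[16];		/* per-"page" reference counts */
static int next_page = 1;		/* trivial page allocator */

struct mock_pte {
	unsigned long flags;
	int page;			/* index of the backing page */
};

/* A shadow stack PTE is recognizable as read-only + dirty. */
static bool mock_pte_is_shstk(struct mock_pte p)
{
	return !(p.flags & MOCK_WRITE) && (p.flags & MOCK_DIRTY);
}

/*
 * fork() time, like copy_one_pte(): share the page and clear the dirty
 * bit in both parent and child, so the next shadow stack access faults.
 */
static struct mock_pte mock_fork_pte(struct mock_pte *parent)
{
	parent->flags &= ~MOCK_DIRTY;
	refcount[parent->page]++;
	return *parent;			/* child shares the same page */
}

/*
 * Fault time: re-use the page if this is the only reference, otherwise
 * "copy" it to a fresh page; either way restore the RO + dirty encoding
 * (roughly the role pte_set_vma_features() plays in the patch).
 */
static void mock_shstk_fault(struct mock_pte *pte)
{
	if (refcount[pte->page] > 1) {
		refcount[pte->page]--;
		pte->page = next_page++;
		refcount[pte->page] = 1;
	}
	pte->flags |= MOCK_DIRTY;
}

int main(void)
{
	struct mock_pte parent = { .flags = MOCK_DIRTY, .page = next_page++ };
	refcount[parent.page] = 1;

	struct mock_pte child = mock_fork_pte(&parent);

	/* Both tasks fault on their next shadow stack access. */
	mock_shstk_fault(&parent);	/* page still shared: gets a copy */
	mock_shstk_fault(&child);	/* now the sole owner: re-uses it */

	printf("parent: page %d shstk=%d, child: page %d shstk=%d\n",
	       parent.page, mock_pte_is_shstk(parent),
	       child.page, mock_pte_is_shstk(child));
	return 0;
}

The attraction of the scheme, as described in the changelog, is that no
separate bookkeeping is needed for shadow stack pages: clearing the dirty bit
at fork() time lets the existing write-protect fault path do the copy/re-use
work, and the only per-arch addition on the fault side is re-applying the
RO + dirty encoding through pte_set_vma_features().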