On 08/19/2016 03:41 PM, Andrea Arcangeli wrote: > Transfer the soft_dirty from pmd to pte during THP splits. > > This fix avoids losing the soft_dirty bit and avoids userland memory > corruption in the checkpoint. Nasty :( Thanks for catching this! > Signed-off-by: Andrea Arcangeli <aarcange@xxxxxxxxxx> Acked-by: Pavel Emelyanov <xemul@xxxxxxxxxxxxx> > --- > mm/huge_memory.c | 7 ++++++- > 1 file changed, 6 insertions(+), 1 deletion(-) > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c > index b9570b5..cb95a83 100644 > --- a/mm/huge_memory.c > +++ b/mm/huge_memory.c > @@ -1512,7 +1512,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, > struct page *page; > pgtable_t pgtable; > pmd_t _pmd; > - bool young, write, dirty; > + bool young, write, dirty, soft_dirty; > unsigned long addr; > int i; > > @@ -1546,6 +1546,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, > write = pmd_write(*pmd); > young = pmd_young(*pmd); > dirty = pmd_dirty(*pmd); > + soft_dirty = pmd_soft_dirty(*pmd); > > pmdp_huge_split_prepare(vma, haddr, pmd); > pgtable = pgtable_trans_huge_withdraw(mm, pmd); > @@ -1562,6 +1563,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, > swp_entry_t swp_entry; > swp_entry = make_migration_entry(page + i, write); > entry = swp_entry_to_pte(swp_entry); > + if (soft_dirty) > + entry = pte_swp_mksoft_dirty(entry); > } else { > entry = mk_pte(page + i, vma->vm_page_prot); > entry = maybe_mkwrite(entry, vma); > @@ -1569,6 +1572,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, > entry = pte_wrprotect(entry); > if (!young) > entry = pte_mkold(entry); > + if (soft_dirty) > + entry = pte_mksoft_dirty(entry); > } > if (dirty) > SetPageDirty(page + i); > . > -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . 
Don't email: <a href="mailto:dont@xxxxxxxxx"> email@xxxxxxxxx </a>