>>>> - * We use break_ksm to break COW on a ksm page: it's a stripped down >>>> + * We use break_ksm to break COW on a ksm page or KSM-placed zero page (only >>>> + * happen when enabling use_zero_pages): it's a stripped down >>>> * >>>> * if (get_user_pages(addr, 1, FOLL_WRITE, &page, NULL) == 1) >>>> * put_page(page); >>>> @@ -434,7 +428,8 @@ static inline bool ksm_test_exit(struct mm_struct *mm) >>>> * of the process that owns 'vma'. We also do not want to enforce >>>> * protection keys here anyway. >>>> */ >>>> -static int break_ksm(struct vm_area_struct *vma, unsigned long addr) >>>> +static int break_ksm(struct vm_area_struct *vma, unsigned long addr, >>>> + bool ksm_check_bypass) >>>> { >>>> struct page *page; >>>> vm_fault_t ret = 0; >>>> @@ -449,6 +444,16 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) >>>> ret = handle_mm_fault(vma, addr, >>>> FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE, >>>> NULL); >>>> + else if (ksm_check_bypass && is_zero_pfn(page_to_pfn(page))) { >>>> + /* >>>> + * Although it's not ksm page, it's zero page as placed by >>>> + * KSM use_zero_page, so we should unshare it when >>>> + * ksm_check_bypass is true. >>>> + */ >>>> + ret = handle_mm_fault(vma, addr, >>>> + FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE, >>>> + NULL); >>>> + } >>> >>> Please don't duplicate that page fault triggering code. >>> >>> Also, please be aware that this collides with >>> >>> https://lkml.kernel.org/r/20221021101141.84170-1-david@xxxxxxxxxx >>> >>> Adjustments should be comparatively easy. >> >> ... except that I'm still working on FAULT_FLAG_UNSHARE support for the >> shared zeropage. That will be posted soonish (within next 2 weeks). >> > >Posted: https://lkml.kernel.org/r/20221107161740.144456-1-david@xxxxxxxxxx > >With that, we can use FAULT_FLAG_UNSHARE also to break COW on the shared >zeropage. Sounds like a better way of breaking COW when working with reliable R/O long-term pinning. >-- >Thanks, > >David / dhildenb