This will allow the mm code to figure out when a file has been changed through a writable mmap. Future changes will use this information to update the file timestamp after writes. This is handled in core mm code for two reasons: 1. Performance. Setting a bit directly is faster than an indirect call to a vma op. 2. Simplicity. The cmtime bit is set with lots of mm locks held. Rather than making filesystems add a new vm operation that needs to be aware of locking, it's easier to just get it right in core code. Signed-off-by: Andy Lutomirski <luto@xxxxxxxxxxxxxx> --- include/linux/pagemap.h | 16 ++++++++++++++++ mm/memory.c | 7 ++++++- mm/rmap.c | 27 +++++++++++++++++++++++++-- 3 files changed, 47 insertions(+), 3 deletions(-) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e3dea75..9a461ee 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -25,6 +25,7 @@ enum mapping_flags { AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */ + AS_CMTIME = __GFP_BITS_SHIFT + 5, /* cmtime update deferred */ }; static inline void mapping_set_error(struct address_space *mapping, int error) @@ -74,6 +75,21 @@ static inline gfp_t mapping_gfp_mask(struct address_space * mapping) return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; } +static inline void mapping_set_cmtime(struct address_space * mapping) +{ + set_bit(AS_CMTIME, &mapping->flags); +} + +static inline bool mapping_test_cmtime(struct address_space * mapping) +{ + return test_bit(AS_CMTIME, &mapping->flags); +} + +static inline bool mapping_test_clear_cmtime(struct address_space * mapping) +{ + return test_and_clear_bit(AS_CMTIME, &mapping->flags); +} + /* * This is non-atomic. Only to be used before the mapping is activated. * Probably needs a barrier... 
diff --git a/mm/memory.c b/mm/memory.c index 4026841..1737a90 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1150,8 +1150,13 @@ again: if (PageAnon(page)) rss[MM_ANONPAGES]--; else { - if (pte_dirty(ptent)) + if (pte_dirty(ptent)) { + struct address_space *mapping = + page_mapping(page); + if (mapping) + mapping_set_cmtime(mapping); set_page_dirty(page); + } if (pte_young(ptent) && likely(!(vma->vm_flags & VM_SEQ_READ))) mark_page_accessed(page); diff --git a/mm/rmap.c b/mm/rmap.c index b2e29ac..2e3fb27 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -928,6 +928,10 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page) } } mutex_unlock(&mapping->i_mmap_mutex); + + if (ret) + mapping_set_cmtime(mapping); + return ret; } @@ -1179,6 +1183,19 @@ out: } /* + * Mark a page's mapping for future cmtime update. It's safe to call this + * on any page, but it only has any effect if the page is backed by a mapping + * that uses mapping_test_clear_cmtime to handle file time updates. This means + * that there's no need to call this for non-VM_SHARED vmas. + */ +static void page_set_cmtime(struct page *page) +{ + struct address_space *mapping = page_mapping(page); + if (mapping) + mapping_set_cmtime(mapping); +} + +/* * Subfunctions of try_to_unmap: try_to_unmap_one called * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file. */ @@ -1219,8 +1236,11 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, pteval = ptep_clear_flush(vma, address, pte); /* Move the dirty bit to the physical page now the pte is gone. */ - if (pte_dirty(pteval)) + if (pte_dirty(pteval)) { set_page_dirty(page); + if (vma->vm_flags & VM_SHARED) + page_set_cmtime(page); + } /* Update high watermark before we lower rss */ update_hiwater_rss(mm); @@ -1413,8 +1433,11 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, } /* Move the dirty bit to the physical page now the pte is gone. 
*/ - if (pte_dirty(pteval)) + if (pte_dirty(pteval)) { set_page_dirty(page); + if (vma->vm_flags & VM_SHARED) + page_set_cmtime(page); + } page_remove_rmap(page); page_cache_release(page); -- 1.8.3.1 _______________________________________________ xfs mailing list xfs@xxxxxxxxxxx http://oss.sgi.com/mailman/listinfo/xfs