Commit f8b63c1 made flush_kernel_dcache_page a no-op assuming that the
pages it needs to handle are kernel mapped only.  However, for example
when doing direct I/O, pages with user space mappings may occur.  Thus,
continue to do lazy flushing if there are no user space mappings.
Otherwise, flush the kernel cache lines directly.

Signed-off-by: Simon Baatz <gmbnomis@xxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Russell King <linux@xxxxxxxxxxxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx> # 3.0
---

This is the backport of commits 1bc39742aab09248169ef9d3727c9def3528b3f3
(ARM: 7755/1) and 63384fd0b1509acf522a8a8fcede09087eedb7df (ARM: 7772/1)
for the 3.0 stable kernel.

The changes should only be syntactical; IS_ENABLED is not available in
this kernel and thus, #ifdef is used.

- Simon

 arch/arm/include/asm/cacheflush.h |    4 +---
 arch/arm/mm/flush.c               |   34 ++++++++++++++++++++++++++++++++++
 arch/arm/mm/nommu.c               |    6 ++++++
 3 files changed, 41 insertions(+), 3 deletions(-)

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 42dec04..0a5e8a5 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -305,9 +305,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }

 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);

 #define flush_dcache_mmap_lock(mapping) \
 	spin_lock_irq(&(mapping)->tree_lock)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 8fda9f7..32f6372 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -304,6 +304,40 @@ void flush_dcache_page(struct page *page)
 EXPORT_SYMBOL(flush_dcache_page);

 /*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation.  Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+		struct address_space *mapping;
+
+		mapping = page_mapping(page);
+
+		if (!mapping || mapping_mapped(mapping)) {
+			void *addr;
+
+			addr = page_address(page);
+#ifdef CONFIG_HIGHMEM
+			/*
+			 * kmap_atomic() doesn't set the page virtual
+			 * address, and kunmap_atomic() takes care of
+			 * cache flushing already.
+			 */
+			if (addr)
+#endif
+				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+		}
+	}
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+/*
  * Flush an anonymous page so that users of get_user_pages()
  * can safely access the data.  The expected sequence is:
  *
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 941a98c..a5018fb 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -53,6 +53,12 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);

+void flush_kernel_dcache_page(struct page *page)
+{
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 	unsigned long uaddr, void *dst, const void *src,
 	unsigned long len)
--
1.7.9.5
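
For anyone who wants to see the failing case in context, here is a
minimal, hypothetical caller sketch (the helper fill_pinned_page() and
its arguments are made up for illustration and are not part of this
patch or of mainline).  It writes to a pinned page through the kernel
mapping, much like a driver completing direct I/O would, and relies on
flush_kernel_dcache_page() to write back the dirty kernel cache lines
so that an existing user space mapping of the page sees the data:

#include <linux/highmem.h>	/* kmap(), kunmap(), flush_kernel_dcache_page() */
#include <linux/string.h>	/* memcpy() */

/* Illustrative only: copy data into a page pinned via get_user_pages(). */
static void fill_pinned_page(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap(page);

	memcpy(kaddr, src, len);
	/*
	 * Before this patch this call was a no-op, so on VIVT and
	 * aliasing VIPT caches the data could stay in the kernel
	 * alias and never become visible through the user mapping.
	 */
	flush_kernel_dcache_page(page);
	kunmap(page);
}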