Add PFN_PTE_SHIFT, update_mmu_cache_range(), flush_dcache_folio() and
flush_icache_pages().

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Acked-by: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>
Cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
Cc: sparclinux@xxxxxxxxxxxxxxx
---
 arch/sparc/include/asm/cacheflush_32.h |  9 +++++++--
 arch/sparc/include/asm/pgtable_32.h    |  8 ++++----
 arch/sparc/mm/init_32.c                | 13 +++++++++++--
 3 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h
index adb6991d0455..8dba35d63328 100644
--- a/arch/sparc/include/asm/cacheflush_32.h
+++ b/arch/sparc/include/asm/cacheflush_32.h
@@ -16,6 +16,7 @@
 	sparc32_cachetlb_ops->cache_page(vma, addr)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma, pg)		do { } while (0)
+#define flush_icache_pages(vma, pg, nr)		do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
@@ -35,11 +36,15 @@
 #define flush_page_for_dma(addr) \
 	sparc32_cachetlb_ops->page_for_dma(addr)
 
-struct page;
 void sparc_flush_page_to_ram(struct page *page);
+void sparc_flush_folio_to_ram(struct folio *folio);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-#define flush_dcache_page(page)			sparc_flush_page_to_ram(page)
+#define flush_dcache_folio(folio)		sparc_flush_folio_to_ram(folio)
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index d4330e3c57a6..315d316614ca 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -101,8 +101,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
 }
 
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
 static inline int srmmu_device_memory(unsigned long x)
 {
 	return ((x & 0xF0000000) != 0);
@@ -256,6 +254,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	return __pte(pte_val(pte) | SRMMU_REF);
 }
 
+#define PFN_PTE_SHIFT			(PAGE_SHIFT - 4)
 #define pfn_pte(pfn, prot)		mk_pte(pfn_to_page(pfn), prot)
 
 static inline unsigned long pte_pfn(pte_t pte)
@@ -268,7 +267,7 @@ static inline unsigned long pte_pfn(pte_t pte)
 		 */
 		return ~0UL;
 	}
-	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
+	return (pte_val(pte) & SRMMU_PTE_PMASK) >> PFN_PTE_SHIFT;
 }
 
 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
@@ -318,6 +317,7 @@ void mmu_info(struct seq_file *m);
 #define FAULT_CODE_USER     0x4
 
 #define update_mmu_cache(vma, address, ptep) do { } while (0)
+#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do { } while (0)
 
 void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                       unsigned long xva, unsigned int len);
@@ -422,7 +422,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 ({									  \
 	int __changed = !pte_same(*(__ptep), __entry);			  \
 	if (__changed) {						  \
-		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+		set_pte(__ptep, __entry);				  \
 		flush_tlb_page(__vma, __address);			  \
 	}								  \
 	__changed;							  \
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 9c0ea457bdf0..d96a14ffceeb 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -297,11 +297,20 @@ void sparc_flush_page_to_ram(struct page *page)
 {
 	unsigned long vaddr = (unsigned long)page_address(page);
 
-	if (vaddr)
-		__flush_page_to_ram(vaddr);
+	__flush_page_to_ram(vaddr);
 }
 EXPORT_SYMBOL(sparc_flush_page_to_ram);
 
+void sparc_flush_folio_to_ram(struct folio *folio)
+{
+	unsigned long vaddr = (unsigned long)folio_address(folio);
+	unsigned int i, nr = folio_nr_pages(folio);
+
+	for (i = 0; i < nr; i++)
+		__flush_page_to_ram(vaddr + i * PAGE_SIZE);
+}
+EXPORT_SYMBOL(sparc_flush_folio_to_ram);
+
 static const pgprot_t protection_map[16] = {
 	[VM_NONE]	= PAGE_NONE,
 	[VM_READ]	= PAGE_READONLY,
-- 
2.39.2
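
A note on why the pgtable_32.h side of this is so small: the SRMMU PTE
holds the physical address shifted right by 4, so the PFN starts at bit
PAGE_SHIFT - 4, which is what PFN_PTE_SHIFT encodes (compare the pte_pfn()
hunk above). With that constant defined and set_pte_at() removed, the arch
can fall back to the generic set_ptes() loop that this series adds to
include/linux/pgtable.h. A simplified sketch of that loop follows; the real
helper also wraps it in the lazy-MMU hooks and page_table_check calls,
which are omitted here:

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	for (;;) {
		/* Write the current entry with the arch's plain set_pte(). */
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		/* Advance the PFN field of the PTE to the next page. */
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}

This is also why ptep_set_access_flags() can call set_pte() directly in the
io_remap_pfn_range() hunk: the old sparc32 set_pte_at() never used its mm
and addr arguments.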