On 2/2/23 01:38, Matthew Wilcox wrote:
On Wed, Feb 01, 2023 at 04:17:36PM +0800, Yin Fengwei wrote:
do_set_pte_range() allows setting up page table entries for a
specific range. It calls page_add_file_rmap_range() to take
advantage of batched rmap updates for large folios.
How about something more like this? Yes, we need to define
flush_icache_pages() and PTE_STRIDE.
(we could also do for (i = 0; i < nr; i++) flush_icache_page(...) but
given that some architectures already implement flush_icache_range(),
I think they may appreciate being given one large range to flush)
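A minimal sketch of the fallback, assuming flush_icache_pages() is added
as an asm-generic style default (the name comes from your suggestion
above; nothing in the tree defines it yet), could be exactly the
per-page loop you mention:

#ifndef flush_icache_pages
static inline void flush_icache_pages(struct vm_area_struct *vma,
				      struct page *page, unsigned int nr)
{
	unsigned int i;

	/* Default: reuse the existing per-page hook for each subpage. */
	for (i = 0; i < nr; i++)
		flush_icache_page(vma, page + i);
}
#endif

Architectures with an efficient ranged flush could then override it with
a single flush_icache_range() call covering nr * PAGE_SIZE bytes.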
For flush_icache_range() and flush_icache_page(), the implementation
on riscv could be an exception.
According to arch/riscv/include/asm/cacheflush.h:
#define flush_icache_range(start, end) flush_icache_all()
There is no definition of flush_icache_page(), so I suppose it does
nothing on riscv.
Using flush_icache_range() here may not be a good choice for riscv,
since every call would flush the entire icache.
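If the per-page fallback sketched above is what lands, riscv keeps its
current behaviour for free, since (as noted above) its
flush_icache_page() does nothing. The override below is only a sketch of
the alternative, assuming riscv would rather keep the no-op than pay for
flush_icache_all() on every fault-around:

/* arch/riscv/include/asm/cacheflush.h (hypothetical): keep the batched
 * hook a no-op instead of mapping it to flush_icache_range(), which
 * riscv implements as flush_icache_all().
 */
#define flush_icache_pages(vma, page, nr)	do { } while (0)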
Regards
Yin, Fengwei
+++ b/mm/memory.c
@@ -4277,15 +4277,19 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
}
#endif
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
+void do_set_pte_range(struct vm_fault *vmf, struct folio *folio,
+ unsigned int start, unsigned int nr,
+ unsigned long addr)
{
+ struct page *page = folio_page(folio, start);
struct vm_area_struct *vma = vmf->vma;
bool uffd_wp = pte_marker_uffd_wp(vmf->orig_pte);
bool write = vmf->flags & FAULT_FLAG_WRITE;
bool prefault = vmf->address != addr;
pte_t entry;
+ unsigned int i;
- flush_icache_page(vma, page);
+ flush_icache_pages(vma, page, nr);
entry = mk_pte(page, vma->vm_page_prot);
if (prefault && arch_wants_old_prefaulted_pte())
@@ -4299,14 +4303,23 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
entry = pte_mkuffd_wp(pte_wrprotect(entry));
/* copy-on-write page */
if (write && !(vma->vm_flags & VM_SHARED)) {
- inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
- page_add_new_anon_rmap(page, vma, addr);
- lru_cache_add_inactive_or_unevictable(page, vma);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
+ for (i = 0; i < nr; i++) {
+ page_add_new_anon_rmap(page + i, vma, addr);
+ lru_cache_add_inactive_or_unevictable(page + i, vma);
+ }
} else {
- inc_mm_counter(vma->vm_mm, mm_counter_file(page));
- page_add_file_rmap(page, vma, false);
+ add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
+ folio_add_file_rmap(folio, start, nr, vma);
+ }
+
+ for (i = 0; i < nr; i++) {
+ set_pte_at(vma->vm_mm, addr, vmf->pte + i, entry);
+ /* no need to invalidate: a not-present page won't be cached */
+ update_mmu_cache(vma, addr, vmf->pte + i);
+ addr += PAGE_SIZE;
+ entry += PTE_STRIDE;
}
- set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
}
static bool vmf_pte_changed(struct vm_fault *vmf)
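One more note on PTE_STRIDE, used as "entry += PTE_STRIDE" in the loop
above: a hypothetical definition, assuming an architecture whose pte
value encodes the physical address directly so that stepping to the next
subpage is plain addition, could be just:

#ifndef PTE_STRIDE
#define PTE_STRIDE	PAGE_SIZE
#endif

Architectures where pte_t is an opaque struct, or where the pfn sits at
a different bit position, would need a small helper that advances the
pfn instead; this is only meant to illustrate the intent of the loop.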