Re: [PATCH 5/7] alpha: Implement the new page table range API

On Sat, 2023-02-11 at 03:39 +0000, Matthew Wilcox (Oracle) wrote:
> Add set_ptes(), update_mmu_cache_range() and flush_icache_pages().
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
> ---
>  arch/alpha/include/asm/cacheflush.h | 10 ++++++++++
>  arch/alpha/include/asm/pgtable.h    | 18 +++++++++++++++++-
>  2 files changed, 27 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
> index 9945ff483eaf..3956460e69e2 100644
> --- a/arch/alpha/include/asm/cacheflush.h
> +++ b/arch/alpha/include/asm/cacheflush.h
> @@ -57,6 +57,16 @@ extern void flush_icache_user_page(struct vm_area_struct *vma,
>  #define flush_icache_page(vma, page) \
>         flush_icache_user_page((vma), (page), 0, 0)
This is not related to this patch, and I am not asking for a change;
it's just a question of mine.

So, is it more efficient to implement flush_icache_page(s) as a no-op
and do the real flush in update_mmu_cache()?
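
To make the question concrete, here is a rough, untested sketch of the
alternative I have in mind (purely hypothetical, not part of this
patch). It assumes the deferred flush can happen once in
update_mmu_cache_range(), after set_ptes() has installed the PTEs, and
that only VM_EXEC mappings need the icache flushed:

static inline void flush_icache_pages(struct vm_area_struct *vma,
		struct page *page, unsigned int nr)
{
	/* No-op: defer the icache flush to update_mmu_cache_range(). */
}

static inline void update_mmu_cache_range(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, unsigned int nr)
{
	/* Hypothetical: do the flush once here, after the PTEs are set. */
	if (vma->vm_flags & VM_EXEC)
		flush_icache_user_page(vma, pte_page(*ptep), address,
				       nr * PAGE_SIZE);
}

On alpha it probably makes little difference, since both
implementations of flush_icache_user_page() flush the entire address
space anyway, but on architectures with a real per-page flush it might
allow batching, or skipping the flush for non-executable mappings.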


Regards
Yin, Fengwei

>  
> +/*
> + * Both implementations of flush_icache_user_page flush the entire
> + * address space, so one call, no matter how many pages.
> + */
> +static inline void flush_icache_pages(struct vm_area_struct *vma,
> +               struct page *page, unsigned int nr)
> +{
> +       flush_icache_user_page(vma, page, 0, 0);
> +}
> +
>  #include <asm-generic/cacheflush.h>
>  
>  #endif /* _ALPHA_CACHEFLUSH_H */
> diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
> index ba43cb841d19..1e3354e9731b 100644
> --- a/arch/alpha/include/asm/pgtable.h
> +++ b/arch/alpha/include/asm/pgtable.h
> @@ -26,7 +26,18 @@ struct vm_area_struct;
>   * hook is made available.
>   */
>  #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
> -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
> +static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
> +               pte_t *ptep, pte_t pte, unsigned int nr)
> +{
> +       for (;;) {
> +               set_pte(ptep, pte);
> +               if (--nr == 0)
> +                       break;
> +               ptep++;
> +               pte_val(pte) += 1UL << 32;
> +       }
> +}
> +#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
>  
>  /* PMD_SHIFT determines the size of the area a second-level page table can map */
>  #define PMD_SHIFT      (PAGE_SHIFT + (PAGE_SHIFT-3))
> @@ -303,6 +314,11 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
>  {
>  }
>  
> +static inline void update_mmu_cache_range(struct vm_area_struct *vma,
> +               unsigned long address, pte_t *ptep, unsigned int nr)
> +{
> +}
> +
>  /*
>   * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
>   * are !pte_none() && !pte_present().
