Re: [PATCH V5 07/10] Add per-memcg zone "unreclaimable"

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 





On Sun, Apr 17, 2011 at 9:27 PM, Minchan Kim <minchan.kim@xxxxxxxxx> wrote:
On Sat, Apr 16, 2011 at 8:23 AM, Ying Han <yinghan@xxxxxxxxxx> wrote:
> After reclaiming each node per memcg, it checks mem_cgroup_watermark_ok()
> and breaks the priority loop if it returns true. The per-memcg zone will
> be marked as "unreclaimable" if the scanning rate is much greater than the
> reclaiming rate on the per-memcg LRU. The bit is cleared when there is a
> page charged to the memcg being freed. Kswapd breaks the priority loop if
> all the zones are marked as "unreclaimable".
>
> changelog v5..v4:
> 1. reduce the frequency of updating mz->unreclaimable bit by using the existing
> memcg batch in task struct.
> 2. add new function mem_cgroup_mz_clear_unreclaimable() for recognizing zone.
>
> changelog v4..v3:
> 1. split off from the per-memcg background reclaim patch in V3.
>
> Signed-off-by: Ying Han <yinghan@xxxxxxxxxx>
> ---
>  include/linux/memcontrol.h |   40 ++++++++++++++
>  include/linux/sched.h      |    1 +
>  include/linux/swap.h       |    2 +
>  mm/memcontrol.c            |  130 +++++++++++++++++++++++++++++++++++++++++++-
>  mm/vmscan.c                |   19 +++++++
>  5 files changed, 191 insertions(+), 1 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index d4ff7f2..b18435d 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -155,6 +155,14 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
>  unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
>                                                gfp_t gfp_mask);
>  u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
> +bool mem_cgroup_zone_reclaimable(struct mem_cgroup *mem, int nid, int zid);
> +bool mem_cgroup_mz_unreclaimable(struct mem_cgroup *mem, struct zone *zone);
> +void mem_cgroup_mz_set_unreclaimable(struct mem_cgroup *mem, struct zone *zone);
> +void mem_cgroup_clear_unreclaimable(struct mem_cgroup *mem, struct page *page);
> +void mem_cgroup_mz_clear_unreclaimable(struct mem_cgroup *mem,
> +                                       struct zone *zone);
> +void mem_cgroup_mz_pages_scanned(struct mem_cgroup *mem, struct zone* zone,
> +                                       unsigned long nr_scanned);
>
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
> @@ -345,6 +353,38 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
>  {
>  }
>
> +static inline bool mem_cgroup_zone_reclaimable(struct mem_cgroup *mem, int nid,
> +                                                               int zid)
> +{
> +       return false;
> +}
> +
> +static inline bool mem_cgroup_mz_unreclaimable(struct mem_cgroup *mem,
> +                                               struct zone *zone)
> +{
> +       return false;
> +}
> +
> +static inline void mem_cgroup_mz_set_unreclaimable(struct mem_cgroup *mem,
> +                                                       struct zone *zone)
> +{
> +}
> +
> +static inline void mem_cgroup_clear_unreclaimable(struct mem_cgroup *mem,
> +                                                       struct page *page)
> +{
> +}
> +
> +static inline void mem_cgroup_mz_clear_unreclaimable(struct mem_cgroup *mem,
> +                                                       struct zone *zone)
> +{
> +}
> +
> +static inline void mem_cgroup_mz_pages_scanned(struct mem_cgroup *mem,
> +                                               struct zone *zone,
> +                                               unsigned long nr_scanned)
> +{
> +}
> +
>  static inline
>  unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
>                                            gfp_t gfp_mask)
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 98fc7ed..3370c5a 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1526,6 +1526,7 @@ struct task_struct {
>                struct mem_cgroup *memcg; /* target memcg of uncharge */
>                unsigned long nr_pages; /* uncharged usage */
>                unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
> +               struct zone *zone; /* a zone page is last uncharged */
>        } memcg_batch;
>  #endif
>  };
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 17e0511..319b800 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -160,6 +160,8 @@ enum {
>        SWP_SCANNING    = (1 << 8),     /* refcount in scan_swap_map */
>  };
>
> +#define ZONE_RECLAIMABLE_RATE 6
> +

You can use ZONE_RECLAIMABLE_RATE in zone_reclaimable(), too.
If you want to keep the memcg rate separate from the global one, please
use a clearer macro name such as ZONE_MEMCG_RECLAIMABLE_RATE.

For now I will leave them as the same value. Will make the change in the next post.

Thanks

--Ying 

--
Kind regards,
Minchan Kim


[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux]     [Linux OMAP]     [Linux MIPS]     [ECOS]     [Asterisk Internet PBX]     [Linux API]