Re: [PATCH v1] mm: add tracepoints to ksm

On Fri, 10 Feb 2023 13:46:45 -0800
Stefan Roesch <shr@xxxxxxxxxxxx> wrote:

Sorry for the late reply; I just noticed this (I had the flu when this
was originally sent).

> +/**
> + * ksm_remove_ksm_page - called after a ksm page has been removed
> + *
> + * @pfn:		page frame number of ksm page
> + *
> + * Allows tracing the removal of stable ksm pages.
> + */
> +TRACE_EVENT(ksm_remove_ksm_page,
> +
> +	TP_PROTO(unsigned long pfn),
> +
> +	TP_ARGS(pfn),
> +
> +	TP_STRUCT__entry(
> +		__field(unsigned long, pfn)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->pfn = pfn;
> +	),
> +
> +	TP_printk("pfn %lu", __entry->pfn)
> +);
> +
> +/**
> + * ksm_remove_rmap_item - called after an rmap_item has been removed from the
> + *                        stable tree
> + *
> + * @pfn:		page frame number of ksm page
> + * @rmap_item:		address of rmap_item object
> + * @mm:			address of the process mm struct
> + *
> + * Allows tracing the removal of pages from the stable tree list.
> + */
> +TRACE_EVENT(ksm_remove_rmap_item,
> +
> +	TP_PROTO(unsigned long pfn, void *rmap_item, void *mm),
> +
> +	TP_ARGS(pfn, rmap_item, mm),
> +
> +	TP_STRUCT__entry(
> +		__field(unsigned long,	pfn)
> +		__field(void *,		rmap_item)
> +		__field(void *,		mm)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->pfn		= pfn;
> +		__entry->rmap_item	= rmap_item;
> +		__entry->mm		= mm;
> +	),
> +
> +	TP_printk("pfn %lu rmap_item %p mm %p",
> +			__entry->pfn, __entry->rmap_item, __entry->mm)
> +);
> +
> +#endif /* _TRACE_KSM_H */
> +
> +/* This part must be outside protection */
> +#include <trace/define_trace.h>
> diff --git a/mm/ksm.c b/mm/ksm.c
> index 56808e3bfd19..4356af760735 100644
> --- a/mm/ksm.c
> +++ b/mm/ksm.c
> @@ -45,6 +45,9 @@
>  #include "internal.h"
>  #include "mm_slot.h"
>  
> +#define CREATE_TRACE_POINTS
> +#include <trace/events/ksm.h>
> +
>  #ifdef CONFIG_NUMA
>  #define NUMA(x)		(x)
>  #define DO_NUMA(x)	do { (x); } while (0)
> @@ -655,10 +658,12 @@ static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
>  	BUG_ON(stable_node->rmap_hlist_len < 0);
>  
>  	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
> -		if (rmap_item->hlist.next)
> +		if (rmap_item->hlist.next) {
>  			ksm_pages_sharing--;
> -		else
> +			trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);

Instead of dereferencing stable_node here, which can put that work
outside the trace event and in the hot path, could you pass in the
stable_node itself, and then in the TP_fast_assign() do:

		__entry->pfn = stable_node->kpfn;

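That is, something like this (completely untested, and note the
definition of struct ksm_stable_node would need to be visible where
CREATE_TRACE_POINTS expands the event, e.g. by moving the trace header
include in ksm.c after the struct definitions):

	TRACE_EVENT(ksm_remove_rmap_item,

		TP_PROTO(struct ksm_stable_node *stable_node,
			 void *rmap_item, void *mm),

		TP_ARGS(stable_node, rmap_item, mm),

		TP_STRUCT__entry(
			__field(unsigned long,	pfn)
			__field(void *,		rmap_item)
			__field(void *,		mm)
		),

		TP_fast_assign(
			/* kpfn is read here, inside the event, not at the call site */
			__entry->pfn		= stable_node->kpfn;
			__entry->rmap_item	= rmap_item;
			__entry->mm		= mm;
		),

		TP_printk("pfn %lu rmap_item %p mm %p",
				__entry->pfn, __entry->rmap_item, __entry->mm)
	);

and then the call site just passes the pointer:

	trace_ksm_remove_rmap_item(stable_node, rmap_item, rmap_item->mm);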

> +		} else {
>  			ksm_pages_shared--;
> +		}
>  
>  		rmap_item->mm->ksm_merging_pages--;
>  
> @@ -679,6 +684,7 @@ static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
>  	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
>  	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
>  
> +	trace_ksm_remove_ksm_page(stable_node->kpfn);

Here too?
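That is (again untested):

	trace_ksm_remove_ksm_page(stable_node);

with ksm_remove_ksm_page's TP_PROTO taking a struct ksm_stable_node *
and its TP_fast_assign() doing the kpfn read, as in the sketch above.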

-- Steve

>  	if (stable_node->head == &migrate_nodes)
>  		list_del(&stable_node->list);
>  	else
> @@ -1367,6 +1373,8 @@ static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item,
>  	get_anon_vma(vma->anon_vma);
>  out:
>  	mmap_read_unlock(mm);
> +	trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page),
> +				rmap_item, mm, err);
>  	return err;
>  }
>  
> @@ -2114,6 +2122,9 @@ static int try_to_merge_with_kernel_zero_page(struct ksm_rmap_item *rmap_item,
>  		if (vma) {
>  			err = try_to_merge_one_page(vma, page,
>  						ZERO_PAGE(rmap_item->address));
> +			trace_ksm_merge_one_page(
> +				page_to_pfn(ZERO_PAGE(rmap_item->address)),
> +				rmap_item, mm, err);
>  			if (!err) {
>  				rmap_item->address |= ZERO_PAGE_FLAG;
>  				ksm_zero_pages_sharing++;
> @@ -2344,6 +2355,8 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
>  
>  	mm_slot = ksm_scan.mm_slot;
>  	if (mm_slot == &ksm_mm_head) {
> +		trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
> +
>  		/*
>  		 * A number of pages can hang around indefinitely on per-cpu
>  		 * pagevecs, raised page count preventing write_protect_page
> @@ -2510,6 +2523,7 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
>  	if (mm_slot != &ksm_mm_head)
>  		goto next_mm;
>  
> +	trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
>  	ksm_scan.seqnr++;
>  	return NULL;
>  }
> @@ -2661,6 +2675,7 @@ int __ksm_enter(struct mm_struct *mm)
>  	if (needs_wakeup)
>  		wake_up_interruptible(&ksm_thread_wait);
>  
> +	trace_ksm_enter(mm);
>  	return 0;
>  }
>  
> @@ -2702,6 +2717,8 @@ void __ksm_exit(struct mm_struct *mm)
>  		mmap_write_lock(mm);
>  		mmap_write_unlock(mm);
>  	}
> +
> +	trace_ksm_exit(mm);
>  }
>  
>  struct page *ksm_might_need_to_copy(struct page *page,
> 
> base-commit: 234a68e24b120b98875a8b6e17a9dead277be16a




