From: Liam Mark <lmark@xxxxxxxxxxxxxx>
Subject: mm: cma: add trace events for CMA alloc perf testing

Add cma and migrate trace events to enable CMA allocation performance to
be measured via ftrace.

[georgi.djakov@xxxxxxxxxx: add the CMA instance name to the cma_alloc_start trace event]
Link: https://lkml.kernel.org/r/20210326155414.25006-1-georgi.djakov@xxxxxxxxxx
Link: https://lkml.kernel.org/r/20210324160740.15901-1-georgi.djakov@xxxxxxxxxx
Signed-off-by: Liam Mark <lmark@xxxxxxxxxxxxxx>
Signed-off-by: Georgi Djakov <georgi.djakov@xxxxxxxxxx>
Acked-by: Minchan Kim <minchan@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/trace/events/cma.h     |   42 ++++++++++++++++++++++++++++++-
 include/trace/events/migrate.h |   22 ++++++++++++++++
 mm/cma.c                       |    4 ++
 mm/migrate.c                   |    2 +
 4 files changed, 69 insertions(+), 1 deletion(-)

--- a/include/trace/events/cma.h~mm-cma-add-trace-events-for-cma-alloc-perf-testing
+++ a/include/trace/events/cma.h
@@ -8,7 +8,7 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(cma_alloc,
+DECLARE_EVENT_CLASS(cma_alloc_class,
 
 	TP_PROTO(unsigned long pfn, const struct page *page,
 		 unsigned int count, unsigned int align),
@@ -61,6 +61,46 @@ TRACE_EVENT(cma_release,
 		  __entry->count)
 );
 
+TRACE_EVENT(cma_alloc_start,
+
+	TP_PROTO(const char *name, unsigned int count, unsigned int align),
+
+	TP_ARGS(name, count, align),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(unsigned int, count)
+		__field(unsigned int, align)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->count = count;
+		__entry->align = align;
+	),
+
+	TP_printk("name=%s count=%u align=%u",
+		  __get_str(name),
+		  __entry->count,
+		  __entry->align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
 #endif /* _TRACE_CMA_H */
 
 /* This part must be outside protection */
--- a/include/trace/events/migrate.h~mm-cma-add-trace-events-for-cma-alloc-perf-testing
+++ a/include/trace/events/migrate.h
@@ -81,6 +81,28 @@ TRACE_EVENT(mm_migrate_pages,
 		__print_symbolic(__entry->mode, MIGRATE_MODE),
 		__print_symbolic(__entry->reason, MIGRATE_REASON))
 );
+
+TRACE_EVENT(mm_migrate_pages_start,
+
+	TP_PROTO(enum migrate_mode mode, int reason),
+
+	TP_ARGS(mode, reason),
+
+	TP_STRUCT__entry(
+		__field(enum migrate_mode, mode)
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->mode = mode;
+		__entry->reason = reason;
+	),
+
+	TP_printk("mode=%s reason=%s",
+		__print_symbolic(__entry->mode, MIGRATE_MODE),
+		__print_symbolic(__entry->reason, MIGRATE_REASON))
+);
+
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
--- a/mm/cma.c~mm-cma-add-trace-events-for-cma-alloc-perf-testing
+++ a/mm/cma.c
@@ -443,6 +443,8 @@ struct page *cma_alloc(struct cma *cma,
 	if (!count)
 		goto out;
 
+	trace_cma_alloc_start(cma->name, count, align);
+
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
 	bitmap_maxno = cma_bitmap_maxno(cma);
@@ -483,6 +485,8 @@ struct page *cma_alloc(struct cma *cma,
 
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
+
+		trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
 		/* try again with a bit different memory target */
 		start = bitmap_no + mask + 1;
 	}
--- a/mm/migrate.c~mm-cma-add-trace-events-for-cma-alloc-perf-testing
+++ a/mm/migrate.c
@@ -1418,6 +1418,8 @@ int migrate_pages(struct list_head *from
 	int rc, nr_subpages;
 	LIST_HEAD(ret_pages);
 
+	trace_mm_migrate_pages_start(mode, reason);
+
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
_
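
For reference, the sketch below shows one way the new events might be exercised
from userspace once the patch is applied.  It is illustrative only and not part
of the patch; it assumes tracefs is mounted at /sys/kernel/tracing (adjust the
path if it is mounted elsewhere) and that the kernel has CMA and tracing
enabled.  The event directory names follow the TRACE_SYSTEM names used by the
headers above (cma and migrate).

/*
 * Illustrative helper, not part of this patch: enable the CMA and
 * migrate tracepoints through tracefs before running a CMA allocation
 * test.  Needs to run as root.
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	static const char *events[] = {
		"/sys/kernel/tracing/events/cma/cma_alloc_start/enable",
		"/sys/kernel/tracing/events/cma/cma_alloc/enable",
		"/sys/kernel/tracing/events/cma/cma_alloc_busy_retry/enable",
		"/sys/kernel/tracing/events/migrate/mm_migrate_pages_start/enable",
		"/sys/kernel/tracing/events/migrate/mm_migrate_pages/enable",
	};
	unsigned int i;

	for (i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		write_str(events[i], "1");

	/*
	 * Trigger CMA allocations here (e.g. via a driver or dma-buf heap),
	 * then read /sys/kernel/tracing/trace to inspect the timestamps.
	 */
	return 0;
}

With the events enabled, the interval between a cma_alloc_start record and the
matching cma_alloc record in the trace output approximates the latency of each
CMA allocation, and mm_migrate_pages_start/mm_migrate_pages bracket the page
migration work done on its behalf.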