From: Nitesh Narayan Lal <nilal@xxxxxxxxxx>

Signed-off-by: Nitesh Narayan Lal <nilal@xxxxxxxxxx>
---
 include/trace/events/kmem.h | 101 ++++++++++++++++++++++++++++++++++++++++++++
 virt/kvm/page_hinting.c     |  20 ++++++++-
 2 files changed, 119 insertions(+), 2 deletions(-)

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 6b2e154..ec97791 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -317,6 +317,107 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 		__entry->change_ownership)
 );
 
+TRACE_EVENT(guest_free_page,
+	TP_PROTO(struct page *page, unsigned int order),
+
+	TP_ARGS(page, order),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, pfn)
+		__field(unsigned int, order)
+	),
+
+	TP_fast_assign(
+		__entry->pfn = page_to_pfn(page);
+		__entry->order = order;
+	),
+
+	TP_printk("page=%p pfn=%lu number of pages=%d",
+		  pfn_to_page(__entry->pfn),
+		  __entry->pfn,
+		  (1 << __entry->order))
+);
+
+TRACE_EVENT(guest_alloc_page,
+	TP_PROTO(struct page *page, unsigned int order),
+
+	TP_ARGS(page, order),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, pfn)
+		__field(unsigned int, order)
+	),
+
+	TP_fast_assign(
+		__entry->pfn = page_to_pfn(page);
+		__entry->order = order;
+	),
+
+	TP_printk("page=%p pfn=%lu number of pages=%d",
+		  pfn_to_page(__entry->pfn),
+		  __entry->pfn,
+		  (1 << __entry->order))
+);
+
+TRACE_EVENT(guest_free_page_slowpath,
+	TP_PROTO(unsigned long pfn, unsigned int pages),
+
+	TP_ARGS(pfn, pages),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, pfn)
+		__field(unsigned int, pages)
+	),
+
+	TP_fast_assign(
+		__entry->pfn = pfn;
+		__entry->pages = pages;
+	),
+
+	TP_printk("pfn=%lu number of pages=%u",
+		  __entry->pfn,
+		  __entry->pages)
+);
+
+TRACE_EVENT(guest_pfn_dump,
+	TP_PROTO(char *type, unsigned long pfn, unsigned int pages),
+
+	TP_ARGS(type, pfn, pages),
+
+	TP_STRUCT__entry(
+		__field(char *, type)
+		__field(unsigned long, pfn)
+		__field(unsigned int, pages)
+	),
+
+	TP_fast_assign(
+		__entry->type = type;
+		__entry->pfn = pfn;
+		__entry->pages = pages;
+	),
+
+	TP_printk("Type=%s pfn=%lu number of pages=%d",
+		  __entry->type,
+		  __entry->pfn,
+		  __entry->pages)
);
+
+TRACE_EVENT(guest_str_dump,
+	TP_PROTO(char *str),
+
+	TP_ARGS(str),
+
+	TP_STRUCT__entry(
+		__field(char *, str)
+	),
+
+	TP_fast_assign(
+		__entry->str = str;
+	),
+
+	TP_printk("Debug=%s",
+		  __entry->str)
+);
 #endif /* _TRACE_KMEM_H */
 
 /* This part must be outside protection */
diff --git a/virt/kvm/page_hinting.c b/virt/kvm/page_hinting.c
index 658856d..54fe6bc 100644
--- a/virt/kvm/page_hinting.c
+++ b/virt/kvm/page_hinting.c
@@ -4,6 +4,7 @@
 #include <linux/kvm_host.h>
 #include <linux/sort.h>
 #include <linux/kernel.h>
+#include <trace/events/kmem.h>
 
 #define MAX_FGPT_ENTRIES	1000
 #define HYPERLIST_THRESHOLD	500
@@ -48,12 +49,13 @@ static void empty_hyperlist(void)
 	}
 }
 
-static void make_hypercall(void)
+void make_hypercall(void)
 {
 	/*
 	 * Dummy function: Tobe filled later.
 	 */
 	empty_hyperlist();
+	trace_guest_str_dump("Hypercall to host...:");
 }
 
 static int sort_pfn(const void *a1, const void *b1)
@@ -70,13 +72,16 @@ static int sort_pfn(const void *a1, const void *b1)
 	return 0;
 }
 
-static int pack_hyperlist(void)
+int pack_hyperlist(void)
 {
 	int i = 0, j = 0;
 
 	while (i < MAX_FGPT_ENTRIES) {
 		if (hypervisor_pagelist[i].pfn != 0) {
 			if (i != j) {
+				trace_guest_pfn_dump("Packing Hyperlist",
+						     hypervisor_pagelist[i].pfn,
+						     hypervisor_pagelist[i].pages);
 				hypervisor_pagelist[j].pfn =
 						hypervisor_pagelist[i].pfn;
 				hypervisor_pagelist[j].pages =
@@ -163,6 +168,9 @@ void copy_hyperlist(int hyper_idx)
 
 	free_page_obj = &get_cpu_var(kvm_pt)[0];
 	while (i < hyper_idx) {
+		trace_guest_pfn_dump("HyperList entry copied",
+				     hypervisor_pagelist[i].pfn,
+				     hypervisor_pagelist[i].pages);
 		free_page_obj[*idx].pfn = hypervisor_pagelist[i].pfn;
 		free_page_obj[*idx].pages = hypervisor_pagelist[i].pages;
 		*idx += 1;
@@ -203,11 +211,14 @@ void arch_free_page_slowpath(void)
 
 				pfn = head_pfn + alloc_pages;
 				prev_free = false;
+				trace_guest_pfn_dump("Compound",
+						     head_pfn, alloc_pages);
 				continue;
 			}
 			if (page_ref_count(p)) {
 				pfn++;
 				prev_free = false;
+				trace_guest_pfn_dump("Single", pfn, 1);
 				continue;
 			}
 			/*
@@ -218,6 +229,9 @@ void arch_free_page_slowpath(void)
 			hyper_idx++;
 			hypervisor_pagelist[hyper_idx].pfn = pfn;
 			hypervisor_pagelist[hyper_idx].pages = 1;
+			trace_guest_free_page_slowpath(
+				hypervisor_pagelist[hyper_idx].pfn,
+				hypervisor_pagelist[hyper_idx].pages);
 			if (hyper_idx == MAX_FGPT_ENTRIES - 1) {
 				hyper_idx = compress_hyperlist();
 				if (hyper_idx >=
@@ -261,6 +275,7 @@ void arch_alloc_page(struct page *page, int order)
 	do {
 		seq = read_seqbegin(&guest_page_lock);
 	} while (read_seqretry(&guest_page_lock, seq));
+	trace_guest_alloc_page(page, order);
 }
 
 void arch_free_page(struct page *page, int order)
@@ -276,6 +291,7 @@ void arch_free_page(struct page *page, int order)
 	 */
 	local_irq_save(flags);
	free_page_obj = &get_cpu_var(kvm_pt)[0];
+	trace_guest_free_page(page, order);
 	free_page_obj[*free_page_idx].pfn = page_to_pfn(page);
 	free_page_obj[*free_page_idx].pages = 1 << order;
 	*free_page_idx += 1;
-- 
2.9.4
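
Testing note (not part of the patch): since the new events are defined in include/trace/events/kmem.h, they should appear under the existing kmem group in tracefs once a guest kernel with this patch is booted. A minimal way to exercise them, assuming tracefs is mounted at the usual debugfs location:

  # cd /sys/kernel/debug/tracing
  # echo 1 > events/kmem/guest_free_page/enable
  # echo 1 > events/kmem/guest_alloc_page/enable
  # echo 1 > events/kmem/guest_free_page_slowpath/enable
  # cat trace_pipe

The guest_pfn_dump and guest_str_dump events can be enabled the same way to follow the slowpath and hypercall activity.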