Signed-off-by: Nitesh Narayan Lal <nilal@xxxxxxxxxx>
---
 include/trace/events/kvm.h | 101 +++++++++++++++++++++++++++++++++++++++++++++
 virt/kvm/page_hinting.c    |  17 ++++++++
 2 files changed, 118 insertions(+)

diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 8ade3eb..96d29a4 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -393,6 +393,107 @@ TRACE_EVENT(kvm_halt_poll_ns,
 #define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
 	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
 
+TRACE_EVENT(guest_free_page,
+	TP_PROTO(struct page *page, unsigned int order),
+
+	TP_ARGS(page, order),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, pfn)
+		__field(unsigned int, order)
+	),
+
+	TP_fast_assign(
+		__entry->pfn = page_to_pfn(page);
+		__entry->order = order;
+	),
+
+	TP_printk("page=%p pfn=%lu number of pages=%d",
+		  pfn_to_page(__entry->pfn),
+		  __entry->pfn,
+		  (1 << __entry->order))
+);
+
+TRACE_EVENT(guest_alloc_page,
+	TP_PROTO(struct page *page, unsigned int order),
+
+	TP_ARGS(page, order),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, pfn)
+		__field(unsigned int, order)
+	),
+
+	TP_fast_assign(
+		__entry->pfn = page_to_pfn(page);
+		__entry->order = order;
+	),
+
+	TP_printk("page=%p pfn=%lu number of pages=%d",
+		  pfn_to_page(__entry->pfn),
+		  __entry->pfn,
+		  (1 << __entry->order))
+);
+
+TRACE_EVENT(guest_free_page_slowpath,
+	TP_PROTO(unsigned long pfn, unsigned int pages),
+
+	TP_ARGS(pfn, pages),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, pfn)
+		__field(unsigned int, pages)
+	),
+
+	TP_fast_assign(
+		__entry->pfn = pfn;
+		__entry->pages = pages;
+	),
+
+	TP_printk("pfn=%lu number of pages=%u",
+		  __entry->pfn,
+		  __entry->pages)
+);
+
+TRACE_EVENT(guest_pfn_dump,
+	TP_PROTO(char *type, unsigned long pfn, unsigned int pages),
+
+	TP_ARGS(type, pfn, pages),
+
+	TP_STRUCT__entry(
+		__field(char *, type)
+		__field(unsigned long, pfn)
+		__field(unsigned int, pages)
+	),
+
+	TP_fast_assign(
+		__entry->type = type;
+		__entry->pfn = pfn;
+		__entry->pages = pages;
+	),
+
+	TP_printk("Type=%s pfn=%lu number of pages=%d",
+		  __entry->type,
+		  __entry->pfn,
+		  __entry->pages)
+);
+
+TRACE_EVENT(guest_str_dump,
+	TP_PROTO(char *str),
+
+	TP_ARGS(str),
+
+	TP_STRUCT__entry(
+		__field(char *, str)
+	),
+
+	TP_fast_assign(
+		__entry->str = str;
+	),
+
+	TP_printk("Debug=%s",
+		  __entry->str)
+);
 #endif /* _TRACE_KVM_MAIN_H */
 
 /* This part must be outside protection */
diff --git a/virt/kvm/page_hinting.c b/virt/kvm/page_hinting.c
index cfdc513..5fb390e0 100644
--- a/virt/kvm/page_hinting.c
+++ b/virt/kvm/page_hinting.c
@@ -55,6 +55,7 @@ static void make_hypercall(void)
 	 * Dummy function: Tobe filled later.
 	 */
 	empty_hyperlist();
+	trace_guest_str_dump("Hypercall to host...:");
 }
 
 static int sort_pfn(const void *a1, const void *b1)
@@ -77,6 +78,9 @@ static int pack_hyperlist(void)
 
 	while (i < MAX_FGPT_ENTRIES - 1) {
 		if (hypervisor_pagelist[i].pfn != 0) {
+			trace_guest_pfn_dump("Packing Hyperlist",
+					     hypervisor_pagelist[i].pfn,
+					     hypervisor_pagelist[i].pages);
 			hypervisor_pagelist[j].pfn =
 					hypervisor_pagelist[i].pfn;
 			hypervisor_pagelist[j].pages =
@@ -164,6 +168,9 @@ void copy_hyperlist(int hyper_idx)
 
 	free_page_obj = &get_cpu_var(kvm_pt)[0];
 	while (i < hyper_idx) {
+		trace_guest_pfn_dump("HyperList entry copied",
+				     hypervisor_pagelist[i].pfn,
+				     hypervisor_pagelist[i].pages);
 		free_page_obj[*idx].pfn = hypervisor_pagelist[i].pfn;
 		free_page_obj[*idx].pages = hypervisor_pagelist[i].pages;
 		*idx += 1;
@@ -204,11 +211,14 @@ void arch_free_page_slowpath(void)
 
 				pfn = head_pfn + alloc_pages;
 				prev_free = false;
+				trace_guest_pfn_dump("Compound",
+						     head_pfn, alloc_pages);
 				continue;
 			}
 			if (page_ref_count(p)) {
 				pfn++;
 				prev_free = false;
+				trace_guest_pfn_dump("Single", pfn, 1);
 				continue;
 			}
 			/*
@@ -216,6 +226,11 @@ void arch_free_page_slowpath(void)
 			 * hypervisor_pagelist if required.
 			 */
 			if (!prev_free) {
+				if (hyper_idx != -1) {
+					trace_guest_free_page_slowpath(
+					hypervisor_pagelist[hyper_idx].pfn,
+					hypervisor_pagelist[hyper_idx].pages);
+				}
 				hyper_idx++;
 				if (hyper_idx == MAX_FGPT_ENTRIES - 1) {
 					hyper_idx = compress_hyperlist();
@@ -262,6 +277,7 @@ void arch_alloc_page(struct page *page, int order)
 	do {
 		seq = read_seqbegin(&guest_page_lock);
 	} while (read_seqretry(&guest_page_lock, seq));
+	trace_guest_alloc_page(page, order);
 }
 
 void arch_free_page(struct page *page, int order)
@@ -277,6 +293,7 @@ void arch_free_page(struct page *page, int order)
 	 */
 	local_irq_save(flags);
 	free_page_obj = &get_cpu_var(kvm_pt)[0];
+	trace_guest_free_page(page, order);
 	free_page_obj[*free_page_idx].pfn = page_to_pfn(page);
 	free_page_obj[*free_page_idx].pages = 1 << order;
 	*free_page_idx += 1;
-- 
2.9.4
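
For reference, the sketch below is not part of the patch: it is a minimal guest userspace program showing one way the new tracepoints could be observed. It assumes tracefs is mounted at /sys/kernel/debug/tracing (adjust to /sys/kernel/tracing if mounted there) and that the events land in the "kvm" trace system defined at the top of include/trace/events/kvm.h; run it as root in the guest while pages are being freed to stream records formatted by the TP_printk strings above.

/*
 * Hypothetical helper (not part of the patch): enable the guest_free_page
 * tracepoint added above and echo trace_pipe output to stdout.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/debug/tracing"	/* assumed mount point */

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Turn on the new tracepoint and tracing itself. */
	if (write_str(TRACEFS "/events/kvm/guest_free_page/enable", "1") ||
	    write_str(TRACEFS "/tracing_on", "1")) {
		perror("enable tracepoint");
		return 1;
	}

	/* Stream records such as "page=... pfn=... number of pages=...". */
	fd = open(TRACEFS "/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("open trace_pipe");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}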