[PATCH v4 09/13] perf/x86/intel: Handle KVM virtual metrics event in perf system

KVM creates a virtual metrics event to claim the PERF_METRICS MSR, but
the perf subsystem can't recognize this virtual metrics event because it
uses a different event code from the known metrics events. Modify the
perf subsystem so that the KVM virtual metrics event is recognized and
processed properly.

Unlike a normal event, the counter of the virtual metrics event doesn't
hold a real count value; instead it stores the raw value of the
PERF_METRICS MSR, so that KVM can retrieve the raw PERF_METRICS data
after the virtual metrics event is disabled.
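
As a rough KVM-side illustration (not part of this patch), the raw
PERF_METRICS snapshot could be pulled out of the virtual metrics event's
count once the event is stopped. The helper name and the vmetrics_event
handle below are hypothetical; only perf_event_disable() and
perf_event_read_value() are existing kernel APIs:

/* Sketch only; needs <linux/perf_event.h>. */
static u64 read_guest_perf_metrics(struct perf_event *vmetrics_event)
{
	u64 enabled, running;

	/*
	 * Stopping the event makes the update path below stash the raw
	 * PERF_METRICS MSR value into event->count.
	 */
	perf_event_disable(vmetrics_event);

	/*
	 * For the virtual metrics event this is the raw MSR image,
	 * not an accumulated count.
	 */
	return perf_event_read_value(vmetrics_event, &enabled, &running);
}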

Signed-off-by: Dapeng Mi <dapeng1.mi@xxxxxxxxxxxxxxx>
---
 arch/x86/events/intel/core.c | 39 +++++++++++++++++++++++++++---------
 arch/x86/events/perf_event.h |  9 ++++++++-
 2 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 1c349290677c..df56e091eb25 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2546,7 +2546,7 @@ static int icl_set_topdown_event_period(struct perf_event *event)
 		hwc->saved_metric = 0;
 	}
 
-	if ((hwc->saved_slots) && is_slots_event(event)) {
+	if (is_slots_event(event)) {
 		wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
 		wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
 	}
@@ -2619,6 +2619,15 @@ static void __icl_update_topdown_event(struct perf_event *event,
 	}
 }
 
+static inline void __icl_update_vmetrics_event(struct perf_event *event, u64 metrics)
+{
+	/*
+	 * For the guest metrics event, the count would be used to save
+	 * the raw data of PERF_METRICS MSR.
+	 */
+	local64_set(&event->count, metrics);
+}
+
 static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
 				      u64 metrics, int metric_end)
 {
@@ -2638,6 +2647,17 @@ static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
 	}
 }
 
+static inline void _intel_update_topdown_event(struct perf_event *event,
+					       u64 slots, u64 metrics,
+					       u64 last_slots, u64 last_metrics)
+{
+	if (is_vmetrics_event(event))
+		__icl_update_vmetrics_event(event, metrics);
+	else
+		__icl_update_topdown_event(event, slots, metrics,
+					   last_slots, last_metrics);
+}
+
 /*
  * Update all active Topdown events.
  *
@@ -2665,9 +2685,9 @@ static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
 		if (!is_topdown_idx(idx))
 			continue;
 		other = cpuc->events[idx];
-		__icl_update_topdown_event(other, slots, metrics,
-					   event ? event->hw.saved_slots : 0,
-					   event ? event->hw.saved_metric : 0);
+		_intel_update_topdown_event(other, slots, metrics,
+					    event ? event->hw.saved_slots : 0,
+					    event ? event->hw.saved_metric : 0);
 	}
 
 	/*
@@ -2675,9 +2695,9 @@ static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
 	 * in active_mask e.g. x86_pmu_stop()
 	 */
 	if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
-		__icl_update_topdown_event(event, slots, metrics,
-					   event->hw.saved_slots,
-					   event->hw.saved_metric);
+		_intel_update_topdown_event(event, slots, metrics,
+					    event->hw.saved_slots,
+					    event->hw.saved_metric);
 
 		/*
 		 * In x86_pmu_stop(), the event is cleared in active_mask first,
@@ -3858,8 +3878,9 @@ static int core_pmu_hw_config(struct perf_event *event)
 
 static bool is_available_metric_event(struct perf_event *event)
 {
-	return is_metric_event(event) &&
-		event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
+	return (is_metric_event(event) &&
+		event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX) ||
+			is_vmetrics_event(event);
 }
 
 static inline bool is_mem_loads_event(struct perf_event *event)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index a0d12989a483..7238b7f871ce 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -105,9 +105,16 @@ static inline bool is_slots_event(struct perf_event *event)
 	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
 }
 
+static inline bool is_vmetrics_event(struct perf_event *event)
+{
+	return (event->attr.config & INTEL_ARCH_EVENT_MASK) ==
+			INTEL_FIXED_VMETRICS_EVENT;
+}
+
 static inline bool is_topdown_event(struct perf_event *event)
 {
-	return is_metric_event(event) || is_slots_event(event);
+	return is_metric_event(event) || is_slots_event(event) ||
+			is_vmetrics_event(event);
 }
 
 struct amd_nb {
-- 
2.34.1



