Hi Sean,
On 2021/1/6 5:16, Sean Christopherson wrote:
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 6453b8a6834a..ccddda455bec 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3690,6 +3690,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
+ struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
@@ -3735,6 +3736,18 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
*nr = 2;
}
+ if (arr[1].guest) {
+ arr[2].msr = MSR_IA32_DS_AREA;
+ arr[2].host = (unsigned long)ds;
+ /* KVM will update MSR_IA32_DS_AREA with the trapped guest value. */
+ arr[2].guest = 0ull;
+ *nr = 3;
+ } else if (*nr == 2) {
+ arr[2].msr = MSR_IA32_DS_AREA;
+ arr[2].host = arr[2].guest = 0;
+ *nr = 3;
+ }
Similar comments as the previous patch, please figure out a way to properly
integrate this into the PEBS logic instead of querying arr/nr.
To address your comment, could you please confirm whether you are
happy with the following streamlined logic for intel_guest_get_msrs():
/*
 * Build the list of MSRs that are atomically switched between host and
 * guest values around VM-Entry/VM-Exit.  Entry 0 is always
 * MSR_CORE_PERF_GLOBAL_CTRL; when the PMU supports PEBS, entries 1..2
 * (and 3 with the PEBS-baseline extension) carry MSR_IA32_PEBS_ENABLE,
 * MSR_IA32_DS_AREA and MSR_PEBS_DATA_CFG.
 *
 * Returns the per-CPU switch array and stores the entry count in @nr.
 */
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
	/*
	 * Disable PEBS in the guest if PEBS is used by the host; enabling PEBS
	 * in both will lead to unexpected PMIs in the host and/or missed PMIs
	 * in the guest.
	 */
	if (cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask) {
		/*
		 * With PMU_FL_PEBS_ALL every counter can do PEBS, so mask
		 * all PEBS-enabled counters out of the guest's GLOBAL_CTRL;
		 * otherwise only the fixed-function PEBS counter bits
		 * (PEBS_COUNTER_MASK) need to be cleared.
		 */
		if (x86_pmu.flags & PMU_FL_PEBS_ALL)
			arr[0].guest &= ~cpuc->pebs_enabled;
		else
			arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
	}
	*nr = 1;

	if (x86_pmu.pebs) {
		arr[1].msr = MSR_IA32_PEBS_ENABLE;
		arr[2].msr = MSR_IA32_DS_AREA;
		if (x86_pmu.intel_cap.pebs_baseline)
			arr[3].msr = MSR_PEBS_DATA_CFG;

		/*
		 * Skip the MSR loads by stuffing guest=host (KVM will
		 * remove the entry).
		 */
		arr[1].guest = arr[1].host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask;
		arr[2].guest = arr[2].host = (unsigned long)ds;
		if (x86_pmu.intel_cap.pebs_baseline)
			arr[3].guest = arr[3].host = cpuc->pebs_data_cfg;

		/*
		 * Host and guest PEBS are mutually exclusive. Load the guest
		 * value iff PEBS is disabled in the host.
		 *
		 * If PEBS is enabled in the host and the CPU supports PEBS
		 * isolation, disabling the counters is sufficient (see commit
		 * 9b545c04abd4); without isolation, PEBS must be explicitly
		 * disabled prior to VM-Enter to prevent PEBS writes from
		 * overshooting VM-Enter.
		 *
		 * KVM will update arr[2|3].guest with the trapped guest values
		 * iff guest PEBS is allowed to be enabled.
		 */
		if (!arr[1].host) {
			/* Host PEBS is off: expose guest's PEBS_ENABLE bits. */
			arr[1].guest = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
			/* The enabled guest PEBS counters must also be set in GLOBAL_CTRL. */
			arr[0].guest |= arr[1].guest;
		} else if (x86_pmu.pebs_no_isolation)
			arr[1].guest = 0;

		*nr = x86_pmu.intel_cap.pebs_baseline ? 4 : 3;
	}

	return arr;
}
---
Thanks,
Like Xu
+
return arr;
}