From: Quentin Perret <qperret@xxxxxxxxxx>

The host KVM PMU code can currently index kvm_arm_hyp_percpu_base[]
through this_cpu_ptr_hyp_sym(), but will not actually dereference that
pointer when protected KVM is enabled. In preparation for making
kvm_arm_hyp_percpu_base[] inaccessible to the host, let's make sure the
indexing of hyp per-cpu pages is also done after the static key check
to avoid spurious accesses to EL2-private data from EL1.

Signed-off-by: Quentin Perret <qperret@xxxxxxxxxx>
---
 arch/arm64/kvm/pmu.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 03a6c1f4a09a..a8878fd8b696 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -31,9 +31,13 @@ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
  */
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
 {
-	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+	struct kvm_host_data *ctx;
 
-	if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr))
+	if (!kvm_arm_support_pmu_v3())
+		return;
+
+	ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+	if (!ctx || !kvm_pmu_switch_needed(attr))
 		return;
 
 	if (!attr->exclude_host)
@@ -47,9 +51,13 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
  */
 void kvm_clr_pmu_events(u32 clr)
 {
-	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+	struct kvm_host_data *ctx;
+
+	if (!kvm_arm_support_pmu_v3())
+		return;
 
-	if (!kvm_arm_support_pmu_v3() || !ctx)
+	ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+	if (!ctx)
 		return;
 
 	ctx->pmu_events.events_host &= ~clr;
-- 
2.36.1.124.g0e6072fb45-goog
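
For readers outside the kernel tree, the pattern the diff applies can be shown
as a minimal standalone C sketch: perform the cheap "is the feature present"
check first, and only then index the per-cpu base array. All names below
(hyp_percpu_base, pmu_supported, lookup_host_data, host_data) are hypothetical
stand-ins for illustration, not the kernel's actual symbols or macros.

/*
 * Minimal sketch of "check before lookup": the array standing in for
 * kvm_arm_hyp_percpu_base[] is never read when the feature check fails.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct host_data { unsigned long events_host; };

/* Stand-in for the per-cpu base array that becomes private to EL2. */
static unsigned long hyp_percpu_base[NR_CPUS];

/* Stand-in for the kvm_arm_support_pmu_v3() static key check. */
static bool pmu_supported(void) { return false; }

static struct host_data *lookup_host_data(int cpu)
{
	/* This array read is what the patch avoids when PMUv3 is absent. */
	unsigned long base = hyp_percpu_base[cpu];

	return base ? (struct host_data *)base : NULL;
}

static void clr_pmu_events(int cpu, unsigned long clr)
{
	struct host_data *ctx;

	/* Bail out on the cheap check before touching the array. */
	if (!pmu_supported())
		return;

	ctx = lookup_host_data(cpu);
	if (!ctx)
		return;

	ctx->events_host &= ~clr;
}

int main(void)
{
	clr_pmu_events(0, 0x1);	/* returns early, never indexes the array */
	printf("done\n");
	return 0;
}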