Factor the implementation of reset_tdp_shadow_zero_bits_mask() out into a
helper function which does not require a struct kvm_mmu context, and thus
no vCPU. Apart from the shadow_zero_check being filled in, the only field
of the kvm_mmu context the function uses is the shadow root level, so pass
that in too instead of the mmu context. No functional change intended.

Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3b8da8b0745e..6f98111f8f8b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4487,16 +4487,14 @@ static inline bool boot_cpu_is_amd(void)
  * possible, however, kvm currently does not do execution-protection.
  */
 static void
-reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
+build_tdp_shadow_zero_bits_mask(struct rsvd_bits_validate *shadow_zero_check,
+				int shadow_root_level)
 {
-	struct rsvd_bits_validate *shadow_zero_check;
 	int i;
 
-	shadow_zero_check = &context->shadow_zero_check;
-
 	if (boot_cpu_is_amd())
 		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
-					context->shadow_root_level, false,
+					shadow_root_level, false,
 					boot_cpu_has(X86_FEATURE_GBPAGES),
 					false, true);
 	else
@@ -4507,12 +4505,19 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
 	if (!shadow_me_mask)
 		return;
 
-	for (i = context->shadow_root_level; --i >= 0;) {
+	for (i = shadow_root_level; --i >= 0;) {
 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
 	}
 }
 
+static void
+reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
+{
+	build_tdp_shadow_zero_bits_mask(&context->shadow_zero_check,
+					context->shadow_root_level);
+}
+
 /*
  * as the comments in reset_shadow_zero_bits_mask() except it
  * is the shadow page table for intel nested guest.
-- 
2.35.1.894.gb6a874cedc-goog
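
As a sketch of the intended use: once the mask-building logic no longer
depends on a kvm_mmu, a path that has no vCPU in hand can fill in a local
rsvd_bits_validate directly. The caller below is hypothetical (it is not
part of this patch) and only illustrates the new helper's contract, which
needs nothing beyond an output structure and a root level:

static void example_vm_scoped_caller(int shadow_root_level)
{
	/* Local mask, not tied to any vCPU's kvm_mmu context. */
	struct rsvd_bits_validate shadow_zero_check;

	/*
	 * Hypothetical call site: only the shadow root level is
	 * needed, e.g. one derived from VM-wide state rather than
	 * from a particular vCPU's mmu context.
	 */
	build_tdp_shadow_zero_bits_mask(&shadow_zero_check,
					shadow_root_level);
}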