On 12/9/2022 12:45 PM, Robert Hoo wrote:
When only the LAM bits change, ask the next vcpu run to load the mmu pgd, so
that it builds the new CR3 with the updated LAM bits. No TLB flush is needed
in this case.
When the effective address changes, whether or not the LAM bits change, go
through the normal pgd update process.
Signed-off-by: Robert Hoo <robert.hu@xxxxxxxxxxxxxxx>
---
arch/x86/kvm/x86.c | 24 ++++++++++++++++++++----
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 48a2ad1e4cd6..6fbe8dd36b1e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1248,9 +1248,9 @@ static bool kvm_is_valid_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
bool skip_tlb_flush = false;
- unsigned long pcid = 0;
+ unsigned long pcid = 0, old_cr3;
#ifdef CONFIG_X86_64
- bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+ bool pcid_enabled = !!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
if (pcid_enabled) {
skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
@@ -1263,6 +1263,10 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
goto handle_tlb_flush;
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_LAM) &&
+ (cr3 & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57)))
+ return 1;
+
/*
* Do not condition the GPA check on long mode, this helper is used to
* stuff CR3, e.g. for RSM emulation, and there is no guarantee that
@@ -1274,8 +1278,20 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
return 1;
- if (cr3 != kvm_read_cr3(vcpu))
- kvm_mmu_new_pgd(vcpu, cr3);
+ old_cr3 = kvm_read_cr3(vcpu);
+ if (cr3 != old_cr3) {
+ if ((cr3 ^ old_cr3) & CR3_ADDR_MASK) {
+ kvm_mmu_new_pgd(vcpu, cr3 & ~(X86_CR3_LAM_U48 |
+ X86_CR3_LAM_U57));
"CR3_ADDR_MASK" should not contain "X86_CR3_LAM_U48 | X86_CR3_LAM_U57"
But seems it is not defined explicitly.
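Something like the below is what I had in mind, just as an illustration (the
LAM bit positions are from the Intel LAM spec, and CR3_ADDR_MASK is the
existing definition in arch/x86/include/asm/processor-flags.h; it only covers
the physical-address bits, so bits 61/62 are excluded implicitly rather than
by anything stated in the header):

	/* CR3 LAM control bits (Intel LAM spec): bit 61 and bit 62 */
	#define X86_CR3_LAM_U57_BIT	61
	#define X86_CR3_LAM_U48_BIT	62
	#define X86_CR3_LAM_U57		_BITULL(X86_CR3_LAM_U57_BIT)
	#define X86_CR3_LAM_U48		_BITULL(X86_CR3_LAM_U48_BIT)

	/*
	 * Existing definition: masks off the address-space ID and SME bits,
	 * keeping only the physical address. The LAM bits fall outside it
	 * only as a side effect of the physical address width.
	 */
	#define CR3_ADDR_MASK	__sme_clr(PHYSICAL_PAGE_MASK)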
Besides this, it looks good to me.
Reviewed-by: Jingqi Liu <jingqi.liu@xxxxxxxxx>
+ } else {
+ /*
+ * Though the effective address doesn't change, mark the
+ * request so that the LAM bits take effect when
+ * entering the guest.
+ */
+ kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
+ }
+ }
vcpu->arch.cr3 = cr3;
kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
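Also, just to spell out for myself why making the request (without a pgd
switch or TLB flush) is enough for the LAM-only case: the request is consumed
on the next entry and ends up in the vendor .load_mmu_pgd hook, which is where
the hardware CR3 is rebuilt from vcpu->arch.cr3. A rough sketch of the
existing request handling (paraphrased from current code, not part of this
patch; the LAM-bit construction itself presumably lives in the VMX
.load_mmu_pgd implementation elsewhere in this series):

	/* x86.c: vcpu_enter_guest(), existing request handling */
	if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
		kvm_mmu_load_pgd(vcpu);

	/* mmu.h: hand the current root to the vendor hook */
	static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
	{
		u64 root_hpa = vcpu->arch.mmu->root.hpa;

		if (!VALID_PAGE(root_hpa))
			return;

		static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
						  vcpu->arch.mmu->root_role.level);
	}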