Gavin Shan <gshan@xxxxxxxxxx> writes: > On 12/13/24 1:55 AM, Steven Price wrote: .... >> +static int private_memslot_fault(struct kvm_vcpu *vcpu, >> + phys_addr_t fault_ipa, >> + struct kvm_memory_slot *memslot) >> +{ >> + struct kvm *kvm = vcpu->kvm; >> + gpa_t gpa = kvm_gpa_from_fault(kvm, fault_ipa); >> + gfn_t gfn = gpa >> PAGE_SHIFT; >> + bool priv_exists = kvm_mem_is_private(kvm, gfn); >> + struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; >> + struct page *page; >> + kvm_pfn_t pfn; >> + int ret; >> + /* >> + * For Realms, the shared address is an alias of the private GPA with >> + * the top bit set. Thus if the fault address matches the GPA then it >> + * is the private alias. >> + */ >> + bool is_priv_gfn = (gpa == fault_ipa); >> + > > We may rename 'priv_exists' to 'was_priv_gfn', which is consistent to 'is_priv_gfn'. > Alternatively, we may use 'was_private' and 'is_private'. > >> + if (priv_exists != is_priv_gfn) { >> + kvm_prepare_memory_fault_exit(vcpu, >> + gpa, >> + PAGE_SIZE, >> + kvm_is_write_fault(vcpu), >> + false, is_priv_gfn); >> + >> + return -EFAULT; >> + } >> + >> + if (!is_priv_gfn) { >> + /* Not a private mapping, handling normally */ >> + return -EINVAL; >> + } >> + >> + ret = kvm_mmu_topup_memory_cache(memcache, >> + kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu)); >> + if (ret) >> + return ret; >> + >> + ret = kvm_gmem_get_pfn(kvm, memslot, gfn, &pfn, &page, NULL); >> + if (ret) >> + return ret; >> + >> + /* FIXME: Should be able to use bigger than PAGE_SIZE mappings */ >> + ret = realm_map_ipa(kvm, fault_ipa, pfn, PAGE_SIZE, KVM_PGTABLE_PROT_W, >> + memcache); >> + if (!ret) >> + return 1; /* Handled */ >> + >> + put_page(page); >> + return ret; >> +} I also found the names confusing. 
Can we do the following? modified arch/arm64/kvm/mmu.c @@ -1487,7 +1487,7 @@ static int private_memslot_fault(struct kvm_vcpu *vcpu, struct kvm *kvm = vcpu->kvm; gpa_t gpa = kvm_gpa_from_fault(kvm, fault_ipa); gfn_t gfn = gpa >> PAGE_SHIFT; - bool priv_exists = kvm_mem_is_private(kvm, gfn); + bool is_priv_gfn = kvm_mem_is_private(kvm, gfn); struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; struct page *page; kvm_pfn_t pfn; @@ -1497,19 +1497,19 @@ static int private_memslot_fault(struct kvm_vcpu *vcpu, * the top bit set. Thus is the fault address matches the GPA then it * is the private alias. */ - bool is_priv_gfn = (gpa == fault_ipa); + bool is_priv_fault = (gpa == fault_ipa); - if (priv_exists != is_priv_gfn) { + if (is_priv_gfn != is_priv_fault) { kvm_prepare_memory_fault_exit(vcpu, gpa, PAGE_SIZE, kvm_is_write_fault(vcpu), - false, is_priv_gfn); + false, is_priv_fault); return 0; } - if (!is_priv_gfn) { + if (!is_priv_fault) { /* Not a private mapping, handling normally */ return -EINVAL; }