From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>

TDX KVM support needs to track whether a GFN is private or shared.
Introduce functions to set whether a GFN is private or shared and to
pre-allocate memory for the xarray.

Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
---
 include/linux/kvm_host.h | 11 ++++++
 virt/kvm/kvm_main.c      | 74 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 85 insertions(+)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a0b64308d240..fac07886ab39 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2307,9 +2307,20 @@ static inline void kvm_account_pgtable_pages(void *virt, int nr)
 #define KVM_MEM_ATTR_PRIVATE	0x0002
 
 #ifdef __KVM_HAVE_ARCH_UPDATE_MEM_ATTR
+/* memory attr on [start, end) */
+int kvm_vm_reserve_mem_attr(struct kvm *kvm, gfn_t start, gfn_t end);
+int kvm_vm_set_mem_attr(struct kvm *kvm, int attr, gfn_t start, gfn_t end);
 void kvm_arch_update_mem_attr(struct kvm *kvm, struct kvm_memory_slot *slot,
 			      unsigned int attr, gfn_t start, gfn_t end);
 #else
+static inline int kvm_vm_reserve_mem_attr(struct kvm *kvm, gfn_t start, gfn_t end)
+{
+	return -EOPNOTSUPP;
+}
+static inline int kvm_vm_set_mem_attr(struct kvm *kvm, int attr, gfn_t start, gfn_t end)
+{
+	return -EOPNOTSUPP;
+}
 static inline void kvm_arch_update_mem_attr(struct kvm *kvm,
 					    struct kvm_memory_slot *slot,
 					    unsigned int attr,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9f82b03a8118..f0e77b65939b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1121,6 +1121,80 @@ static inline void kvm_restrictedmem_unregister(struct kvm_memory_slot *slot)
 					    &slot->notifier);
 }
 
+/*
+ * Reserve memory for [start, end) so that the next set operation won't fail
+ * with -ENOMEM.
+ */
+int kvm_vm_reserve_mem_attr(struct kvm *kvm, gfn_t start, gfn_t end)
+{
+	int r = 0;
+	gfn_t gfn;
+
+	xa_lock(&kvm->mem_attr_array);
+	for (gfn = start; gfn < end; gfn++) {
+		r = __xa_insert(&kvm->mem_attr_array, gfn, NULL, GFP_KERNEL_ACCOUNT);
+		if (r == -EBUSY)
+			r = 0;
+		if (r)
+			break;
+	}
+	xa_unlock(&kvm->mem_attr_array);
+
+	return r;
+}
+EXPORT_SYMBOL_GPL(kvm_vm_reserve_mem_attr);
+
+/* Set memory attr for [start, end) */
+int kvm_vm_set_mem_attr(struct kvm *kvm, int attr, gfn_t start, gfn_t end)
+{
+	void *entry;
+	gfn_t gfn;
+	int r = 0;
+	int i;
+
+	/* By default, the entry is private. */
+	switch (attr) {
+	case KVM_MEM_ATTR_PRIVATE:
+		entry = NULL;
+		break;
+	case KVM_MEM_ATTR_SHARED:
+		entry = xa_mk_value(KVM_MEM_ATTR_SHARED);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
+	WARN_ON_ONCE(start >= end);
+	for (gfn = start; gfn < end; gfn++) {
+		r = xa_err(xa_store(&kvm->mem_attr_array, gfn, entry,
+				    GFP_KERNEL_ACCOUNT));
+		if (r)
+			break;
+	}
+	if (start >= gfn)
+		return r;
+
+	end = gfn;
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		struct kvm_memslot_iter iter;
+		struct kvm_memory_slot *slot;
+		struct kvm_memslots *slots;
+
+		slots = __kvm_memslots(kvm, i);
+		kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
+			gfn_t s = max(start, slot->base_gfn);
+			gfn_t e = min(end, slot->base_gfn + slot->npages);
+
+			WARN_ON_ONCE(s >= e);
+			kvm_arch_update_mem_attr(kvm, slot, attr, s, e);
+		}
+	}
+
+	return r;
+}
+EXPORT_SYMBOL_GPL(kvm_vm_set_mem_attr);
+
 #else /* !CONFIG_HAVE_KVM_RESTRICTED_MEM */
 
 static inline void kvm_restrictedmem_register(struct kvm_memory_slot *slot)
-- 
2.25.1
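
Usage note: the intended calling convention is to reserve the xarray entries
for a GFN range first, so that the subsequent attribute update cannot fail
with -ENOMEM partway through, and only then record the new attribute. Below is
a minimal sketch of such a caller; it assumes a hypothetical conversion helper,
and only kvm_vm_reserve_mem_attr() and kvm_vm_set_mem_attr() come from this
patch.

	/* Hypothetical caller, not part of this patch: convert [start, end) to shared. */
	static int convert_range_to_shared(struct kvm *kvm, gfn_t start, gfn_t end)
	{
		int r;

		/* Pre-allocate xarray entries so the set step cannot hit -ENOMEM. */
		r = kvm_vm_reserve_mem_attr(kvm, start, end);
		if (r)
			return r;

		/* Record the attribute and let the arch hook update each affected memslot. */
		return kvm_vm_set_mem_attr(kvm, KVM_MEM_ATTR_SHARED, start, end);
	}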