From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>

Add new ioctl KVM_MAP_MEMORY in the kvm common code.  It iterates over
the memory range and calls the arch-specific function.  Add stub
functions as weak symbols.

[1] https://lore.kernel.org/kvm/Zbrj5WKVgMsUFDtb@xxxxxxxxxx/

Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
---
 include/linux/kvm_host.h |  4 +++
 include/uapi/linux/kvm.h | 15 ++++++++
 virt/kvm/kvm_main.c      | 74 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 93 insertions(+)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9807ea98b568..afbed288d625 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2445,4 +2445,8 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm,
 }
 #endif /* CONFIG_KVM_PRIVATE_MEM */
 
+int kvm_arch_vcpu_pre_map_memory(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_map_memory(struct kvm_vcpu *vcpu,
+			     struct kvm_memory_mapping *mapping);
+
 #endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 2190adbe3002..f5d6b481244f 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -917,6 +917,7 @@ struct kvm_enable_cap {
 #define KVM_CAP_MEMORY_ATTRIBUTES 233
 #define KVM_CAP_GUEST_MEMFD 234
 #define KVM_CAP_VM_TYPES 235
+#define KVM_CAP_MAP_MEMORY 236
 
 struct kvm_irq_routing_irqchip {
 	__u32 irqchip;
@@ -1548,4 +1549,18 @@ struct kvm_create_guest_memfd {
 	__u64 reserved[6];
 };
 
+#define KVM_MAP_MEMORY	_IOWR(KVMIO, 0xd5, struct kvm_memory_mapping)
+
+#define KVM_MEMORY_MAPPING_FLAG_WRITE	_BITULL(0)
+#define KVM_MEMORY_MAPPING_FLAG_EXEC	_BITULL(1)
+#define KVM_MEMORY_MAPPING_FLAG_USER	_BITULL(2)
+#define KVM_MEMORY_MAPPING_FLAG_PRIVATE	_BITULL(3)
+
+struct kvm_memory_mapping {
+	__u64 base_gfn;
+	__u64 nr_pages;
+	__u64 flags;
+	__u64 source;
+};
+
 #endif /* __LINUX_KVM_H */
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d1fd9cb5d037..d77c9b79d76b 100644
--- a/virt/kvm/kvm_main.c
+++ 
b/virt/kvm/kvm_main.c
@@ -4419,6 +4419,83 @@ static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
 	return fd;
 }
 
+/*
+ * Arch hooks for KVM_MAP_MEMORY.  The __weak stubs report "not
+ * supported"; architectures that can pre-populate guest mappings
+ * override them.
+ */
+__weak int kvm_arch_vcpu_pre_map_memory(struct kvm_vcpu *vcpu)
+{
+	return -EOPNOTSUPP;
+}
+
+__weak int kvm_arch_vcpu_map_memory(struct kvm_vcpu *vcpu,
+				    struct kvm_memory_mapping *mapping)
+{
+	return -EOPNOTSUPP;
+}
+
+/*
+ * kvm_vcpu_map_memory() - pre-populate guest mappings for the range
+ * [base_gfn, base_gfn + nr_pages) described by @mapping.
+ *
+ * The arch callback is expected to advance @mapping as it makes
+ * progress; on partial success -EAGAIN is returned so userspace can
+ * retry with the updated range.
+ */
+static int kvm_vcpu_map_memory(struct kvm_vcpu *vcpu,
+			       struct kvm_memory_mapping *mapping)
+{
+	bool added = false;
+	int idx, r = 0;
+
+	if (mapping->flags & ~(KVM_MEMORY_MAPPING_FLAG_WRITE |
+			       KVM_MEMORY_MAPPING_FLAG_EXEC |
+			       KVM_MEMORY_MAPPING_FLAG_USER |
+			       KVM_MEMORY_MAPPING_FLAG_PRIVATE))
+		return -EINVAL;
+	if ((mapping->flags & KVM_MEMORY_MAPPING_FLAG_PRIVATE) &&
+	    !kvm_arch_has_private_mem(vcpu->kvm))
+		return -EINVAL;
+
+	/* Sanity check */
+	if (!IS_ALIGNED(mapping->source, PAGE_SIZE) ||
+	    !mapping->nr_pages ||
+	    mapping->base_gfn + mapping->nr_pages <= mapping->base_gfn)
+		return -EINVAL;
+
+	vcpu_load(vcpu);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+	r = kvm_arch_vcpu_pre_map_memory(vcpu);
+	if (r)
+		goto out;
+
+	while (mapping->nr_pages) {
+		if (signal_pending(current)) {
+			r = -ERESTARTSYS;
+			break;
+		}
+
+		if (need_resched())
+			cond_resched();
+
+		r = kvm_arch_vcpu_map_memory(vcpu, mapping);
+		if (r)
+			break;
+
+		added = true;
+	}
+
+out:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	vcpu_put(vcpu);
+
+	/* Partial success: tell userspace to retry the remainder. */
+	if (added && mapping->nr_pages > 0)
+		r = -EAGAIN;
+
+	return r;
+}
+
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -4620,6 +4697,17 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
 		break;
 	}
+	case KVM_MAP_MEMORY: {
+		struct kvm_memory_mapping mapping;
+
+		r = -EFAULT;
+		if (copy_from_user(&mapping, argp, sizeof(mapping)))
+			break;
+		r = kvm_vcpu_map_memory(vcpu, &mapping);
+		if (copy_to_user(argp, &mapping, sizeof(mapping)))
+			r = -EFAULT;
+		break;
+	}
 	default:
 		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
 	}
-- 
2.25.1