This is the host side interface that the guests which support KVM TMEM talk to. Signed-off-by: Sasha Levin <levinsasha928@xxxxxxxxx> --- arch/x86/kvm/tmem/Kconfig | 6 +++ arch/x86/kvm/tmem/Makefile | 2 + arch/x86/kvm/tmem/host.c | 78 ++++++++++++++++++++++++++++++++++ arch/x86/kvm/tmem/host.h | 20 +++++++++ arch/x86/kvm/x86.c | 8 +--- drivers/staging/zcache/zcache-main.c | 35 +++++++++++++++- 6 files changed, 141 insertions(+), 8 deletions(-) create mode 100644 arch/x86/kvm/tmem/host.c create mode 100644 arch/x86/kvm/tmem/host.h diff --git a/arch/x86/kvm/tmem/Kconfig b/arch/x86/kvm/tmem/Kconfig index 15d8301..1a59e4f 100644 --- a/arch/x86/kvm/tmem/Kconfig +++ b/arch/x86/kvm/tmem/Kconfig @@ -13,4 +13,10 @@ menuconfig KVM_TMEM if KVM_TMEM +config KVM_TMEM_HOST + bool "Host-side KVM TMEM" + ---help--- + With this option on, the KVM host will be able to process KVM TMEM requests + coming from guests. + endif # KVM_TMEM diff --git a/arch/x86/kvm/tmem/Makefile b/arch/x86/kvm/tmem/Makefile index 6812d46..706cd36 100644 --- a/arch/x86/kvm/tmem/Makefile +++ b/arch/x86/kvm/tmem/Makefile @@ -1 +1,3 @@ ccflags-y += -Idrivers/staging/zcache/ + +obj-$(CONFIG_KVM_TMEM_HOST) += host.o diff --git a/arch/x86/kvm/tmem/host.c b/arch/x86/kvm/tmem/host.c new file mode 100644 index 0000000..9e73395 --- /dev/null +++ b/arch/x86/kvm/tmem/host.c @@ -0,0 +1,78 @@ +/* + * KVM TMEM host side interface + * + * Copyright (c) 2012 Sasha Levin + * + */ + +#include <linux/kvm_types.h> +#include <linux/kvm_host.h> + +#include "tmem.h" +#include <zcache.h> + +int use_kvm_tmem_host = 1; + +static int no_kvmtmemhost(char *s) +{ + use_kvm_tmem_host = 0; + return 1; +} + +__setup("nokvmtmemhost", no_kvmtmemhost); + +int kvm_pv_tmem_op(struct kvm_vcpu *vcpu, gpa_t addr, unsigned long *ret) +{ + struct tmem_kvm_op op; + struct page *page; + int r; + unsigned long flags; + + if (!use_kvm_tmem_host || !zcache_enabled) { + *ret = -ENXIO; + return 0; + } + + r = kvm_read_guest(vcpu->kvm, addr, &op, 
sizeof(op)); + if (r < 0) { + *ret = r; + return 0; + } + + switch (op.cmd) { + case TMEM_NEW_POOL: + *ret = zcache_new_pool(op.cli_id, op.u.new.flags); + break; + case TMEM_DESTROY_POOL: + *ret = zcache_destroy_pool(op.cli_id, op.pool_id); + break; + case TMEM_NEW_PAGE: + break; + case TMEM_PUT_PAGE: + page = gfn_to_page(vcpu->kvm, op.u.gen.gfn); + local_irq_save(flags); + *ret = zcache_put_page(op.cli_id, op.pool_id, + &op.u.gen.oid, op.u.gen.index, page); + local_irq_restore(flags); + break; + case TMEM_GET_PAGE: + page = gfn_to_page(vcpu->kvm, op.u.gen.gfn); + local_irq_save(flags); + *ret = zcache_get_page(op.cli_id, op.pool_id, + &op.u.gen.oid, op.u.gen.index, page); + local_irq_restore(flags); + break; + case TMEM_FLUSH_PAGE: + local_irq_save(flags); + *ret = zcache_flush_page(op.cli_id, op.pool_id, + &op.u.gen.oid, op.u.gen.index); + local_irq_restore(flags); + break; + case TMEM_FLUSH_OBJECT: + local_irq_save(flags); + *ret = zcache_flush_object(op.cli_id, op.pool_id, &op.u.gen.oid); + local_irq_restore(flags); + break; + } + return 0; +} diff --git a/arch/x86/kvm/tmem/host.h b/arch/x86/kvm/tmem/host.h new file mode 100644 index 0000000..17ba0c4 --- /dev/null +++ b/arch/x86/kvm/tmem/host.h @@ -0,0 +1,20 @@ +#ifndef _KVM_TMEM_HOST_H_ +#define _KVM_TMEM_HOST_H_ + +#ifdef CONFIG_KVM_TMEM_HOST + +extern int use_kvm_tmem_host; + +extern int kvm_pv_tmem_op(struct kvm_vcpu *vcpu, gpa_t addr, unsigned long *ret); + +#else + +static inline int kvm_pv_tmem_op(struct kvm_vcpu *vcpu, gpa_t addr, unsigned long *ret) +{ + *ret = -ENOTSUPP; + return 0; +} + +#endif + +#endif diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4c5b6ab..c92d4c8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -27,6 +27,7 @@ #include "kvm_cache_regs.h" #include "x86.h" #include "cpuid.h" +#include "tmem/host.h" #include <linux/clocksource.h> #include <linux/interrupt.h> @@ -4993,13 +4994,6 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) return 1; } -static int 
kvm_pv_tmem_op(struct kvm_vcpu *vcpu, gpa_t addr, unsigned long *ret) -{ - *ret = -ENOTSUPP; - - return 0; -} - static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0, unsigned long a1) { diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index a196aff..65c0ab0 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -1692,7 +1692,7 @@ out: return ret; } -static int zcache_new_pool(uint16_t cli_id, uint32_t flags) +int zcache_new_pool(uint16_t cli_id, uint32_t flags) { int poolid = -1; struct tmem_pool *pool; @@ -2011,6 +2011,37 @@ out: return ret; } +#ifdef CONFIG_KVM_TMEM_HOST +extern int use_kvm_tmem_host; + +static int __zcache_init_kvm(void) +{ + int ret = 0; + + if (use_kvm_tmem_host) { + ret = zcache_new_client(KVM_CLIENT); + if (ret) { + pr_err("zcache: can't create client\n"); + return -1; + } else { + pr_info("zcache: enabled support for KVM TMEM\n"); + } + } + zbud_init(); + register_shrinker(&zcache_shrinker); + + return 0; +} + +#else + +static inline int __zcache_init_kvm(void) +{ + return 0; +} + +#endif + static int __init zcache_init(void) { int ret = 0; @@ -2053,6 +2084,8 @@ static int __init zcache_init(void) pr_err("zcache: can't create client\n"); goto out; } + if (__zcache_init_kvm()) + goto out; #endif #ifdef CONFIG_CLEANCACHE if (zcache_enabled && use_cleancache) { -- 1.7.8.6 -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html