Whenever the dirty quota is full (i.e. the dirty counter reaches the dirty
quota), control is passed to the QEMU side through a KVM exit with the
custom exit reason KVM_EXIT_DIRTY_QUOTA_FULL, so that it can handle the
dirty-quota-full event.

Co-developed-by: Anurag Madnawat <anurag.madnawat@xxxxxxxxxxx>
Signed-off-by: Anurag Madnawat <anurag.madnawat@xxxxxxxxxxx>
Signed-off-by: Shivam Kumar <shivam.kumar1@xxxxxxxxxxx>
Signed-off-by: Shaju Abraham <shaju.abraham@xxxxxxxxxxx>
Signed-off-by: Manish Mishra <manish.mishra@xxxxxxxxxxx>
---
 arch/x86/kvm/x86.c                    | 9 +++++++++
 include/linux/dirty_quota_migration.h | 6 ++++++
 include/uapi/linux/kvm.h              | 1 +
 virt/kvm/dirty_quota_migration.c      | 5 +++++
 4 files changed, 21 insertions(+)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index dc7eb5fddfd3..32fc7a6f8b86 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -59,6 +59,7 @@
 #include <linux/mem_encrypt.h>
 #include <linux/entry-kvm.h>
 #include <linux/suspend.h>
+#include <linux/dirty_quota_migration.h>
 
 #include <trace/events/kvm.h>
 
@@ -10028,6 +10029,14 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 				return r;
 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 		}
+
+		/* check for dirty quota migration exit condition if it is enabled */
+		if (vcpu->kvm->dirty_quota_migration_enabled &&
+		    is_dirty_quota_full(vcpu->vCPUdqctx)) {
+			vcpu->run->exit_reason = KVM_EXIT_DIRTY_QUOTA_FULL;
+			r = 0;
+			break;
+		}
 	}
 
 	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
diff --git a/include/linux/dirty_quota_migration.h b/include/linux/dirty_quota_migration.h
index b6c6f5f896dd..b9b3bedd9682 100644
--- a/include/linux/dirty_quota_migration.h
+++ b/include/linux/dirty_quota_migration.h
@@ -30,11 +30,17 @@ static inline struct page *kvm_dirty_quota_context_get_page(
 	return NULL;
 }
 
+static inline bool is_dirty_quota_full(struct vCPUDirtyQuotaContext *vCPUdqctx)
+{
+	return true;
+}
+
 #else /* KVM_DIRTY_QUOTA_PAGE_OFFSET == 0 */
 
 int kvm_vcpu_dirty_quota_alloc(struct vCPUDirtyQuotaContext **vCPUdqctx);
 struct page *kvm_dirty_quota_context_get_page(
 		struct vCPUDirtyQuotaContext *vCPUdqctx, u32 offset);
+bool is_dirty_quota_full(struct vCPUDirtyQuotaContext *vCPUdqctx);
 
 #endif /* KVM_DIRTY_QUOTA_PAGE_OFFSET == 0 */
 
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a6785644bf47..6ba39a6015b0 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -270,6 +270,7 @@ struct kvm_xen_exit {
 #define KVM_EXIT_X86_BUS_LOCK    33
 #define KVM_EXIT_XEN             34
 #define KVM_EXIT_RISCV_SBI       35
+#define KVM_EXIT_DIRTY_QUOTA_FULL 36
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
diff --git a/virt/kvm/dirty_quota_migration.c b/virt/kvm/dirty_quota_migration.c
index 7e9ace760939..eeef19347af4 100644
--- a/virt/kvm/dirty_quota_migration.c
+++ b/virt/kvm/dirty_quota_migration.c
@@ -18,3 +18,8 @@ struct page *kvm_dirty_quota_context_get_page(
 {
 	return vmalloc_to_page((void *)vCPUdqctx + offset * PAGE_SIZE);
 }
+
+bool is_dirty_quota_full(struct vCPUDirtyQuotaContext *vCPUdqctx)
+{
+	return (vCPUdqctx->dirty_counter >= vCPUdqctx->dirty_quota);
+}
-- 
2.22.3
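
For illustration only (not part of the patch): a minimal sketch of how a
VMM's vCPU run loop could consume the new exit reason. The dirty_quota_ctx
mirror struct, its field types, the way the context is shared with
userspace, and the next_quota() policy helper are all assumptions made for
the example; only KVM_RUN, struct kvm_run and KVM_EXIT_DIRTY_QUOTA_FULL
come from the KVM uAPI as extended by this series.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Assumed userspace view of the per-vCPU dirty quota context. */
struct dirty_quota_ctx {
	uint64_t dirty_counter;	/* pages dirtied so far */
	uint64_t dirty_quota;	/* pages the vCPU is allowed to dirty */
};

/* Hypothetical policy hook: how many more pages to grant (placeholder). */
static uint64_t next_quota(void)
{
	return 512;
}

static void vcpu_run_loop(int vcpu_fd, struct kvm_run *run,
			  struct dirty_quota_ctx *dqctx)
{
	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
		switch (run->exit_reason) {
		case KVM_EXIT_DIRTY_QUOTA_FULL:
			/*
			 * The vCPU has dirtied as many pages as it was
			 * allowed to; grant a fresh quota (the throttling
			 * policy lives here) before re-entering the guest.
			 */
			dqctx->dirty_quota = dqctx->dirty_counter + next_quota();
			break;
		default:
			/* other exit reasons handled elsewhere */
			return;
		}
	}
}

Refilling by setting dirty_quota relative to the current dirty_counter
keeps the dirty_counter >= dirty_quota check in is_dirty_quota_full()
consistent across refills without resetting the counter.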