From: Atish Patra <atish.patra@xxxxxxx>

The RISC-V hypervisor specification doesn't have any virtual timer
feature, so the guest VCPU timer is programmed via SBI calls. The host
uses a separate hrtimer event for each guest VCPU to provide timer
functionality and injects a virtual timer interrupt into the guest VCPU
whenever that guest VCPU's hrtimer expires.

The following features are not supported yet and will be added in the
future:
1. A time offset to adjust guest time relative to host time
2. A saved next event in the guest VCPU for VM migration

Signed-off-by: Atish Patra <atish.patra@xxxxxxx>
Signed-off-by: Anup Patel <anup.patel@xxxxxxx>
Acked-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Reviewed-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
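Note below the cut line (not part of the commit message): the SBI call
handling that actually programs this timer lands in a separate patch of
this series. Purely as an illustration of how the pieces are expected to
fit together, the sketch below shows a hypothetical handler forwarding a
guest set_timer request to kvm_riscv_vcpu_timer_next_event(); the handler
name and the register plumbing are assumptions, not code from this patch.

/*
 * Illustrative sketch only: a hypothetical SBI set_timer handler
 * forwarding the guest-supplied deadline to the VCPU timer added by
 * this patch.
 */
static void example_sbi_set_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	u64 next_cycle;

#if __riscv_xlen == 32
	/* On RV32 the 64-bit deadline arrives split across a1:a0 */
	next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
	next_cycle = (u64)cp->a0;
#endif

	/*
	 * Arm the per-VCPU hrtimer; kvm_riscv_vcpu_hrtimer_expired()
	 * injects the virtual timer interrupt when it fires.
	 */
	kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
}
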
 arch/riscv/include/asm/kvm_host.h       |   4 +
 arch/riscv/include/asm/kvm_vcpu_timer.h |  30 +++++++
 arch/riscv/kvm/Makefile                 |   2 +-
 arch/riscv/kvm/vcpu.c                   |   6 ++
 arch/riscv/kvm/vcpu_timer.c             | 110 ++++++++++++++++++++++++
 drivers/clocksource/timer-riscv.c       |   8 ++
 include/clocksource/timer-riscv.h       |  16 ++++
 7 files changed, 175 insertions(+), 1 deletion(-)
 create mode 100644 arch/riscv/include/asm/kvm_vcpu_timer.h
 create mode 100644 arch/riscv/kvm/vcpu_timer.c
 create mode 100644 include/clocksource/timer-riscv.h

diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index b1493c6cbe83..0ef0aac8143e 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <linux/kvm.h>
 #include <linux/kvm_types.h>
+#include <asm/kvm_vcpu_timer.h>
 
 #ifdef CONFIG_64BIT
 #define KVM_MAX_VCPUS (1U << 16)
@@ -168,6 +169,9 @@ struct kvm_vcpu_arch {
 	unsigned long irqs_pending;
 	unsigned long irqs_pending_mask;
 
+	/* VCPU Timer */
+	struct kvm_vcpu_timer timer;
+
 	/* MMIO instruction details */
 	struct kvm_mmio_decode mmio_decode;
 
diff --git a/arch/riscv/include/asm/kvm_vcpu_timer.h b/arch/riscv/include/asm/kvm_vcpu_timer.h
new file mode 100644
index 000000000000..6f904d49e27e
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_timer.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ *     Atish Patra <atish.patra@xxxxxxx>
+ */
+
+#ifndef __KVM_VCPU_RISCV_TIMER_H
+#define __KVM_VCPU_RISCV_TIMER_H
+
+#include <linux/hrtimer.h>
+
+struct kvm_vcpu_timer {
+	bool init_done;
+	/* Check if the timer is programmed */
+	bool next_set;
+	u64 next_cycles;
+	struct hrtimer hrt;
+	/* Mult & Shift values to get nanoseconds from cycles */
+	u32 mult;
+	u32 shift;
+};
+
+int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles);
+
+#endif
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index c0f57f26c13d..3e0c7558320d 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -9,6 +9,6 @@ ccflags-y := -Ivirt/kvm -Iarch/riscv/kvm
 kvm-objs := $(common-objs-y)
 
 kvm-objs += main.o vm.o vmid.o tlb.o mmu.o
-kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o
+kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o
 
 obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 36957802fed4..f9c5ef98becf 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -54,6 +54,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	memcpy(cntx, reset_cntx, sizeof(*cntx));
 
+	kvm_riscv_vcpu_timer_reset(vcpu);
+
 	WRITE_ONCE(vcpu->arch.irqs_pending, 0);
 	WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
 }
@@ -108,6 +110,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	cntx->hstatus |= HSTATUS_SP2P;
 	cntx->hstatus |= HSTATUS_SPV;
 
+	/* Setup VCPU timer */
+	kvm_riscv_vcpu_timer_init(vcpu);
+
 	/* Reset VCPU */
 	kvm_riscv_reset_vcpu(vcpu);
 
@@ -116,6 +121,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+	kvm_riscv_vcpu_timer_deinit(vcpu);
 	kvm_riscv_stage2_flush_cache(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
new file mode 100644
index 000000000000..4ff5d666f60d
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ *     Atish Patra <atish.patra@xxxxxxx>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <clocksource/timer-riscv.h>
+#include <asm/csr.h>
+#include <asm/kvm_vcpu_timer.h>
+
+static u64 kvm_riscv_delta_cycles2ns(u64 cycles, struct kvm_vcpu_timer *t)
+{
+	unsigned long flags;
+	u64 cycles_now, cycles_delta, delta_ns;
+
+	local_irq_save(flags);
+	cycles_now = get_cycles64();
+	if (cycles_now < cycles)
+		cycles_delta = cycles - cycles_now;
+	else
+		cycles_delta = 0;
+	delta_ns = (cycles_delta * t->mult) >> t->shift;
+	local_irq_restore(flags);
+
+	return delta_ns;
+}
+
+static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
+{
+	u64 delta_ns;
+	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
+	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
+
+	if (get_cycles64() < t->next_cycles) {
+		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, t);
+		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
+		return HRTIMER_RESTART;
+	}
+
+	t->next_set = false;
+	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_S_TIMER);
+
+	return HRTIMER_NORESTART;
+}
+
+static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
+{
+	if (!t->init_done || !t->next_set)
+		return -EINVAL;
+
+	hrtimer_cancel(&t->hrt);
+	t->next_set = false;
+
+	return 0;
+}
+
+int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+	u64 delta_ns;
+
+	if (!t->init_done)
+		return -EINVAL;
+
+	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_S_TIMER);
+
+	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, t);
+	t->next_cycles = ncycles;
+	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
+	t->next_set = true;
+
+	return 0;
+}
+
+int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	if (t->init_done)
+		return -EINVAL;
+
+	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
+	t->init_done = true;
+	t->next_set = false;
+
+	riscv_cs_get_mult_shift(&t->mult, &t->shift);
+
+	return 0;
+}
+
+int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
+{
+	int ret;
+
+	ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
+	vcpu->arch.timer.init_done = false;
+
+	return ret;
+}
+
+int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
+{
+	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
+}
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 470c7ef02ea4..a48036459f3c 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -12,6 +12,7 @@
 #include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/irq.h>
+#include <linux/module.h>
 #include <linux/sched_clock.h>
 #include <asm/smp.h>
 #include <asm/sbi.h>
@@ -71,6 +72,13 @@ static int riscv_timer_dying_cpu(unsigned int cpu)
 	return 0;
 }
 
+void riscv_cs_get_mult_shift(u32 *mult, u32 *shift)
+{
+	*mult = riscv_clocksource.mult;
+	*shift = riscv_clocksource.shift;
+}
+EXPORT_SYMBOL_GPL(riscv_cs_get_mult_shift);
+
 /* called directly from the low-level interrupt handler */
 void riscv_timer_interrupt(void)
 {
diff --git a/include/clocksource/timer-riscv.h b/include/clocksource/timer-riscv.h
new file mode 100644
index 000000000000..e94e4feecbe8
--- /dev/null
+++ b/include/clocksource/timer-riscv.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ *     Atish Patra <atish.patra@xxxxxxx>
+ */
+
+#ifndef __TIMER_RISCV_H
+#define __TIMER_RISCV_H
+
+#include <linux/types.h>
+
+void riscv_cs_get_mult_shift(u32 *mult, u32 *shift);
+
+#endif
-- 
2.17.1
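
P.S. For readers puzzling over the mult/shift conversion used by
kvm_riscv_delta_cycles2ns(): the clocksource's mult/shift pair is chosen
so that (cycles * mult) >> shift yields nanoseconds. The standalone
userspace sketch below only demonstrates that arithmetic in isolation;
the 10 MHz timebase and the derived mult/shift values are made-up example
numbers, not values taken from this patch or from the timer-riscv
clocksource.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/*
	 * Example values only: for a hypothetical 10 MHz timebase,
	 * 1 cycle == 100 ns, so a consistent pair is mult = 100 << 24
	 * and shift = 24, giving (cycles * mult) >> shift == cycles * 100.
	 */
	uint32_t mult = 100u << 24;
	uint32_t shift = 24;
	uint64_t cycles_delta = 12345;

	uint64_t delta_ns = (cycles_delta * (uint64_t)mult) >> shift;

	/* Prints: 12345 cycles -> 1234500 ns */
	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)cycles_delta,
	       (unsigned long long)delta_ns);
	return 0;
}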