On 2020-07-11 11:04, Andrew Jones wrote:
> When updating the stolen time we should always read the current
> stolen time from the user-provided memory, not from a kernel
> cache. If we use a cache then we'll end up resetting stolen time
> to zero on the first update after migration.
>
> Signed-off-by: Andrew Jones <drjones@xxxxxxxxxx>
> ---
>  arch/arm64/include/asm/kvm_host.h |  1 -
>  arch/arm64/kvm/pvtime.c           | 23 +++++++++--------------
>  include/linux/kvm_host.h          | 19 +++++++++++++++++++
>  3 files changed, 28 insertions(+), 15 deletions(-)
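
For anyone following along, here is the failure mode in miniature, as a
userspace toy model (made-up names and numbers, not kernel code):

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t guest_page = 500;	/* stolen time the guest migrated in with */
	uint64_t kernel_cache = 0;	/* fresh cache on the destination host */
	uint64_t delta = 7;		/* run_delay accrued since the last update */

	/* current scheme: accumulate in the kernel cache, then store */
	kernel_cache += delta;
	guest_page = kernel_cache;
	printf("cached scheme writes %" PRIu64 " (the 500 is gone)\n", guest_page);

	/* patched scheme: the guest page itself is the accumulator */
	guest_page = 500;		/* start from the migrated value again */
	guest_page += delta;
	printf("read-modify-write writes %" PRIu64 "\n", guest_page);
	return 0;
}

The cache starts from zero on the destination, so the first update
clobbers whatever the guest's shared page carried across.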
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index c3e6fcc664b1..b01f52b61572 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -343,7 +343,6 @@ struct kvm_vcpu_arch {
>
>  	/* Guest PV state */
>  	struct {
> -		u64 steal;
>  		u64 last_steal;
>  		gpa_t base;
>  	} steal;
> diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
> index db5ef097a166..025b5f3a97ef 100644
> --- a/arch/arm64/kvm/pvtime.c
> +++ b/arch/arm64/kvm/pvtime.c
> @@ -13,26 +13,22 @@
>  void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm *kvm = vcpu->kvm;
> +	u64 base = vcpu->arch.steal.base;
>  	u64 last_steal = vcpu->arch.steal.last_steal;
> -	u64 steal;
> -	__le64 steal_le;
> -	u64 offset;
> +	u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
> +	u64 steal = 0;
>  	int idx;
> -	u64 base = vcpu->arch.steal.base;
>
>  	if (base == GPA_INVALID)
>  		return;
>
> -	/* Let's do the local bookkeeping */
> -	steal = vcpu->arch.steal.steal;
> -	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
> -	steal += vcpu->arch.steal.last_steal - last_steal;
> -	vcpu->arch.steal.steal = steal;
> -
> -	steal_le = cpu_to_le64(steal);
>  	idx = srcu_read_lock(&kvm->srcu);
> -	offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
> -	kvm_put_guest(kvm, base + offset, steal_le, u64);
> +	if (!kvm_get_guest(kvm, base + offset, steal, u64)) {
> +		steal = le64_to_cpu(steal);
> +		vcpu->arch.steal.last_steal = current->sched_info.run_delay;
> +		steal += vcpu->arch.steal.last_steal - last_steal;
> +		kvm_put_guest(kvm, base + offset, cpu_to_le64(steal), u64);
> +	}
>  	srcu_read_unlock(&kvm->srcu, idx);
>  }
>
> @@ -68,7 +64,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
>  	 * Start counting stolen time from the time the guest requests
>  	 * the feature enabled.
>  	 */
> -	vcpu->arch.steal.steal = 0;
>  	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
>
>  	idx = srcu_read_lock(&kvm->srcu);
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index d564855243d8..e2fc655f0b5b 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -749,6 +749,25 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>  int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>  			      gpa_t gpa, unsigned long len);
> +#define __kvm_get_guest(kvm, gfn, offset, x, type)		\
> +({								\
> +	unsigned long __addr = gfn_to_hva(kvm, gfn);		\
> +	type __user *__uaddr = (type __user *)(__addr + offset); \

Passing the type around is pretty ugly. Can't you use something like:

	typeof(x) __user *__uaddr = (typeof(__uaddr))(__addr + offset);

which would avoid passing the type around? kvm_put_guest could use the
same treatment (untested sketch below the quoted diff).

Yes, it forces the caller to rigorously type the inputs to the macro,
but they should do that anyway.

> +	int __ret = -EFAULT;					\
> +								\
> +	if (!kvm_is_error_hva(__addr))				\
> +		__ret = get_user(x, __uaddr);			\
> +	__ret;							\
> +})
> +
> +#define kvm_get_guest(kvm, gpa, x, type)			\
> +({								\
> +	gpa_t __gpa = gpa;					\
> +	struct kvm *__kvm = kvm;				\
> +	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,		\
> +			offset_in_page(__gpa), x, type);	\
> +})
> +
>  #define __kvm_put_guest(kvm, gfn, offset, value, type)	\
>  ({								\
>  	unsigned long __addr = gfn_to_hva(kvm, gfn);		\
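
Completely untested sketch of what I have in mind (get side only; the
put side follows the same pattern), so the type parameter disappears
from both macros:

#define __kvm_get_guest(kvm, gfn, offset, x)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	/* x's declared type gives us the user pointer type for free */ \
	typeof(x) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = get_user(x, __uaddr);				\
	__ret;								\
})

#define kvm_get_guest(kvm, gpa, x)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), x);			\
})

The pvtime caller then shrinks to kvm_get_guest(kvm, base + offset,
steal), with the declared type of 'steal' doing the work of today's
extra argument.
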
Thanks,
M.
--
Jazz is not dead. It just smells funny...