On Wed, Feb 08, 2012 at 10:07:44AM -0500, Eric B Munson wrote: > When a host stops or suspends a VM it will set a flag to show this. The > watchdog will use these functions to determine if a softlockup is real, or the > result of a suspended VM. > > Signed-off-by: Eric B Munson <emunson@xxxxxxxxx> > asm-generic changes Acked-by: Arnd Bergmann <arnd@xxxxxxxx> > Cc: mingo@xxxxxxxxxx > Cc: hpa@xxxxxxxxx > Cc: ryanh@xxxxxxxxxxxxxxxxxx > Cc: aliguori@xxxxxxxxxx > Cc: mtosatti@xxxxxxxxxx > Cc: kvm@xxxxxxxxxxxxxxx > Cc: linux-arch@xxxxxxxxxxxxxxx > Cc: x86@xxxxxxxxxx > Cc: linux-kernel@xxxxxxxxxxxxxxx > --- > Changes from V11: > Re-add the missing asm-generic stub for check_and_clear_guest_stopped() > Changes from V6: > Use __this_cpu_and when clearing the PVCLOCK_GUEST_STOPPED flag > Changes from V5: > Collapse generic stubs into this patch > check_and_clear_guest_stopped() takes no args and uses __get_cpu_var() > Include individual definitions in ia64, s390, and powerpc > > arch/ia64/include/asm/kvm_para.h | 5 +++++ > arch/powerpc/include/asm/kvm_para.h | 5 +++++ > arch/s390/include/asm/kvm_para.h | 5 +++++ > arch/x86/include/asm/kvm_para.h | 8 ++++++++ > arch/x86/kernel/kvmclock.c | 21 +++++++++++++++++++++ > include/asm-generic/kvm_para.h | 14 ++++++++++++++ > 6 files changed, 58 insertions(+), 0 deletions(-) > create mode 100644 include/asm-generic/kvm_para.h > > diff --git a/arch/ia64/include/asm/kvm_para.h b/arch/ia64/include/asm/kvm_para.h > index 1588aee..2019cb9 100644 > --- a/arch/ia64/include/asm/kvm_para.h > +++ b/arch/ia64/include/asm/kvm_para.h > @@ -26,6 +26,11 @@ static inline unsigned int kvm_arch_para_features(void) > return 0; > } > > +static inline bool kvm_check_and_clear_guest_paused(void) > +{ > + return false; > +} > + > #endif > > #endif > diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h > index 50533f9..1f80293 100644 > --- a/arch/powerpc/include/asm/kvm_para.h > +++ b/arch/powerpc/include/asm/kvm_para.h > 
@@ -169,6 +169,11 @@ static inline unsigned int kvm_arch_para_features(void) > return r; > } > > +static inline bool kvm_check_and_clear_guest_paused(void) > +{ > + return false; > +} > + > #endif /* __KERNEL__ */ > > #endif /* __POWERPC_KVM_PARA_H__ */ > diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h > index 6964db2..a988329 100644 > --- a/arch/s390/include/asm/kvm_para.h > +++ b/arch/s390/include/asm/kvm_para.h > @@ -149,6 +149,11 @@ static inline unsigned int kvm_arch_para_features(void) > return 0; > } > > +static inline bool kvm_check_and_clear_guest_paused(void) > +{ > + return false; > +} > + > #endif > > #endif /* __S390_KVM_PARA_H */ > diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h > index 734c376..99c4bbe 100644 > --- a/arch/x86/include/asm/kvm_para.h > +++ b/arch/x86/include/asm/kvm_para.h > @@ -95,6 +95,14 @@ struct kvm_vcpu_pv_apf_data { > extern void kvmclock_init(void); > extern int kvm_register_clock(char *txt); > > +#ifdef CONFIG_KVM_CLOCK > +bool kvm_check_and_clear_guest_paused(void); > +#else > +static inline bool kvm_check_and_clear_guest_paused(void) > +{ > + return false; > +} > +#endif /* CONFIG_KVM_CLOCK */ > > /* This instruction is vmcall. On non-VT architectures, it will generate a > * trap that we will then rewrite to the appropriate instruction. 
> diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c > index 44842d7..bdf6423 100644 > --- a/arch/x86/kernel/kvmclock.c > +++ b/arch/x86/kernel/kvmclock.c > @@ -22,6 +22,7 @@ > #include <asm/msr.h> > #include <asm/apic.h> > #include <linux/percpu.h> > +#include <linux/hardirq.h> > > #include <asm/x86_init.h> > #include <asm/reboot.h> > @@ -114,6 +115,26 @@ static void kvm_get_preset_lpj(void) > preset_lpj = lpj; > } > > +bool kvm_check_and_clear_guest_paused(void) > +{ > + bool ret = false; > + struct pvclock_vcpu_time_info *src; > + > + /* > + * __get_cpu_var() is safe here because this function is only called > + * from timer functions where preemption is already disabled. > + */ > + WARN_ON(!in_atomic()); > + src = &__get_cpu_var(hv_clock); > + if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { > + __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED); > + ret = true; > + } > + > + return ret; > +} > +EXPORT_SYMBOL_GPL(kvm_check_and_clear_guest_paused); > + > static struct clocksource kvm_clock = { > .name = "kvm-clock", > .read = kvm_clock_get_cycles, > diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h > new file mode 100644 > index 0000000..05ef7e7 > --- /dev/null > +++ b/include/asm-generic/kvm_para.h > @@ -0,0 +1,14 @@ > +#ifndef _ASM_GENERIC_KVM_PARA_H > +#define _ASM_GENERIC_KVM_PARA_H > + > + > +/* > + * This function is used by architectures that support kvm to avoid issuing > + * false soft lockup messages. > + */ > +static inline bool kvm_check_and_clear_guest_paused(void) > +{ > + return false; > +} > + > +#endif How is the stub getting included for other architectures again? -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html