[RFC PATCH 15/35] subarch support for controlling interrupt delivery

> +/*
> + * The use of 'barrier' in the following reflects its use as a local-lock
> + * operation. Reentrancy must be prevented (e.g., by __cli()) /before/ any
> + * following critical operations are executed, and all critical operations
> + * must complete /before/ reentrancy is permitted (e.g., by __sti()). The
> + * Alpha architecture uses barriers in the same way, for example.
> + */

Seems like an odd comment to have in an i386 header file.

> +#define __cli()								\
> +do {									\
> +	struct vcpu_info *_vcpu;					\
> +	preempt_disable();						\
> +	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
> +	_vcpu->evtchn_upcall_mask = 1;					\
> +	preempt_enable_no_resched();					\
> +	barrier();							\
> +} while (0)

Should be a real function
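
For illustration only, a static inline doing the same thing might look like
the sketch below. The name xen_irq_disable() is made up, and it assumes the
same surroundings as the patch (HYPERVISOR_shared_info, __vcpu_id and the
preempt helpers already visible):

	/* Sketch: inline replacement for __cli(); name is illustrative. */
	static inline void xen_irq_disable(void)
	{
		struct vcpu_info *vcpu;

		preempt_disable();
		vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
		vcpu->evtchn_upcall_mask = 1;
		preempt_enable_no_resched();
		barrier();
	}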

> +#define __sti()								\
> +do {									\
> +	struct vcpu_info *_vcpu;					\
> +	barrier();							\
> +	preempt_disable();						\
> +	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
> +	_vcpu->evtchn_upcall_mask = 0;					\
> +	barrier(); /* unmask then check (avoid races) */		\
> +	if (unlikely(_vcpu->evtchn_upcall_pending))			\
> +		force_evtchn_callback();				\
> +	preempt_enable();						\
> +} while (0)

Should be a real function
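
Likewise, a possible inline form (again, xen_irq_enable() is just an
illustrative name for what the macro body already does):

	/* Sketch: inline replacement for __sti(); name is illustrative. */
	static inline void xen_irq_enable(void)
	{
		struct vcpu_info *vcpu;

		barrier();
		preempt_disable();
		vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
		vcpu->evtchn_upcall_mask = 0;
		barrier();	/* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			force_evtchn_callback();
		preempt_enable();
	}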

> +#define __save_flags(x)							\
> +do {									\
> +	struct vcpu_info *_vcpu;					\
> +	preempt_disable();						\
> +	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
> +	(x) = _vcpu->evtchn_upcall_mask;				\
> +	preempt_enable();						\
> +} while (0)

Should be a real function
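
As a function this one would naturally return the flags instead of writing
through a macro argument; something along these lines (sketch, hypothetical
name), with the existing __save_flags(x) macro reduced to a thin wrapper:

	/* Sketch: returns the current upcall mask; name is illustrative. */
	static inline unsigned long xen_save_flags(void)
	{
		struct vcpu_info *vcpu;
		unsigned long flags;

		preempt_disable();
		vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
		flags = vcpu->evtchn_upcall_mask;
		preempt_enable();
		return flags;
	}

	#define __save_flags(x)	((x) = xen_save_flags())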

> +#define __restore_flags(x)						\
> +do {									\
> +	struct vcpu_info *_vcpu;					\
> +	barrier();							\
> +	preempt_disable();						\
> +	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
> +	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
> +		barrier(); /* unmask then check (avoid races) */	\
> +		if (unlikely(_vcpu->evtchn_upcall_pending))		\
> +			force_evtchn_callback();			\
> +		preempt_enable();					\
> +	} else								\
> +		preempt_enable_no_resched();				\
> +} while (0)

Should be a real function
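
Same idea here; a sketch of an inline taking the saved flags (illustrative
name, same semantics as the macro):

	/* Sketch: inline replacement for __restore_flags(); name is illustrative. */
	static inline void xen_restore_flags(unsigned long flags)
	{
		struct vcpu_info *vcpu;

		barrier();
		preempt_disable();
		vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
		vcpu->evtchn_upcall_mask = flags;
		if (flags == 0) {
			barrier();	/* unmask then check (avoid races) */
			if (unlikely(vcpu->evtchn_upcall_pending))
				force_evtchn_callback();
			preempt_enable();
		} else {
			preempt_enable_no_resched();
		}
	}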

> +#define safe_halt()		((void)0)
> +#define halt()			((void)0)
> +
> +#define __save_and_cli(x)						\
> +do {									\
> +	struct vcpu_info *_vcpu;					\
> +	preempt_disable();						\
> +	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
> +	(x) = _vcpu->evtchn_upcall_mask;				\
> +	_vcpu->evtchn_upcall_mask = 1;					\
> +	preempt_enable_no_resched();					\
> +	barrier();							\
> +} while (0)

Should be a real function
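
This one can combine the previous two patterns: return the old mask and set
the new one in a single inline (sketch, hypothetical name):

	/* Sketch: save old mask, then disable; name is illustrative. */
	static inline unsigned long xen_save_and_disable(void)
	{
		struct vcpu_info *vcpu;
		unsigned long flags;

		preempt_disable();
		vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
		flags = vcpu->evtchn_upcall_mask;
		vcpu->evtchn_upcall_mask = 1;
		preempt_enable_no_resched();
		barrier();
		return flags;
	}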

> +#define local_irq_save(x)	__save_and_cli(x)
> +#define local_irq_restore(x)	__restore_flags(x)
> +#define local_save_flags(x)	__save_flags(x)
> +#define local_irq_disable()	__cli()
> +#define local_irq_enable()	__sti()
> +
> +/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
> +#define irqs_disabled()							\
> +({	int ___x;							\
> +	struct vcpu_info *_vcpu;					\
> +	preempt_disable();						\
> +	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
> +	___x = (_vcpu->evtchn_upcall_mask != 0);			\
> +	preempt_enable_no_resched();					\
> +	___x; })
> +
> +#endif /* __KERNEL__ */

Should be a real function
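
irqs_disabled() too; a possible inline form, keeping the
preempt_enable_no_resched() that the comment in the patch asks for
(illustrative name only):

	/* Sketch: inline replacement for irqs_disabled(); name is illustrative. */
	static inline int xen_irqs_disabled(void)
	{
		struct vcpu_info *vcpu;
		int disabled;

		preempt_disable();
		vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
		disabled = (vcpu->evtchn_upcall_mask != 0);
		/* no resched here, to avoid recursing via preempt_enable() */
		preempt_enable_no_resched();
		return disabled;
	}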
