From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>

Now all related callbacks are called under kvm_lock, so the _nolock
postfix no longer adds any information.  Remove the _nolock postfix for
readability and shorter function names.

Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
Reviewed-by: Chao Gao <chao.gao@xxxxxxxxx>
---
 virt/kvm/kvm_arch.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/virt/kvm/kvm_arch.c b/virt/kvm/kvm_arch.c
index bcf8b74144e3..e6bf9de18cba 100644
--- a/virt/kvm/kvm_arch.c
+++ b/virt/kvm/kvm_arch.c
@@ -24,7 +24,7 @@ int __weak kvm_arch_post_init_vm(struct kvm *kvm)
 	return 0;
 }
 
-static void hardware_enable_nolock(void *junk)
+static void hardware_enable(void *junk)
 {
 	int cpu = raw_smp_processor_id();
 	int r;
@@ -46,7 +46,7 @@ static void hardware_enable_nolock(void *junk)
 	}
 }
 
-static void hardware_disable_nolock(void *junk)
+static void hardware_disable(void *junk)
 {
 	int cpu = raw_smp_processor_id();
 
@@ -70,7 +70,7 @@ int __weak kvm_arch_add_vm(struct kvm *kvm, int usage_count)
 		return 0;
 
 	atomic_set(&hardware_enable_failed, 0);
-	on_each_cpu(hardware_enable_nolock, NULL, 1);
+	on_each_cpu(hardware_enable, NULL, 1);
 
 	if (atomic_read(&hardware_enable_failed)) {
 		r = -EBUSY;
@@ -80,7 +80,7 @@ int __weak kvm_arch_add_vm(struct kvm *kvm, int usage_count)
 	r = kvm_arch_post_init_vm(kvm);
 err:
 	if (r)
-		on_each_cpu(hardware_disable_nolock, NULL, 1);
+		on_each_cpu(hardware_disable, NULL, 1);
 	return r;
 }
 
@@ -89,7 +89,7 @@ int __weak kvm_arch_del_vm(int usage_count)
 	if (usage_count)
 		return 0;
 
-	on_each_cpu(hardware_disable_nolock, NULL, 1);
+	on_each_cpu(hardware_disable, NULL, 1);
 	return 0;
 }
 
@@ -115,7 +115,7 @@ int __weak kvm_arch_online_cpu(unsigned int cpu, int usage_count)
 		 * preemption until all arch callbacks are fixed.
 		 */
 		preempt_disable();
-		hardware_enable_nolock(NULL);
+		hardware_enable(NULL);
 		preempt_enable();
 		if (atomic_read(&hardware_enable_failed)) {
 			atomic_set(&hardware_enable_failed, 0);
@@ -134,7 +134,7 @@ int __weak kvm_arch_offline_cpu(unsigned int cpu, int usage_count)
 		 * preemption until all arch callbacks are fixed.
 		 */
 		preempt_disable();
-		hardware_disable_nolock(NULL);
+		hardware_disable(NULL);
 		preempt_enable();
 	}
 	return 0;
@@ -142,7 +142,7 @@ int __weak kvm_arch_offline_cpu(unsigned int cpu, int usage_count)
 
 int __weak kvm_arch_reboot(int val)
 {
-	on_each_cpu(hardware_disable_nolock, NULL, 1);
+	on_each_cpu(hardware_disable, NULL, 1);
 	return NOTIFY_OK;
 }
 
@@ -153,12 +153,12 @@ int __weak kvm_arch_suspend(int usage_count)
 	 * Because kvm_suspend() is called with interrupt disabled, no
 	 * need to disable preemption.
 	 */
-	hardware_disable_nolock(NULL);
+	hardware_disable(NULL);
 	return 0;
 }
 
 void __weak kvm_arch_resume(int usage_count)
 {
 	if (usage_count)
-		hardware_enable_nolock(NULL);
+		hardware_enable(NULL);
 }
-- 
2.25.1
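
For illustration only, not part of the patch: a minimal sketch of the
calling pattern the commit message relies on.  The generic side takes
kvm_lock before invoking the arch callback, so the per-CPU helpers never
run unlocked and the _nolock marker carries no information.  The caller
name kvm_enable_hardware_all() and its exact body are hypothetical;
only kvm_lock, kvm_usage_count, kvm_arch_add_vm() and on_each_cpu()
come from the surrounding series.

	/*
	 * Hypothetical caller sketch (generic code, kvm_main.c style).
	 * Kernel includes omitted; this is not code from the patch.
	 */
	static int kvm_enable_hardware_all(struct kvm *kvm)
	{
		int r;

		mutex_lock(&kvm_lock);
		kvm_usage_count++;
		/*
		 * kvm_arch_add_vm() runs with kvm_lock held and issues
		 * on_each_cpu(hardware_enable, NULL, 1) internally, so
		 * hardware_enable() itself needs no extra locking.
		 */
		r = kvm_arch_add_vm(kvm, kvm_usage_count);
		if (r)
			kvm_usage_count--;
		mutex_unlock(&kvm_lock);
		return r;
	}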