[PATCH] x86/kvm: handle the failure of __pv_cpu_mask allocation

From: Wanpeng Li <wanpengli@xxxxxxxxxxx>

Fall back to the native IPI and TLB flush paths if allocating
__pv_cpu_mask fails.  The original apic callbacks are saved in
orig_apic so that kvm_send_ipi_mask() and kvm_send_ipi_mask_allbutself()
can call them when the per-CPU mask is unavailable,
kvm_flush_tlb_multi() falls back to native_flush_tlb_multi(), and
kvm_alloc_cpumask() now frees any masks it did allocate and returns
-ENOMEM instead of ignoring the allocation result.

Signed-off-by: Wanpeng Li <wanpengli@xxxxxxxxxxx>
---
 arch/x86/kernel/kvm.c | 26 ++++++++++++++++++++++++--
 1 file changed, 24 insertions(+), 2 deletions(-)
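
For reference (not part of the patch), a minimal sketch of how
kvm_alloc_cpumask() reads with this change applied, assembled from the
hunk below; the "int cpu" declaration is assumed from the existing
function.  With CONFIG_CPUMASK_OFFSTACK a never-allocated per-CPU entry
stays NULL and free_cpumask_var() on it reduces to kfree(NULL), so the
cleanup loop can safely walk every possible CPU:

static __init int kvm_alloc_cpumask(void)
{
	int cpu;

	if (pv_tlb_flush_supported() || pv_ipi_supported())
		for_each_possible_cpu(cpu) {
			if (!zalloc_cpumask_var_node(&per_cpu(__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu)))
				goto err_out;
		}

	return 0;
err_out:
	/* freeing never-allocated (NULL) entries is a no-op, so this is safe */
	for_each_possible_cpu(cpu)
		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
	return -ENOMEM;
}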

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a22deb58f86d..29d79d760996 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -46,6 +46,7 @@
 
 DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
 
+static struct apic orig_apic;
 static int kvmapf = 1;
 
 static int __init parse_no_kvmapf(char *arg)
@@ -542,6 +543,11 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
 
 static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
 {
+	if (unlikely(!this_cpu_cpumask_var_ptr(__pv_cpu_mask))) {
+		orig_apic.send_IPI_mask(mask, vector);
+		return;
+	}
+
 	__send_ipi_mask(mask, vector);
 }
 
@@ -551,6 +557,11 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 	const struct cpumask *local_mask;
 
+	if (unlikely(!new_mask)) {
+		orig_apic.send_IPI_mask_allbutself(mask, vector);
+		return;
+	}
+
 	cpumask_copy(new_mask, mask);
 	cpumask_clear_cpu(this_cpu, new_mask);
 	local_mask = new_mask;
@@ -611,6 +622,7 @@ late_initcall(setup_efi_kvm_sev_migration);
  */
 static void kvm_setup_pv_ipi(void)
 {
+	orig_apic = *apic;
 	apic->send_IPI_mask = kvm_send_ipi_mask;
 	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
 	pr_info("setup PV IPIs\n");
@@ -639,6 +651,11 @@ static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
 	struct kvm_steal_time *src;
 	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 
+	if (unlikely(!flushmask)) {
+		native_flush_tlb_multi(cpumask, info);
+		return;
+	}
+
 	cpumask_copy(flushmask, cpumask);
 	/*
 	 * We have to call flush only on online vCPUs. And
@@ -671,11 +688,16 @@ static __init int kvm_alloc_cpumask(void)
 
 	if (pv_tlb_flush_supported() || pv_ipi_supported())
 		for_each_possible_cpu(cpu) {
-			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
-				GFP_KERNEL, cpu_to_node(cpu));
+			if (!zalloc_cpumask_var_node(&per_cpu(__pv_cpu_mask, cpu),
+				GFP_KERNEL, cpu_to_node(cpu)))
+				goto err_out;
 		}
 
 	return 0;
+err_out:
+	for_each_possible_cpu(cpu)
+		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
+	return -ENOMEM;
 }
 arch_initcall(kvm_alloc_cpumask);
 
-- 
2.25.1
