To allow unplugging vcpus, hotplugging vcpus on platforms that require
plugging multiple logical vcpus at once, and plugging them in arbitrary
order, it is necessary to use the new device_add interface for vcpu
hotplug. This patch adds support for using the device_add interface with
the old setvcpus API by implementing an algorithm that selects the
appropriate entities to plug in.
---
 src/qemu/qemu_driver.c | 161 +++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 143 insertions(+), 18 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 476f9d8..dbc9d92 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -4594,46 +4594,66 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          unsigned int vcpu)
 {
-    qemuDomainObjPrivatePtr priv = vm->privateData;
+    virJSONValuePtr vcpuprops = NULL;
     virDomainVcpuDefPtr vcpuinfo = virDomainDefGetVcpu(vm->def, vcpu);
     qemuDomainVcpuPrivatePtr vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpuinfo);
+    unsigned int nvcpus = vcpupriv->vcpus;
+    bool newhotplug = qemuDomainSupportsNewVcpuHotplug(vm);
     int ret = -1;
     int rc;
     int oldvcpus = virDomainDefGetVcpus(vm->def);
+    size_t i;
 
-    if (vcpuinfo->online) {
-        virReportError(VIR_ERR_INVALID_ARG,
-                       _("vCPU '%u' is already online"), vcpu);
-        return -1;
+    if (newhotplug) {
+        if (virAsprintf(&vcpupriv->alias, "vcpu%u", vcpu) < 0)
+            goto cleanup;
+
+        if (!(vcpuprops = qemuBuildHotpluggableCPUProps(vcpuinfo)))
+            goto cleanup;
     }
 
     qemuDomainObjEnterMonitor(driver, vm);
 
-    rc = qemuMonitorSetCPU(priv->mon, vcpu, true);
+    if (newhotplug) {
+        rc = qemuMonitorAddDeviceArgs(qemuDomainGetMonitor(vm), vcpuprops);
+        vcpuprops = NULL;
+    } else {
+        rc = qemuMonitorSetCPU(qemuDomainGetMonitor(vm), vcpu, true);
+    }
 
     if (qemuDomainObjExitMonitor(driver, vm) < 0)
         goto cleanup;
 
-    virDomainAuditVcpu(vm, oldvcpus, oldvcpus + 1, "update", rc == 0);
+    virDomainAuditVcpu(vm, oldvcpus, oldvcpus + nvcpus, "update", rc == 0);
 
     if (rc < 0)
         goto cleanup;
 
-    vcpuinfo->online = true;
+    /* start outputting the new XML element to allow keeping unpluggability */
+    if (newhotplug)
+        vm->def->individualvcpus = true;
 
     if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0)
         goto cleanup;
 
-    if (qemuDomainValidateVcpuInfo(vm) < 0)
-        goto cleanup;
+    for (i = vcpu; i < vcpu + nvcpus; i++) {
+        vcpuinfo = virDomainDefGetVcpu(vm->def, i);
+        vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpuinfo);
 
-    if (vcpupriv->tid > 0 &&
-        qemuProcessSetupVcpu(vm, vcpu) < 0)
+        vcpuinfo->online = true;
+
+        if (vcpupriv->tid > 0 &&
+            qemuProcessSetupVcpu(vm, i) < 0)
+            goto cleanup;
+    }
+
+    if (qemuDomainValidateVcpuInfo(vm) < 0)
         goto cleanup;
 
     ret = 0;
 
  cleanup:
+    virJSONValueFree(vcpuprops);
     return ret;
 }
 
@@ -4758,6 +4778,95 @@ qemuDomainSetVcpusMax(virQEMUDriverPtr driver,
 }
 
 
+/**
+ * qemuDomainSelectHotplugVcpuEntities:
+ *
+ * @def: domain definition
+ * @nvcpus: target vcpu count
+ * @cpumap: vcpu entity IDs filled on success
+ *
+ * Tries to find which vcpu entities need to be enabled or disabled to reach
+ * @nvcpus. This function works in order of the legacy hotplug but is able to
+ * skip over entries that are added out of order.
+ */
+static virBitmapPtr
+qemuDomainSelectHotplugVcpuEntities(virDomainDefPtr def,
+                                    unsigned int nvcpus)
+{
+    virBitmapPtr ret = NULL;
+    virDomainVcpuDefPtr vcpu;
+    qemuDomainVcpuPrivatePtr vcpupriv;
+    unsigned int maxvcpus = virDomainDefGetVcpusMax(def);
+    unsigned int curvcpus = virDomainDefGetVcpus(def);
+    ssize_t i;
+
+    if (!(ret = virBitmapNew(maxvcpus)))
+        return NULL;
+
+    if (nvcpus > curvcpus) {
+        for (i = 0; i < maxvcpus && curvcpus < nvcpus; i++) {
+            vcpu = virDomainDefGetVcpu(def, i);
+            vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu);
+
+            if (vcpu->online)
+                continue;
+
+            if (vcpupriv->vcpus == 0)
+                continue;
+
+            curvcpus += vcpupriv->vcpus;
+
+            if (curvcpus > nvcpus) {
+                virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+                               _("target vm vcpu granularity does not allow the "
+                                 "desired vcpu count"));
+                goto error;
+            }
+
+            ignore_value(virBitmapSetBit(ret, i));
+        }
+    } else {
+        for (i = maxvcpus - 1; i >= 0 && curvcpus > nvcpus; i--) {
+            vcpu = virDomainDefGetVcpu(def, i);
+            vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu);
+
+            if (!vcpu->online)
+                continue;
+
+            if (vcpupriv->vcpus == 0)
+                continue;
+
+            if (!vcpupriv->alias)
+                continue;
+
+            curvcpus -= vcpupriv->vcpus;
+
+            if (curvcpus < nvcpus) {
+                virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+                               _("target vm vcpu granularity does not allow the "
+                                 "desired vcpu count"));
+                goto error;
+            }
+
+            ignore_value(virBitmapSetBit(ret, i));
+        }
+    }
+
+    if (curvcpus != nvcpus) {
+        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+                       _("failed to find appropriate hotpluggable vcpus to "
+                         "reach the desired target vcpu count"));
+        goto error;
+    }
+
+    return ret;
+
+ error:
+    virBitmapFree(ret);
+    return NULL;
+}
+
+
 static int
 qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
                         unsigned int flags)
@@ -4769,12 +4878,12 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
     int ret = -1;
     virQEMUDriverConfigPtr cfg = NULL;
     qemuDomainObjPrivatePtr priv;
-    size_t i;
     virCgroupPtr cgroup_temp = NULL;
     char *mem_mask = NULL;
     char *all_nodes_str = NULL;
     virBitmapPtr all_nodes = NULL;
     virErrorPtr err = NULL;
+    virBitmapPtr vcpumap = NULL;
 
     virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
                   VIR_DOMAIN_AFFECT_CONFIG |
@@ -4818,6 +4927,9 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
         goto endjob;
     }
 
+    if (!(vcpumap = qemuDomainSelectHotplugVcpuEntities(def, nvcpus)))
+        goto endjob;
+
     if (virNumaIsAvailable() &&
         virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
         if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_EMULATOR, 0,
@@ -4852,20 +4964,32 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
     }
 
     if (def) {
+        int rc = 0;
+
         if (nvcpus > virDomainDefGetVcpus(def)) {
-            for (i = virDomainDefGetVcpus(def); i < nvcpus; i++) {
-                if (qemuDomainHotplugAddVcpu(driver, vm, i) < 0)
-                    goto endjob;
+            ssize_t nextvcpu = -1;
+            while ((nextvcpu = virBitmapNextSetBit(vcpumap, nextvcpu)) != -1) {
+                if ((rc = qemuDomainHotplugAddVcpu(driver, vm, nextvcpu)) < 0)
+                    break;
             }
         } else {
+            size_t i;
             for (i = virDomainDefGetVcpus(def) - 1; i >= nvcpus; i--) {
-                if (qemuDomainHotplugDelVcpu(driver, vm, i) < 0)
-                    goto endjob;
+                if (!virBitmapIsBitSet(vcpumap, i))
+                    continue;
+
+                if ((rc = qemuDomainHotplugDelVcpu(driver, vm, i)) < 0)
+                    break;
             }
         }
 
+        qemuDomainVcpuPersistOrder(vm->def);
+
         if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
            goto endjob;
+
+        if (rc < 0)
+            goto endjob;
     }
 
     if (persistentDef) {
@@ -4894,6 +5018,7 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
     VIR_FREE(mem_mask);
     VIR_FREE(all_nodes_str);
     virBitmapFree(all_nodes);
+    virBitmapFree(vcpumap);
     virCgroupFree(&cgroup_temp);
     virObjectUnref(cfg);
     return ret;
-- 
2.9.2
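
For context on what the new code path sends to QEMU: each hotpluggable
vcpu entity is given an alias of the form "vcpuN" (see the virAsprintf()
call above) and is plugged via device_add with the properties built by
qemuBuildHotpluggableCPUProps(), instead of going through the older
monitor command issued by qemuMonitorSetCPU(). As a rough, hypothetical
illustration only (the driver name and the set of topology properties
come from QEMU's query-hotpluggable-cpus and differ between targets and
machine types), the resulting QMP command for an x86 guest could look
something like:

{"execute": "device_add",
 "arguments": {"driver": "qemu64-x86_64-cpu", "id": "vcpu1",
               "socket-id": 1, "core-id": 0, "thread-id": 0}}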
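
The entity-selection algorithm itself is small enough to illustrate
outside of libvirt. Below is a minimal standalone sketch (simplified
stand-in types and a made-up sample topology, not libvirt code) of the
same idea implemented by qemuDomainSelectHotplugVcpuEntities(): when
growing, walk upwards over offline entity start points; when shrinking,
walk downwards over online entities that have an alias (i.e. were plugged
with device_add), and refuse counts that the entity granularity cannot
express.

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>   /* ssize_t */

/* Stand-in for virDomainVcpuDef + qemuDomainVcpuPrivate: 'vcpus' is
 * non-zero only for the first logical vcpu of a hotpluggable entity and
 * holds the entity size. */
typedef struct {
    bool online;          /* vcpu is currently plugged in */
    unsigned int vcpus;   /* entity size; 0 for non-leading vcpus */
    bool has_alias;       /* entity was added via device_add, so it can go away */
} vcpu_model;

/* Mark in 'selected' the entity start indices that must be toggled to
 * reach 'target' online vcpus; return -1 if the granularity does not
 * allow the requested count. */
static int
select_entities(const vcpu_model *def, size_t maxvcpus, unsigned int target,
                bool *selected)
{
    unsigned int cur = 0;
    size_t i;
    ssize_t j;

    for (i = 0; i < maxvcpus; i++) {
        selected[i] = false;
        if (def[i].online)
            cur++;
    }

    if (target > cur) {
        /* plug: walk upwards over offline entity start points */
        for (i = 0; i < maxvcpus && cur < target; i++) {
            if (def[i].online || def[i].vcpus == 0)
                continue;

            cur += def[i].vcpus;
            if (cur > target)
                return -1;

            selected[i] = true;
        }
    } else {
        /* unplug: walk downwards, only over entities with a known alias */
        for (j = (ssize_t)maxvcpus - 1; j >= 0 && cur > target; j--) {
            if (!def[j].online || def[j].vcpus == 0 || !def[j].has_alias)
                continue;

            cur -= def[j].vcpus;
            if (cur < target)
                return -1;

            selected[j] = true;
        }
    }

    return cur == target ? 0 : -1;
}

int
main(void)
{
    /* sample topology: 8 vcpus grouped into 4 hotpluggable entities of 2,
     * with only the first entity (vcpus 0-1) online at boot */
    vcpu_model def[8] = {
        { true,  2, false }, { true,  0, false },
        { false, 2, false }, { false, 0, false },
        { false, 2, false }, { false, 0, false },
        { false, 2, false }, { false, 0, false },
    };
    bool selected[8];
    size_t i;

    if (select_entities(def, 8, 6, selected) < 0) {
        fprintf(stderr, "entity granularity does not allow the requested count\n");
        return 1;
    }

    for (i = 0; i < 8; i++) {
        if (selected[i])
            printf("toggle entity starting at vcpu %zu\n", i);
    }

    return 0;
}

With this sample topology (four entities of two vcpus, only the first one
online) and a target of 6 vcpus, the sketch selects the entities starting
at vcpus 2 and 4, which mirrors what the patched qemuDomainSetVcpusFlags()
would plug in.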