On 11/20/2015 10:22 AM, Peter Krempa wrote:
> qemuDomainHotplugVcpus/qemuDomainHotunplugVcpus are complex enough in
> regards of adding one CPU. Additionally it will be desired to reuse
> those functions later with specific vCPU hotplug.
>
> Move the loops for adding vCPUs into qemuDomainSetVcpusFlags so that the
> helpers can be made simpler and more straightforward.
> ---
>  src/qemu/qemu_driver.c | 105 ++++++++++++++++++++++---------------------
>  1 file changed, 48 insertions(+), 57 deletions(-)
>
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index 9011b2d..9f0e3a3 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -4694,10 +4694,9 @@ qemuDomainDelCgroupForThread(virCgroupPtr cgroup,
>  static int
>  qemuDomainHotplugVcpus(virQEMUDriverPtr driver,
>                         virDomainObjPtr vm,
> -                       unsigned int nvcpus)
> +                       unsigned int vcpu)
>  {
>      qemuDomainObjPrivatePtr priv = vm->privateData;
> -    size_t i;
>      int ret = -1;
>      int oldvcpus = virDomainDefGetVCpus(vm->def);
>      int vcpus = oldvcpus;

You could set this to oldvcpus + 1;...

> @@ -4709,13 +4708,10 @@ qemuDomainHotplugVcpus(virQEMUDriverPtr driver,
>
>      qemuDomainObjEnterMonitor(driver, vm);
>
> -    for (i = vcpus; i < nvcpus; i++) {
> -        /* Online new CPU */
> -        if (qemuMonitorSetCPU(priv->mon, i, true) < 0)
> -            goto exit_monitor;
> +    if (qemuMonitorSetCPU(priv->mon, vcpu, true) < 0)
> +        goto exit_monitor;
>
> -        vcpus++;
> -    }
> +    vcpus++;

Thus removing the need for this... and giving an Audit message that
doesn't use the same value for oldvcpus and vcpus (although it could
before these changes).
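Something like this, perhaps (an untested sketch; it assumes the rest of
the function stays exactly as in this patch):

    int oldvcpus = virDomainDefGetVCpus(vm->def);
    int vcpus = oldvcpus + 1;   /* account up front for the one CPU being onlined */

    qemuDomainObjEnterMonitor(driver, vm);

    if (qemuMonitorSetCPU(priv->mon, vcpu, true) < 0)
        goto exit_monitor;
    /* no separate vcpus++ needed here, and the audit call below still
     * reports oldvcpus -> vcpus */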
>
>      /* After hotplugging the CPUs we need to re-detect threads corresponding
>       * to the virtual CPUs. Some older versions don't provide the thread ID
> @@ -4747,37 +4743,34 @@ qemuDomainHotplugVcpus(virQEMUDriverPtr driver,
>                                              &mem_mask, -1) < 0)
>          goto cleanup;
>
> -    for (i = oldvcpus; i < nvcpus; i++) {
> -        if (priv->cgroup) {
> -            cgroup_vcpu =
> -                qemuDomainAddCgroupForThread(priv->cgroup,
> -                                             VIR_CGROUP_THREAD_VCPU,
> -                                             i, mem_mask,
> -                                             cpupids[i]);
> -            if (!cgroup_vcpu)
> -                goto cleanup;
> -        }
> +    if (priv->cgroup) {
> +        cgroup_vcpu =
> +            qemuDomainAddCgroupForThread(priv->cgroup,
> +                                         VIR_CGROUP_THREAD_VCPU,
> +                                         vcpu, mem_mask,
> +                                         cpupids[vcpu]);
> +        if (!cgroup_vcpu)
> +            goto cleanup;
> +    }
>
> -        /* Inherit def->cpuset */
> -        if (vm->def->cpumask) {
> -            if (qemuDomainHotplugAddPin(vm->def->cpumask, i,
> -                                        &vm->def->cputune.vcpupin,
> -                                        &vm->def->cputune.nvcpupin) < 0) {
> -                goto cleanup;
> -            }
> -            if (qemuDomainHotplugPinThread(vm->def->cpumask, i, cpupids[i],
> -                                           cgroup_vcpu) < 0) {
> -                goto cleanup;
> -            }
> +    /* Inherit def->cpuset */
> +    if (vm->def->cpumask) {
> +        if (qemuDomainHotplugAddPin(vm->def->cpumask, vcpu,
> +                                    &vm->def->cputune.vcpupin,
> +                                    &vm->def->cputune.nvcpupin) < 0) {
> +            goto cleanup;
>          }
> -        virCgroupFree(&cgroup_vcpu);

Ahhh.. finally ;-)

> -
> -        if (qemuProcessSetSchedParams(i, cpupids[i],
> -                                      vm->def->cputune.nvcpusched,
> -                                      vm->def->cputune.vcpusched) < 0)
> +        if (qemuDomainHotplugPinThread(vm->def->cpumask, vcpu, cpupids[vcpu],
> +                                       cgroup_vcpu) < 0) {
>              goto cleanup;
> +        }
>      }
>
> +    if (qemuProcessSetSchedParams(vcpu, cpupids[vcpu],
> +                                  vm->def->cputune.nvcpusched,
> +                                  vm->def->cputune.vcpusched) < 0)
> +        goto cleanup;
> +
>      priv->nvcpupids = ncpupids;
>      VIR_FREE(priv->vcpupids);
>      priv->vcpupids = cpupids;
> @@ -4791,7 +4784,7 @@ qemuDomainHotplugVcpus(virQEMUDriverPtr driver,
>      if (virDomainObjIsActive(vm) &&
>          virDomainDefSetVCpus(vm->def, vcpus) < 0)
>          ret = -1;
> -    virDomainAuditVcpu(vm, oldvcpus, nvcpus, "update", ret == 0);
> +    virDomainAuditVcpu(vm, oldvcpus, vcpus, "update", ret == 0);
>      if (cgroup_vcpu)
>          virCgroupFree(&cgroup_vcpu);
>      return ret;
> @@ -4805,10 +4798,9 @@ qemuDomainHotplugVcpus(virQEMUDriverPtr driver,
>  static int
>  qemuDomainHotunplugVcpus(virQEMUDriverPtr driver,
>                           virDomainObjPtr vm,
> -                         unsigned int nvcpus)
> +                         unsigned int vcpu)
>  {
>      qemuDomainObjPrivatePtr priv = vm->privateData;
> -    size_t i;
>      int ret = -1;
>      int oldvcpus = virDomainDefGetVCpus(vm->def);
>      int vcpus = oldvcpus;

Same as Hotplug, but in reverse... oldvcpus - 1;

> @@ -4817,13 +4809,10 @@ qemuDomainHotunplugVcpus(virQEMUDriverPtr driver,
>
>      qemuDomainObjEnterMonitor(driver, vm);
>
> -    for (i = vcpus - 1; i >= nvcpus; i--) {
> -        /* Offline old CPU */
> -        if (qemuMonitorSetCPU(priv->mon, i, false) < 0)
> -            goto exit_monitor;
> +    if (qemuMonitorSetCPU(priv->mon, vcpu, false) < 0)
> +        goto exit_monitor;
>
> -        vcpus--;
> -    }
> +    vcpus--;

Removing this...

>
>      /* After hotplugging the CPUs we need to re-detect threads corresponding
>       * to the virtual CPUs. Some older versions don't provide the thread ID
> @@ -4855,16 +4844,14 @@ qemuDomainHotunplugVcpus(virQEMUDriverPtr driver,
>          goto cleanup;
>      }
>
> -    for (i = oldvcpus - 1; i >= nvcpus; i--) {
> -        if (qemuDomainDelCgroupForThread(priv->cgroup,
> -                                         VIR_CGROUP_THREAD_VCPU, i) < 0)
> -            goto cleanup;
> +    if (qemuDomainDelCgroupForThread(priv->cgroup,
> +                                     VIR_CGROUP_THREAD_VCPU, vcpu) < 0)
> +        goto cleanup;
>
> -        /* Free vcpupin setting */
> -        virDomainPinDel(&vm->def->cputune.vcpupin,
> -                        &vm->def->cputune.nvcpupin,
> -                        i);
> -    }
> +    /* Free vcpupin setting */
> +    virDomainPinDel(&vm->def->cputune.vcpupin,
> +                    &vm->def->cputune.nvcpupin,
> +                    vcpu);
>
>      priv->nvcpupids = ncpupids;
>      VIR_FREE(priv->vcpupids);
> @@ -4878,7 +4865,7 @@ qemuDomainHotunplugVcpus(virQEMUDriverPtr driver,
>      if (virDomainObjIsActive(vm) &&
>          virDomainDefSetVCpus(vm->def, vcpus) < 0)
>          ret = -1;
> -    virDomainAuditVcpu(vm, oldvcpus, nvcpus, "update", ret == 0);
> +    virDomainAuditVcpu(vm, oldvcpus, vcpus, "update", ret == 0);
>      return ret;
>
>  exit_monitor:
> @@ -5021,11 +5008,15 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
>
>      if (def) {

Could use

    unsigned int curvcpus = virDomainDefGetVCpus(def);

Although I suppose the compiler will optimize anyway...
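That is, an untested sketch of how the branch quoted below might then
read (loop bodies unchanged from this patch):

    unsigned int curvcpus = virDomainDefGetVCpus(def);

    if (nvcpus > curvcpus) {
        for (i = curvcpus; i < nvcpus; i++) {
            if (qemuDomainHotplugVcpus(driver, vm, i) < 0)
                goto endjob;
        }
    } else {
        for (i = curvcpus - 1; i >= nvcpus; i--) {
            if (qemuDomainHotunplugVcpus(driver, vm, i) < 0)
                goto endjob;
        }
    }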
>          if (nvcpus > virDomainDefGetVCpus(def)) {
> -            if (qemuDomainHotplugVcpus(driver, vm, nvcpus) < 0)
> -                goto endjob;
> +            for (i = virDomainDefGetVCpus(def); i < nvcpus; i++) {
> +                if (qemuDomainHotplugVcpus(driver, vm, i) < 0)
> +                    goto endjob;
> +            }
>          } else {
> -            if (qemuDomainHotunplugVcpus(driver, vm, nvcpus) < 0)
> -                goto endjob;

Perhaps this is where the comment removed during patch 19 would be
descriptive, e.g. adjust the following to fit here:

-    /* We need different branches here, because we want to offline
-     * in reverse order to onlining, so any partial fail leaves us in a
-     * reasonably sensible state */

ACK - none of the mentioned changes is required, merely suggestions

John

> +            for (i = virDomainDefGetVCpus(def) - 1; i >= nvcpus; i--) {
> +                if (qemuDomainHotunplugVcpus(driver, vm, i) < 0)
> +                    goto endjob;
> +            }
>          }
>
>          if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)

--
libvir-list mailing list
libvir-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/libvir-list