On 11/20/2015 10:22 AM, Peter Krempa wrote:
> Add qemuDomainHasVCpuPids to do the checking and replace in place checks
> with it.
> ---
>  src/qemu/qemu_cgroup.c  |  7 ++-----
>  src/qemu/qemu_domain.c  | 15 +++++++++++++++
>  src/qemu/qemu_domain.h  |  2 ++
>  src/qemu/qemu_driver.c  | 29 +++++++++++++----------------
>  src/qemu/qemu_process.c |  7 ++++---
>  5 files changed, 36 insertions(+), 24 deletions(-)
>

Well, I got close - reached critical mass here.

> diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
> index 3c7694a..56c2e90 100644
> --- a/src/qemu/qemu_cgroup.c
> +++ b/src/qemu/qemu_cgroup.c
> @@ -1005,12 +1005,9 @@ qemuSetupCgroupForVcpu(virDomainObjPtr vm)
>          !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
>          return 0;
>
> -    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
> -        /* If we don't know VCPU<->PID mapping or all vcpu runs in the same
> -         * thread, we cannot control each vcpu.
> -         */

I'm curious about the vcpupids[0] == vm->pid checks... I feel like I
missed some nuance in the last patch. Perhaps a bit more explanation of
what happened will help: what, up to and including this patch, allows
the "safe" removal of that check?

> +    /* If vCPU<->pid mapping is missing we can't do vCPU pinning */
> +    if (!qemuDomainHasVCpuPids(vm))
>          return 0;
> -    }
>
>      if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
>          mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
> diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
> index 4913a3b..8a45825 100644
> --- a/src/qemu/qemu_domain.c
> +++ b/src/qemu/qemu_domain.c
> @@ -3956,3 +3956,18 @@ qemuDomainRequiresMlock(virDomainDefPtr def)
>
>      return false;
>  }
> +
> +
> +/**
> + * qemuDomainHasVCpuPids:
> + * @vm: Domain object
> + *
> + * Returns true if we were able to successfully detect vCPU pids for the VM.
> + */
> +bool
> +qemuDomainHasVCpuPids(virDomainObjPtr vm)

Use "Vcpu" or "VCPU" rather than "VCpu".

> +{
> +    qemuDomainObjPrivatePtr priv = vm->privateData;
> +
> +    return priv->nvcpupids > 0;
> +}
> diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
> index 03cf6ef..7f2eca1 100644
> --- a/src/qemu/qemu_domain.h
> +++ b/src/qemu/qemu_domain.h
> @@ -491,4 +491,6 @@ int qemuDomainDefValidateMemoryHotplug(const virDomainDef *def,
>                                         virQEMUCapsPtr qemuCaps,
>                                         const virDomainMemoryDef *mem);
>
> +bool qemuDomainHasVCpuPids(virDomainObjPtr vm);
> +
>  #endif /* __QEMU_DOMAIN_H__ */
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index 8047d36..4b7452c 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -1428,7 +1428,7 @@ qemuDomainHelperGetVcpus(virDomainObjPtr vm, virVcpuInfoPtr info, int maxinfo,
>      size_t i, v;
>      qemuDomainObjPrivatePtr priv = vm->privateData;
>
> -    if (priv->vcpupids == NULL) {
> +    if (!qemuDomainHasVCpuPids(vm)) {
>          virReportError(VIR_ERR_OPERATION_INVALID,
>                         "%s", _("cpu affinity is not supported"));
>          return -1;
> @@ -5118,7 +5118,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
>      }
>
>      if (def) {
> -        if (priv->vcpupids == NULL) {
> +        if (!qemuDomainHasVCpuPids(vm)) {
>              virReportError(VIR_ERR_OPERATION_INVALID,
>                             "%s", _("cpu affinity is not supported"));
>              goto endjob;
> @@ -10287,21 +10287,18 @@ qemuSetVcpusBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
>      if (period == 0 && quota == 0)
>          return 0;
>
> -    /* If we does not know VCPU<->PID mapping or all vcpu runs in the same
> -     * thread, we cannot control each vcpu. So we only modify cpu bandwidth
> -     * when each vcpu has a separated thread.
> -     */
> -    if (priv->nvcpupids != 0 && priv->vcpupids[0] != vm->pid) {
> -        for (i = 0; i < priv->nvcpupids; i++) {
> -            if (virCgroupNewThread(cgroup, VIR_CGROUP_THREAD_VCPU, i,
> -                                   false, &cgroup_vcpu) < 0)
> -                goto cleanup;
> +    if (!qemuDomainHasVCpuPids(vm))
> +        return 0;

Here again, the vcpupids[0] thing.

John

>
> -            if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
> -                goto cleanup;
> +    for (i = 0; i < priv->nvcpupids; i++) {
> +        if (virCgroupNewThread(cgroup, VIR_CGROUP_THREAD_VCPU, i,
> +                               false, &cgroup_vcpu) < 0)
> +            goto cleanup;
>
> -            virCgroupFree(&cgroup_vcpu);
> -        }
> +        if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
> +            goto cleanup;
> +
> +        virCgroupFree(&cgroup_vcpu);
>      }
>
>      return 0;
> @@ -10604,7 +10601,7 @@ qemuGetVcpusBWLive(virDomainObjPtr vm,
>      int ret = -1;
>
>      priv = vm->privateData;
> -    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
> +    if (!qemuDomainHasVCpuPids(vm)) {
>          /* We do not create sub dir for each vcpu */
>          rc = qemuGetVcpuBWLive(priv->cgroup, period, quota);
>          if (rc < 0)
> diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
> index 721647f..d7f45b3 100644
> --- a/src/qemu/qemu_process.c
> +++ b/src/qemu/qemu_process.c
> @@ -2291,12 +2291,13 @@ qemuProcessSetVcpuAffinities(virDomainObjPtr vm)
>      virDomainPinDefPtr pininfo;
>      int n;
>      int ret = -1;
> -    VIR_DEBUG("Setting affinity on CPUs nvcpupin=%zu nvcpus=%d nvcpupids=%d",
> -              def->cputune.nvcpupin, virDomainDefGetVCpus(def), priv->nvcpupids);
> +    VIR_DEBUG("Setting affinity on CPUs nvcpupin=%zu nvcpus=%d hasVcpupids=%d",
> +              def->cputune.nvcpupin, virDomainDefGetVCpus(def),
> +              qemuDomainHasVCpuPids(vm));
>      if (!def->cputune.nvcpupin)
>          return 0;
>
> -    if (priv->vcpupids == NULL) {
> +    if (!qemuDomainHasVCpuPids(vm)) {
>          /* If any CPU has custom affinity that differs from the
>           * VM default affinity, we must reject it
>           */
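
To make the question above concrete, here is a minimal, self-contained
C sketch of the two predicates under discussion. FakePriv and FakeDomain
are hypothetical stand-ins, not the real libvirt types. The old inline
check and the new qemuDomainHasVCpuPids() disagree on exactly one input:
vCPU PIDs were detected, but vCPU 0 reports the emulator's own PID (all
vCPUs running in the main thread). Dropping that half of the condition
is safe only if earlier patches in the series guarantee that vcpupids is
never populated in that situation - presumably what the review question
is probing.

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

/* Hypothetical, simplified stand-ins for the real libvirt types. */
typedef struct {
    int nvcpupids;    /* number of detected vCPU threads */
    pid_t *vcpupids;  /* one thread ID per vCPU */
} FakePriv;

typedef struct {
    pid_t pid;            /* main QEMU process PID */
    FakePriv *privateData;
} FakeDomain;

/* Old inline check: skip per-vCPU handling when the vCPU<->PID mapping
 * is unknown OR when vCPU 0 reports the emulator's own PID, i.e. all
 * vCPUs run in one thread and cannot be controlled individually. */
static bool oldCheckSkips(FakeDomain *vm)
{
    FakePriv *priv = vm->privateData;
    return priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid;
}

/* New helper, mirroring qemuDomainHasVCpuPids() from the patch: only
 * the "is the mapping known at all" half of the condition survives. */
static bool hasVcpuPids(FakeDomain *vm)
{
    FakePriv *priv = vm->privateData;
    return priv->nvcpupids > 0;
}

int main(void)
{
    /* The single input on which the two predicates disagree: a PID was
     * detected for vCPU 0, but it equals the emulator's PID. */
    pid_t vcpus[] = { 1000 };
    FakePriv priv = { 1, vcpus };
    FakeDomain vm = { 1000, &priv };

    printf("old check skips: %d, new helper reports pids: %d\n",
           oldCheckSkips(&vm), hasVcpuPids(&vm));  /* prints 1, 1 */
    return 0;
}

On that input the old code bails out while the new code would proceed to
create per-vCPU cgroups, which is why the removal is only safe if the
input can no longer occur.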