On 11/20/2015 10:22 AM, Peter Krempa wrote:
> Set new domain configs to contain at least 1 vCPU, add a check that
> maximum vCPU count isn't set to 0, and remove unnecessary checks.
>
> The openvz test suite change is necessary since the test case generates
> the config via virDomainDefNew but does not set the vCPU info. With the
> change to virDomainDefNew the expected output has changed.
> ---
>  src/conf/domain_conf.c     | 12 ++++++++++++
>  src/lxc/lxc_native.c       |  7 -------
>  src/openvz/openvz_driver.c | 20 ++++++++------------
>  src/qemu/qemu_command.c    |  3 ---
>  src/vmx/vmx.c              |  6 +++---
>  tests/openvzutilstest.c    |  2 +-
>  6 files changed, 24 insertions(+), 26 deletions(-)
>
> diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
> index 3a1dcc7..6b16430 100644
> --- a/src/conf/domain_conf.c
> +++ b/src/conf/domain_conf.c
> @@ -1428,6 +1428,12 @@ int
>  virDomainDefSetVCpusMax(virDomainDefPtr def,
>                          unsigned int vcpus)
>  {
> +    if (vcpus == 0) {
> +        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
> +                       _("domain config can't have 0 maximum vCPUs"));

"domain configuration requires at least 1 vCPU"

Shouldn't this be a post parse check rather than a parse check? Otherwise,
couldn't a domain disappear?

May also "solve" the openvz usage pattern issue...

> +        return -1;
> +    }
> +
>      if (vcpus < def->vcpus)
>          def->vcpus = vcpus;
>
> @@ -2697,6 +2703,12 @@ virDomainDefNew(void)
>      if (!(ret->numa = virDomainNumaNew()))
>          goto error;
>
> +    /* assume at least 1 cpu for every config */
> +    if (virDomainDefSetVCpusMax(ret, 1) < 0)
> +        goto error;
> +
> +    ret->vcpus = 1;
> +

[1]

From a quick read - this is what generates issues w/ openvz, which seems
to allow a "0" to be interpreted as all CPUs on the host during parse.

>      ret->mem.hard_limit = VIR_DOMAIN_MEMORY_PARAM_UNLIMITED;
>      ret->mem.soft_limit = VIR_DOMAIN_MEMORY_PARAM_UNLIMITED;
>      ret->mem.swap_hard_limit = VIR_DOMAIN_MEMORY_PARAM_UNLIMITED;
> diff --git a/src/lxc/lxc_native.c b/src/lxc/lxc_native.c
> index d4a72c1..a3fea0a 100644
> --- a/src/lxc/lxc_native.c
> +++ b/src/lxc/lxc_native.c
> @@ -1017,13 +1017,6 @@ lxcParseConfigString(const char *config)
>      vmdef->onPoweroff = VIR_DOMAIN_LIFECYCLE_DESTROY;
>      vmdef->virtType = VIR_DOMAIN_VIRT_LXC;
>
> -    /* Value not handled by the LXC driver, setting to
> -     * minimum required to make XML parsing pass */
> -    if (virDomainDefSetVCpusMax(vmdef, 1) < 0)
> -        goto error;
> -
> -    vmdef->vcpus = 1;
> -
>      vmdef->nfss = 0;
>      vmdef->os.type = VIR_DOMAIN_OSTYPE_EXE;
>
> diff --git a/src/openvz/openvz_driver.c b/src/openvz/openvz_driver.c
> index 1361432..53a2d57 100644
> --- a/src/openvz/openvz_driver.c
> +++ b/src/openvz/openvz_driver.c
> @@ -1035,12 +1035,10 @@ openvzDomainDefineXMLFlags(virConnectPtr conn, const char *xml, unsigned int fla
>                         _("current vcpu count must equal maximum"));
>          goto cleanup;
>      }
> -    if (vm->def->maxvcpus > 0) {
> -        if (openvzDomainSetVcpusInternal(vm, vm->def->maxvcpus) < 0) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("Could not set number of vCPUs"));
> -            goto cleanup;
> -        }
> +    if (openvzDomainSetVcpusInternal(vm, vm->def->maxvcpus) < 0) {
> +        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                       _("Could not set number of vCPUs"));
> +        goto cleanup;
>      }
>
>      if (vm->def->mem.cur_balloon > 0) {
> @@ -1133,12 +1131,10 @@ openvzDomainCreateXML(virConnectPtr conn, const char *xml,
>      vm->def->id = vm->pid;
>      virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, VIR_DOMAIN_RUNNING_BOOTED);
>
> -    if (vm->def->maxvcpus > 0) {
> -        if (openvzDomainSetVcpusInternal(vm, vm->def->maxvcpus) < 0) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("Could not set number of vCPUs"));
> -            goto cleanup;
> -        }
> +    if (openvzDomainSetVcpusInternal(vm, vm->def->maxvcpus) < 0) {
> +        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                       _("Could not set number of vCPUs"));
> +        goto cleanup;
>      }
>
>      dom = virGetDomain(conn, vm->def->name, vm->def->uuid);
> diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
> index ef44b8e..cc6785f 100644
> --- a/src/qemu/qemu_command.c
> +++ b/src/qemu/qemu_command.c
> @@ -12694,9 +12694,6 @@ qemuParseCommandLine(virCapsPtr qemuCaps,
>      def->id = -1;
>      def->mem.cur_balloon = 64 * 1024;
>      virDomainDefSetMemoryTotal(def, def->mem.cur_balloon);
> -    if (virDomainDefSetVCpusMax(def, 1) < 0)
> -        goto error;
> -    def->vcpus = 1;
>      def->clock.offset = VIR_DOMAIN_CLOCK_OFFSET_UTC;
>
>      def->onReboot = VIR_DOMAIN_LIFECYCLE_RESTART;
> diff --git a/src/vmx/vmx.c b/src/vmx/vmx.c
> index 5456e3d..0223e94 100644
> --- a/src/vmx/vmx.c
> +++ b/src/vmx/vmx.c
> @@ -3181,10 +3181,10 @@ virVMXFormatConfig(virVMXContext *ctx, virDomainXMLOptionPtr xmlopt, virDomainDe
>                         "'current'"));
>          goto cleanup;
>      }
> -    if (def->maxvcpus <= 0 || (def->maxvcpus % 2 != 0 && def->maxvcpus != 1)) {
> +    if ((def->maxvcpus % 2 != 0 && def->maxvcpus != 1)) {
>          virReportError(VIR_ERR_INTERNAL_ERROR,
> -                       _("Expecting domain XML entry 'vcpu' to be an unsigned "
> -                         "integer (1 or a multiple of 2) but found %d"),
> +                       _("Expecting domain XML entry 'vcpu' to be 1 or a "
> +                         "multiple of 2 but found %d"),
>                         def->maxvcpus);
>          goto cleanup;
>      }
> diff --git a/tests/openvzutilstest.c b/tests/openvzutilstest.c
> index 1414d70..0214fe5 100644
> --- a/tests/openvzutilstest.c
> +++ b/tests/openvzutilstest.c
> @@ -81,7 +81,7 @@ testReadNetworkConf(const void *data ATTRIBUTE_UNUSED)
>          "  <uuid>00000000-0000-0000-0000-000000000000</uuid>\n"
>          "  <memory unit='KiB'>0</memory>\n"
>          "  <currentMemory unit='KiB'>0</currentMemory>\n"
> -        "  <vcpu placement='static'>0</vcpu>\n"
> +        "  <vcpu placement='static'>1</vcpu>\n"
>          "  <os>\n"
>          "    <type>exe</type>\n"
>          "    <init>/sbin/init</init>\n"
>

[1] Looking through the history of things finds:

https://www.redhat.com/archives/libvir-list/2008-November/msg00253.html

which seems to indicate that not providing a vCPU value or providing one
of zero allows the container to use all the CPUs on the host. Also the
original commit id 'd6caacd1' of the test seems to indicate having a 0
is acceptable. Hopefully someone doing OpenVZ development could chime in
here.

It seems some code was shared w/r/t reading a configuration file, and
perhaps the vcpu value output into the XML would be expected for this
type of network device. That is - is the output here then fed into
something else that creates some network object, and would it object to
finding a 1 for the vcpu count?

Personally I don't have a problem with requiring something, but I'm
wondering if it's "expected" in that environment.

ACK 6-9 w/ the "VCpus" -> "Vcpus" change

Conditional ACK 10 on whether this is "right" for openvz and whether the
maxvcpus being non-zero check should be in post parse...
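To be a little clearer about what I mean by a post parse check - roughly
something like the following is what I was picturing (a completely
untested sketch; the helper name is made up and I haven't checked exactly
where in the post parse path it would best be called from):

/* Untested sketch only - virDomainDefCheckVcpus is a hypothetical name;
 * the idea is to run a check like this from the post parse path rather
 * than failing inside virDomainDefSetVCpusMax() during parse. */
static int
virDomainDefCheckVcpus(const virDomainDef *def)
{
    /* reject a config that ended up with no vCPUs at all */
    if (def->maxvcpus == 0) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("domain configuration requires at least 1 vCPU"));
        return -1;
    }

    return 0;
}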
John

--
libvir-list mailing list
libvir-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/libvir-list