On Tue, Apr 14, 2015 at 21:18:25 -0400, John Ferlan wrote: > Add qemuDomainAddIOThread and qemuDomainDelIOThread in order to add or > remove an IOThread to/from the host either for live or config options > > The implementation for the 'live' option will use the iothreadpids list > in order to make decision, while the 'config' option will use the > iothreadids list. Additionally, for deletion each may have to adjust > the iothreadpin list. > > IOThreads are implemented by qmp objects, the code makes use of the existing > qemuMonitorAddObject or qemuMonitorDelObject APIs. > > Signed-off-by: John Ferlan <jferlan@xxxxxxxxxx> > --- > src/conf/domain_audit.c | 9 + > src/conf/domain_audit.h | 6 + > src/libvirt_private.syms | 1 + > src/qemu/qemu_driver.c | 431 +++++++++++++++++++++++++++++++++++++++++++++++ > 4 files changed, 447 insertions(+) > ... > diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c > index 008258f..f42d4fb 100644 > --- a/src/qemu/qemu_driver.c > +++ b/src/qemu/qemu_driver.c > @@ -6179,6 +6179,435 @@ qemuDomainPinIOThread(virDomainPtr dom, > return ret; > } > > +static int > +qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver, > + virDomainObjPtr vm, > + unsigned int iothread_id) > +{ > + qemuDomainObjPrivatePtr priv = vm->privateData; > + char *alias = NULL; > + size_t idx; > + int rc = -1; > + int ret = -1; > + unsigned int orig_niothreads = vm->def->iothreads; > + unsigned int exp_niothreads = vm->def->iothreads; > + int new_niothreads = 0; > + qemuMonitorIOThreadInfoPtr *new_iothreads = NULL; > + unsigned int idval = 0; > + virCgroupPtr cgroup_iothread = NULL; > + char *mem_mask = NULL; > + virDomainIOThreadIDDefPtr iothrid; > + > + if (virDomainIOThreadIDFind(vm->def, iothread_id)) { > + virReportError(VIR_ERR_INVALID_ARG, > + _("an IOThread is already using iothread_id '%u'"), > + iothread_id); > + goto cleanup; > + } > + > + if (virAsprintf(&alias, "iothread%u", iothread_id) < 0) > + return -1; > + > + qemuDomainObjEnterMonitor(driver, 
vm); > + > + rc = qemuMonitorAddObject(priv->mon, "iothread", alias, NULL); > + exp_niothreads++; > + if (rc < 0) > + goto exit_monitor; > + > + /* After hotplugging the IOThreads we need to re-detect the > + * IOThreads thread_id's, adjust the cgroups, thread affinity, > + * and add the thread_id to the vm->def->iothreadids list. > + */ > + if ((new_niothreads = qemuMonitorGetIOThreads(priv->mon, > + &new_iothreads)) < 0) Since we are not doing any fancy iothread naming, this function can parse the iothread IDs from the alias right away ... [1] > + goto exit_monitor; > + > + if (qemuDomainObjExitMonitor(driver, vm) < 0) > + goto cleanup; > + > + if (new_niothreads != exp_niothreads) { > + virReportError(VIR_ERR_INTERNAL_ERROR, > + _("got wrong number of IOThread ids from QEMU monitor. " > + "got %d, wanted %d"), > + new_niothreads, exp_niothreads); > + vm->def->iothreads = new_niothreads; > + goto cleanup; > + } > + vm->def->iothreads = exp_niothreads; > + > + if (virDomainNumatuneGetMode(vm->def->numa, -1) == > + VIR_DOMAIN_NUMATUNE_MEM_STRICT && > + virDomainNumatuneMaybeFormatNodeset(vm->def->numa, > + priv->autoNodeset, > + &mem_mask, -1) < 0) > + goto cleanup; > + > + > + /* > + * If we've successfully added an IOThread, find out where we added it > + * in the QEMU IOThread list, so we can add it to our iothreadids list > + */ The message seems obvious when looking at the code. > + for (idx = 0; idx < new_niothreads; idx++) { > + if (qemuDomainParseIOThreadAlias(new_iothreads[idx]->name, &idval) < 0) ... [1] so that you don't have to do it manually. > + goto cleanup; > + if (iothread_id == idval) > + break; > + } > + > + if (idval != iothread_id) { > + virReportError(VIR_ERR_INTERNAL_ERROR, > + _("cannot find new IOThread '%u' in QEMU monitor."), > + iothread_id); > + goto cleanup; > + } > + > + if (virDomainIOThreadIDAdd(vm->def, iothread_id) < 0) virDomainIOThreadIDAdd could return the pointer to the created item ... 
> + goto cleanup; > + > + if (!(iothrid = virDomainIOThreadIDFind(vm->def, iothread_id))) { > + virReportError(VIR_ERR_INTERNAL_ERROR, > + _("cannot find just added IOThread '%u'"), > + iothread_id); So that you don't have to look it up right after adding it. > + goto cleanup; > + } > + > + iothrid->thread_id = new_iothreads[idx]->thread_id; > + > + /* Add IOThread to cgroup if present */ > + if (priv->cgroup) { > + cgroup_iothread = > + qemuDomainAddCgroupForThread(priv->cgroup, > + VIR_CGROUP_THREAD_IOTHREAD, > + iothread_id, mem_mask, > + iothrid->thread_id); > + if (!cgroup_iothread) > + goto cleanup; > + } > + > + /* Inherit def->cpuset */ > + if (vm->def->cpumask) { Automatic NUMA placement(priv->autoCpuset) needs to be taken into account too. > + if (qemuDomainHotplugAddPin(vm->def->cpumask, iothread_id, > + &vm->def->cputune.iothreadspin, > + &vm->def->cputune.niothreadspin) < 0) > + > + goto cleanup; > + > + if (qemuDomainHotplugPinThread(vm->def->cpumask, iothread_id, > + iothrid->thread_id, cgroup_iothread) < 0) > + goto cleanup; > + > + if (qemuProcessSetSchedParams(iothread_id, iothrid->thread_id, > + vm->def->cputune.niothreadsched, > + vm->def->cputune.iothreadsched) < 0) qemuProcessSetSchedParams won't do anything since the new thread doesn't have any scheduler assigned. > + goto cleanup; > + } > + > + ret = 0; > + > + cleanup: > + if (new_iothreads) { > + for (idx = 0; idx < new_niothreads; idx++) > + qemuMonitorIOThreadInfoFree(new_iothreads[idx]); > + VIR_FREE(new_iothreads); > + } > + VIR_FREE(mem_mask); > + virDomainAuditIOThread(vm, orig_niothreads, new_niothreads, > + "update", rc == 0); > + if (cgroup_iothread) > + virCgroupFree(&cgroup_iothread); virCgroupFree() handles NULL just fine. 
> + VIR_FREE(alias); > + return ret; > + > + exit_monitor: > + ignore_value(qemuDomainObjExitMonitor(driver, vm)); > + goto cleanup; > +} > + > +static int > +qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver, > + virDomainObjPtr vm, > + unsigned int iothread_id) > +{ > + qemuDomainObjPrivatePtr priv = vm->privateData; > + size_t idx; > + char *alias = NULL; > + int rc = -1; > + int ret = -1; > + unsigned int orig_niothreads = vm->def->iothreads; > + unsigned int exp_niothreads = vm->def->iothreads; > + int new_niothreads = 0; > + qemuMonitorIOThreadInfoPtr *new_iothreads = NULL; > + char *mem_mask = NULL; > + > + /* Normally would use virDomainIOThreadIDFind, but we need the index > + * from whence to delete for later... > + */ > + for (idx = 0; idx < vm->def->niothreadids; idx++) { > + if (iothread_id == vm->def->iothreadids[idx]->iothread_id) > + break; > + } > + > + if (idx == vm->def->niothreadids) { > + virReportError(VIR_ERR_INVALID_ARG, > + _("cannot find IOThread '%u' in iothreadids list"), > + iothread_id); > + return -1; > + } > + > + if (virAsprintf(&alias, "iothread%u", iothread_id) < 0) > + return -1; > + > + qemuDomainObjEnterMonitor(driver, vm); > + > + rc = qemuMonitorDelObject(priv->mon, alias); > + exp_niothreads--; > + if (rc < 0) > + goto exit_monitor; > + > + /* After hotplugging the IOThreads we need to re-detect the > + * IOThreads thread_id's, adjust the cgroups, thread affinity, > + * and the vm->def->iothreadids list. > + */ You've removed the thread here, so thread affinity was destroyed by the thread exiting. > + if ((new_niothreads = qemuMonitorGetIOThreads(priv->mon, > + &new_iothreads)) < 0) > + goto exit_monitor; > + > + if (qemuDomainObjExitMonitor(driver, vm) < 0) > + goto cleanup; > + > + if (new_niothreads != exp_niothreads) { > + virReportError(VIR_ERR_INTERNAL_ERROR, > + _("got wrong number of IOThread ids from QEMU monitor. 
" > + "got %d, wanted %d"), > + new_niothreads, exp_niothreads); > + vm->def->iothreads = new_niothreads; > + goto cleanup; > + } > + vm->def->iothreads = exp_niothreads; > + > + if (virDomainNumatuneGetMode(vm->def->numa, -1) == > + VIR_DOMAIN_NUMATUNE_MEM_STRICT && > + virDomainNumatuneMaybeFormatNodeset(vm->def->numa, > + priv->autoNodeset, > + &mem_mask, -1) < 0) Why do you need the memory node mask when you are deleting the cgroup? > + goto cleanup; > + > + if (VIR_DELETE_ELEMENT(vm->def->iothreadids, idx, > + vm->def->niothreadids) < 0) > + goto cleanup; You've added virDomainIOThreadIDDel > + > + if (qemuDomainDelCgroupForThread(priv->cgroup, > + VIR_CGROUP_THREAD_IOTHREAD, > + iothread_id) < 0) > + goto cleanup; > + > + virDomainPinDel(&vm->def->cputune.iothreadspin, > + &vm->def->cputune.niothreadspin, > + iothread_id); > + > + ret = 0; > + > + cleanup: > + if (new_iothreads) { > + for (idx = 0; idx < new_niothreads; idx++) > + qemuMonitorIOThreadInfoFree(new_iothreads[idx]); > + VIR_FREE(new_iothreads); > + } > + VIR_FREE(mem_mask); > + virDomainAuditIOThread(vm, orig_niothreads, new_niothreads, > + "update", rc == 0); > + VIR_FREE(alias); > + return ret; > + > + exit_monitor: > + ignore_value(qemuDomainObjExitMonitor(driver, vm)); > + goto cleanup; > +} > + > +static int > +qemuDomainChgIOThread(virQEMUDriverPtr driver, > + virDomainObjPtr vm, > + unsigned int iothread_id, > + bool add, > + unsigned int flags) > +{ > + virQEMUDriverConfigPtr cfg = NULL; > + virCapsPtr caps = NULL; > + qemuDomainObjPrivatePtr priv; > + virCgroupPtr cgroup_temp = NULL; > + virBitmapPtr all_nodes = NULL; > + char *all_nodes_str = NULL; > + char *mem_mask = NULL; > + virDomainDefPtr persistentDef; > + int ret = -1; > + > + cfg = virQEMUDriverGetConfig(driver); > + > + if (!(caps = virQEMUDriverGetCapabilities(driver, false))) > + goto cleanup; > + > + priv = vm->privateData; > + > + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) > + goto cleanup; > + > + if 
(virDomainLiveConfigHelperMethod(caps, driver->xmlopt, vm, &flags, > + &persistentDef) < 0) > + goto endjob; > + > + if (flags & VIR_DOMAIN_AFFECT_LIVE) { > + if (!virDomainObjIsActive(vm)) { > + virReportError(VIR_ERR_OPERATION_INVALID, "%s", > + _("cannot change IOThreads for an inactive domain")); > + goto endjob; > + } > + > + if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) { > + virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", > + _("IOThreads not supported with this binary")); > + goto endjob; > + } > + > + if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_EMULATOR, 0, Wrong cgroup type. Additionally qemuDomainHotplugAddIOThread() will add the thread so adding it here doesn't make sense. > + false, &cgroup_temp) < 0) > + goto endjob; > + > + if (!(all_nodes = virNumaGetHostNodeset())) > + goto endjob; > + > + if (!(all_nodes_str = virBitmapFormat(all_nodes))) > + goto endjob; > + > + if (virCgroupGetCpusetMems(cgroup_temp, &mem_mask) < 0 || > + virCgroupSetCpusetMems(cgroup_temp, all_nodes_str) < 0) > + goto endjob; > + > + if (add) { > + if (qemuDomainHotplugAddIOThread(driver, vm, iothread_id) < 0) > + goto endjob; > + } else { > + if (qemuDomainHotplugDelIOThread(driver, vm, iothread_id) < 0) > + goto endjob; > + } > + > + if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) > + goto endjob; > + } > + > + if (flags & VIR_DOMAIN_AFFECT_CONFIG) { > + if (add) { > + if (virDomainIOThreadIDAdd(persistentDef, iothread_id) < 0) > + goto endjob; > + > + /* Nothing to do in iothreadspin list (that's a separate command) */ > + > + persistentDef->iothreads++; > + } else { > + if (!virDomainIOThreadIDFind(persistentDef, iothread_id)) { > + virReportError(VIR_ERR_INVALID_ARG, > + _("cannot find IOThread '%u' in persistent " > + "iothreadids"), > + iothread_id); > + goto cleanup; > + } > + > + virDomainIOThreadIDDel(persistentDef, iothread_id); > + virDomainPinDel(&persistentDef->cputune.iothreadspin, > + 
&persistentDef->cputune.niothreadspin, > + iothread_id); This is the reason why I've requested in the previous review that the pinning information would be merged into the iothread data structure. You then would not have to synchronise two data structures. > + persistentDef->iothreads--; > + } > + > + if (virDomainSaveConfig(cfg->configDir, persistentDef) < 0) > + goto endjob; > + } > + > + ret = 0; > + > + endjob: > + if (mem_mask) { > + virErrorPtr err = virSaveLastError(); > + virCgroupSetCpusetMems(cgroup_temp, mem_mask); > + virSetError(err); > + virFreeError(err); > + } > + qemuDomainObjEndJob(driver, vm); > + > + cleanup: > + VIR_FREE(mem_mask); > + VIR_FREE(all_nodes_str); > + virBitmapFree(all_nodes); > + virCgroupFree(&cgroup_temp); > + virObjectUnref(caps); > + virObjectUnref(cfg); > + return ret; > +} > + > +static int > +qemuDomainAddIOThread(virDomainPtr dom, > + unsigned int iothread_id, > + unsigned int flags) > +{ > + virQEMUDriverPtr driver = dom->conn->privateData; > + virDomainObjPtr vm = NULL; > + int ret = -1; > + > + virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | > + VIR_DOMAIN_AFFECT_CONFIG, -1); > + > + if (!(vm = qemuDomObjFromDomain(dom))) > + goto cleanup; > + > + if (virDomainAddIOThreadEnsureACL(dom->conn, vm->def, flags) < 0) > + goto cleanup; > + > + ret = qemuDomainChgIOThread(driver, vm, iothread_id, true, flags); > + > + cleanup: > + qemuDomObjEndAPI(&vm); > + return ret; > +} > + > + > +static int > +qemuDomainDelIOThread(virDomainPtr dom, > + unsigned int iothread_id, > + unsigned int flags) > +{ > + virQEMUDriverPtr driver = dom->conn->privateData; > + virDomainObjPtr vm = NULL; > + int ret = -1; > + size_t i; > + > + virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | > + VIR_DOMAIN_AFFECT_CONFIG, -1); > + > + if (!(vm = qemuDomObjFromDomain(dom))) > + goto cleanup; > + > + if (virDomainDelIOThreadEnsureACL(dom->conn, vm->def, flags) < 0) > + goto cleanup; > + > + /* If there is a disk using the IOThread to be removed, then fail. 
*/ > + for (i = 0; i < vm->def->ndisks; i++) { > + if (vm->def->disks[i]->iothread == iothread_id) { > + virReportError(VIR_ERR_INVALID_ARG, > + _("cannot remove IOThread %u since it " > + "is being used by disk path '%s'"), > + iothread_id, > + NULLSTR(vm->def->disks[i]->src->path)); Alternatively you can use vm->def->disks[i]->dst which should be always set. > + goto cleanup; > + } > + } > + > + ret = qemuDomainChgIOThread(driver, vm, iothread_id, false, flags); > + > + cleanup: > + qemuDomObjEndAPI(&vm); > + return ret; > +} > + > static int qemuDomainGetSecurityLabel(virDomainPtr dom, virSecurityLabelPtr seclabel) > { > virQEMUDriverPtr driver = dom->conn->privateData;
Attachment:
signature.asc
Description: Digital signature
-- libvir-list mailing list libvir-list@xxxxxxxxxx https://www.redhat.com/mailman/listinfo/libvir-list