[PATCH v3 8/9] qemu: Add qemuDomainSetIOThreads

https://bugzilla.redhat.com/show_bug.cgi?id=1135491

Implement qemuDomainSetIOThreads. For now it handles only the
VIR_DOMAIN_IOTHREADS_PIN flag, calling qemuDomainSetIOThreadsPin to
switch the pinning to the new cpumap. The change can be applied to the
LIVE or CONFIG definition, or to both if both flags are supplied.
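
For reference, a minimal caller sketch (this assumes the public
virDomainSetIOThreads entry point added earlier in this series, a host
with at most 8 CPUs, and an illustrative helper name):

    #include <libvirt/libvirt.h>

    /* Pin IOThread 1 to host CPUs 2 and 3, live and in the config. */
    static int
    pin_iothread_example(virDomainPtr dom)
    {
        unsigned char cpumap[] = { 0x0c };  /* bits 2 and 3 set */

        return virDomainSetIOThreads(dom, 1, cpumap, sizeof(cpumap),
                                     VIR_DOMAIN_AFFECT_LIVE |
                                     VIR_DOMAIN_AFFECT_CONFIG |
                                     VIR_DOMAIN_IOTHREADS_PIN);
    }

The cpumap follows the usual libvirt convention: CPU n is bit (n % 8)
of byte (n / 8), so one byte of 0x0c covers CPUs 2 and 3.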

The qemuDomainSetIOThreadsPin live path uses the iothreadpids array,
initialized at process start/restart, to pin the given thread to a
specific CPU. The config path changes the config file as directed by
the cpumap. This code is mostly a copy of qemuDomainPinVcpuFlags.
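
For example, on the config path, pinning IOThread 2 to host CPUs 0-1
would be reflected in the domain XML along these lines (illustrative):

    <cputune>
      <iothreadpin iothread='2' cpuset='0-1'/>
    </cputune>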

Signed-off-by: John Ferlan <jferlan@xxxxxxxxxx>
---
 include/libvirt/libvirt-domain.h |   9 ++
 src/qemu/qemu_driver.c           | 238 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 247 insertions(+)

diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
index e07db16..d756b18 100644
--- a/include/libvirt/libvirt-domain.h
+++ b/include/libvirt/libvirt-domain.h
@@ -3207,6 +3207,15 @@ typedef void (*virConnectDomainEventDeviceRemovedCallback)(virConnectPtr conn,
 # define VIR_DOMAIN_TUNABLE_CPU_EMULATORPIN "cputune.emulatorpin"
 
 /**
+ * VIR_DOMAIN_TUNABLE_CPU_IOTHREADSPIN:
+ *
+ * Macro represents formatted pinning for one IOThread specified by id which is
+ * appended to the parameter name, for example "cputune.iothreadpin1",
+ * as VIR_TYPED_PARAM_STRING.
+ */
+# define VIR_DOMAIN_TUNABLE_CPU_IOTHREADSPIN "cputune.iothreadpin%u"
+
+/**
  * VIR_DOMAIN_TUNABLE_CPU_CPU_SHARES:
  *
  * Macro represents proportional weight of the scheduler used on the
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 2c9d08c..bc74942 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5821,6 +5821,243 @@ qemuDomainGetIOThreadsInfo(virDomainPtr dom,
     return ret;
 }
 
+static int
+qemuDomainSetIOThreadsPin(virDomainPtr dom,
+                          virQEMUDriverPtr driver,
+                          virDomainObjPtr vm,
+                          virQEMUDriverConfigPtr cfg,
+                          virDomainDefPtr persistentDef,
+                          unsigned int iothread_val,
+                          unsigned char *cpumap,
+                          int maplen,
+                          unsigned int flags)
+{
+    int ret = -1;
+    virBitmapPtr pcpumap = NULL;
+    bool doReset = false;
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    virDomainVcpuPinDefPtr *newIOThreadsPin = NULL;
+    size_t newIOThreadsPinNum = 0;
+    virCgroupPtr cgroup_iothread = NULL;
+    virObjectEventPtr event = NULL;
+    char paramField[VIR_TYPED_PARAM_FIELD_LENGTH] = "";
+    char *str = NULL;
+    virTypedParameterPtr eventParams = NULL;
+    int eventNparams = 0;
+    int eventMaxparams = 0;
+
+    if (!(pcpumap = virBitmapNewData(cpumap, maplen)))
+        goto cleanup;
+
+    if (virBitmapIsAllClear(pcpumap)) {
+        virReportError(VIR_ERR_INVALID_ARG, "%s",
+                       _("Empty iothread cpumap list for pinning"));
+        goto cleanup;
+    }
+
+    /* Pinning to all physical cpus means resetting,
+     * so check whether we can reset the setting.
+     */
+    if (virBitmapIsAllSet(pcpumap))
+        doReset = true;
+
+    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+        if (priv->iothreadpids == NULL) {
+            virReportError(VIR_ERR_OPERATION_INVALID,
+                           "%s", _("IOThread affinity is not supported"));
+            goto cleanup;
+        }
+
+        if (iothread_val > priv->niothreadpids) {
+            virReportError(VIR_ERR_INVALID_ARG,
+                           _("iothread value out of range %d > %d"),
+                           iothread_val, priv->niothreadpids);
+            goto cleanup;
+        }
+
+        if (vm->def->cputune.iothreadspin) {
+            /* The VcpuPinDefCopy works for IOThreads too */
+            newIOThreadsPin =
+                virDomainVcpuPinDefCopy(vm->def->cputune.iothreadspin,
+                                        vm->def->cputune.niothreadspin);
+            if (!newIOThreadsPin)
+                goto cleanup;
+
+            newIOThreadsPinNum = vm->def->cputune.niothreadspin;
+        } else {
+            if (VIR_ALLOC(newIOThreadsPin) < 0)
+                goto cleanup;
+            newIOThreadsPinNum = 0;
+        }
+
+        if (virDomainIOThreadsPinAdd(&newIOThreadsPin, &newIOThreadsPinNum,
+                                     cpumap, maplen, iothread_val) < 0) {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("failed to update iothreadspin"));
+            virDomainVcpuPinDefArrayFree(newIOThreadsPin, newIOThreadsPinNum);
+            goto cleanup;
+        }
+
+        /* Configure the corresponding cpuset cgroup before set affinity. */
+        if (virCgroupHasController(priv->cgroup,
+                                   VIR_CGROUP_CONTROLLER_CPUSET)) {
+            if (virCgroupNewIOThread(priv->cgroup, iothread_val,
+                                     false, &cgroup_iothread) < 0)
+                goto cleanup;
+            if (qemuSetupCgroupIOThreadsPin(cgroup_iothread,
+                                            newIOThreadsPin,
+                                            newIOThreadsPinNum,
+                                            iothread_val) < 0) {
+                virReportError(VIR_ERR_OPERATION_INVALID,
+                               _("failed to set cpuset.cpus in cgroup"
+                                 " for iothread %d"), iothread_val);
+                goto cleanup;
+            }
+        } else {
+            if (virProcessSetAffinity(priv->iothreadpids[iothread_val - 1],
+                                      pcpumap) < 0) {
+                virReportError(VIR_ERR_SYSTEM_ERROR,
+                               _("failed to set cpu affinity for IOThread %d"),
+                               iothread_val);
+                goto cleanup;
+            }
+        }
+
+        if (doReset) {
+            virDomainIOThreadsPinDel(vm->def, iothread_val);
+        } else {
+            if (vm->def->cputune.iothreadspin)
+                virDomainVcpuPinDefArrayFree(vm->def->cputune.iothreadspin,
+                                             vm->def->cputune.niothreadspin);
+
+            vm->def->cputune.iothreadspin = newIOThreadsPin;
+            vm->def->cputune.niothreadspin = newIOThreadsPinNum;
+            newIOThreadsPin = NULL;
+        }
+
+        if (newIOThreadsPin)
+            virDomainVcpuPinDefArrayFree(newIOThreadsPin, newIOThreadsPinNum);
+
+        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)
+            goto cleanup;
+
+        if (snprintf(paramField, VIR_TYPED_PARAM_FIELD_LENGTH,
+                     VIR_DOMAIN_TUNABLE_CPU_IOTHREADSPIN, iothread_val) < 0) {
+            goto cleanup;
+        }
+
+        str = virBitmapFormat(pcpumap);
+        if (virTypedParamsAddString(&eventParams, &eventNparams,
+                                    &eventMaxparams, paramField, str) < 0)
+            goto cleanup;
+
+        event = virDomainEventTunableNewFromDom(dom, eventParams, eventNparams);
+    }
+
+    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+        if (iothread_val > persistentDef->iothreads) {
+            virReportError(VIR_ERR_INVALID_ARG,
+                           _("iothread value out of range %d > %d"),
+                           iothread_val, persistentDef->iothreads);
+            goto cleanup;
+        }
+
+        if (doReset) {
+            virDomainIOThreadsPinDel(persistentDef, iothread_val);
+        } else {
+            if (!persistentDef->cputune.iothreadspin) {
+                if (VIR_ALLOC(persistentDef->cputune.iothreadspin) < 0)
+                    goto cleanup;
+                persistentDef->cputune.niothreadspin = 0;
+            }
+            if (virDomainIOThreadsPinAdd(&persistentDef->cputune.iothreadspin,
+                                         &persistentDef->cputune.niothreadspin,
+                                         cpumap,
+                                         maplen,
+                                         iothread_val) < 0) {
+                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                               _("failed to update or add iothreadspin xml "
+                                 "of a persistent domain"));
+                goto cleanup;
+            }
+        }
+
+        ret = virDomainSaveConfig(cfg->configDir, persistentDef);
+        goto cleanup;
+    }
+
+    ret = 0;
+
+ cleanup:
+    if (cgroup_iothread)
+        virCgroupFree(&cgroup_iothread);
+    if (event)
+        qemuDomainEventQueue(driver, event);
+    VIR_FREE(str);
+    virBitmapFree(pcpumap);
+
+    return ret;
+}
+
+static int
+qemuDomainSetIOThreads(virDomainPtr dom,
+                       unsigned int iothread_val,
+                       unsigned char *cpumap,
+                       int maplen,
+                       unsigned int flags)
+{
+    int ret = -1;
+    virQEMUDriverPtr driver = dom->conn->privateData;
+    virQEMUDriverConfigPtr cfg = NULL;
+    virDomainObjPtr vm;
+    virCapsPtr caps = NULL;
+    virDomainDefPtr persistentDef = NULL;
+
+    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+                  VIR_DOMAIN_AFFECT_CONFIG |
+                  VIR_DOMAIN_IOTHREADS_PIN, -1);
+
+    cfg = virQEMUDriverGetConfig(driver);
+
+    if (!(vm = qemuDomObjFromDomain(dom)))
+        goto cleanup;
+
+    if (virDomainSetIOThreadsEnsureACL(dom->conn, vm->def, flags) < 0)
+        goto cleanup;
+
+    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
+        goto cleanup;
+
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+        goto cleanup;
+
+    if (virDomainLiveConfigHelperMethod(caps, driver->xmlopt, vm, &flags,
+                                        &persistentDef) < 0)
+        goto endjob;
+
+    /* Only fall back to the live def for a LIVE-only request; a
+     * LIVE|CONFIG request must keep the persistent def fetched above. */
+    if ((flags & VIR_DOMAIN_AFFECT_LIVE) &&
+        !(flags & VIR_DOMAIN_AFFECT_CONFIG))
+        persistentDef = vm->def;
+
+    /* Coverity didn't realize that persistentDef must be set if we got
+     * here.  */
+    sa_assert(persistentDef);
+
+    if (flags & VIR_DOMAIN_IOTHREADS_PIN) {
+        ret = qemuDomainSetIOThreadsPin(dom, driver, vm, cfg,
+                                        persistentDef, iothread_val,
+                                        cpumap, maplen, flags);
+    }
+
+ endjob:
+    qemuDomainObjEndJob(driver, vm);
+
+ cleanup:
+    qemuDomObjEndAPI(&vm);
+    virObjectUnref(caps);
+    virObjectUnref(cfg);
+    return ret;
+}
+
 static int qemuDomainGetSecurityLabel(virDomainPtr dom, virSecurityLabelPtr seclabel)
 {
     virQEMUDriverPtr driver = dom->conn->privateData;
@@ -19422,6 +19659,7 @@ static virHypervisorDriver qemuHypervisorDriver = {
     .domainGetVcpus = qemuDomainGetVcpus, /* 0.4.4 */
     .domainGetMaxVcpus = qemuDomainGetMaxVcpus, /* 0.4.4 */
     .domainGetIOThreadsInfo = qemuDomainGetIOThreadsInfo, /* 1.2.13 */
+    .domainSetIOThreads = qemuDomainSetIOThreads, /* 1.2.13 */
     .domainGetSecurityLabel = qemuDomainGetSecurityLabel, /* 0.6.1 */
     .domainGetSecurityLabelList = qemuDomainGetSecurityLabelList, /* 0.10.0 */
     .nodeGetSecurityModel = qemuNodeGetSecurityModel, /* 0.6.1 */
-- 
2.1.0
