[PATCH 3/6] cputune: support cputune for qemu driver

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



When a domain starts up, set the cpu affinity and cpu shares according
to the cputune XML specified in the domain XML.

Modify "qemudDomainPinVcpu" to update the domain configuration for
vcpupin, and modify "qemuSetSchedulerParameters" to update the domain
configuration for cpu shares.

* src/qemu/qemu_cgroup.c
* src/qemu/qemu_driver.c
* src/qemu/qemu_process.c
---
 src/qemu/qemu_cgroup.c  |   15 +++++++++
 src/qemu/qemu_driver.c  |    9 +++++
 src/qemu/qemu_process.c |   80 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 104 insertions(+), 0 deletions(-)

diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
index b39b5e1..f578f9e 100644
--- a/src/qemu/qemu_cgroup.c
+++ b/src/qemu/qemu_cgroup.c
@@ -340,6 +340,21 @@ int qemuSetupCgroup(struct qemud_driver *driver,
                  vm->def->name);
     }
 
+    if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU)) {
+        if (vm->def->cputune.shares != 0) {
+            rc = virCgroupSetCpuShares(cgroup, vm->def->cputune.shares);
+            if(rc != 0) {
+                virReportSystemError(-rc,
+                                     _("Unable to set io cpu shares for domain %s"),
+                                     vm->def->name);
+                goto cleanup;
+            }
+        }
+    } else {
+        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+                        _("CPU tuning is not available on this host"));
+    }
+
 done:
     virCgroupFree(&cgroup);
     return 0;
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 1a7bec9..55c3918 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -2752,6 +2752,13 @@ qemudDomainPinVcpu(virDomainPtr dom,
                         "%s", _("cpu affinity is not supported"));
         goto cleanup;
     }
+
+    if (virDomainVcpupinAdd(vm->def, cpumap, maplen, vcpu) < 0) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                        "%s", _("failed to update or add vcpupin xml"));
+        goto cleanup;
+    }
+
     ret = 0;
 
 cleanup:
@@ -4718,6 +4725,8 @@ static int qemuSetSchedulerParameters(virDomainPtr dom,
                                      _("unable to set cpu shares tunable"));
                 goto cleanup;
             }
+
+            vm->def->cputune.shares = params[i].value.ul;
         } else {
             qemuReportError(VIR_ERR_INVALID_ARG,
                             _("Invalid parameter `%s'"), param->field);
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 7879165..a650d07 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1432,6 +1432,82 @@ qemuProcessGetPCIMemballoonVendorProduct(virDomainMemballoonDefPtr def,
 }
 
 
+/* Set the CPU affinity of each vcpu according to its <vcpupin> XML entry. */
+static int
+qemuProcessSetVcpuAffinites(virConnectPtr conn,
+                            virDomainObjPtr vm)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    virDomainDefPtr def = vm->def;
+    virNodeInfo nodeinfo;
+    pid_t vcpupid;
+    unsigned char *cpumask;
+    int vcpu, cpumaplen, hostcpus, maxcpu;
+
+    if (virNodeGetInfo(conn, &nodeinfo) != 0) {
+        return  -1;
+    }
+
+    if (!def->cputune.nvcpupin) /* no <vcpupin> elements: nothing to do */
+        return 0;
+
+    if (priv->vcpupids == NULL) { /* affinity needs the per-vcpu thread PIDs from qemu */
+        qemuReportError(VIR_ERR_NO_SUPPORT,
+                        "%s", _("cpu affinity is not supported"));
+        return -1;
+    }
+
+    hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
+    cpumaplen = VIR_CPU_MAPLEN(hostcpus);
+    maxcpu = cpumaplen * 8; /* bits in the map; clamped to hostcpus below */
+
+    if (maxcpu > hostcpus)
+        maxcpu = hostcpus;
+
+    for (vcpu = 0; vcpu < def->cputune.nvcpupin; vcpu++) {
+        if (vcpu != def->cputune.vcpupin[vcpu]->vcpuid) /* assumes vcpupin[] is indexed by vcpuid -- TODO confirm; out-of-order entries are silently skipped */
+            continue;
+
+        int i;
+        unsigned char *cpumap = NULL; /* NOTE(review): never freed -- leaks each iteration and on the error path below */
+
+        if (VIR_ALLOC_VAR(cpumap, char, cpumaplen) < 0) {
+            virReportOOMError();
+            return -1;
+        }
+
+        /* Start from an all-zero CPU map. */
+        for (i = 0; i < cpumaplen; i++)
+            cpumap[i] = 0;
+
+        cpumask = (unsigned char *)def->cputune.vcpupin[vcpu]->cpumask;
+        vcpupid = priv->vcpupids[vcpu];
+
+        /* Pack the per-CPU char array (cpumask) into the bitmap (cpumap). */
+        for (i = 0; i < VIR_DOMAIN_CPUMASK_LEN; i++) {
+            int cur = 0;
+            int mod = 0;
+
+            if (i) { /* redundant guard: cur and mod are already 0 when i == 0 */
+                cur = i / 8;
+                mod = i % 8;
+            }
+
+            if (cpumask[i])
+                cpumap[cur] |= 1 << mod;
+        }
+
+        if (virProcessInfoSetAffinity(vcpupid,
+                                      cpumap,
+                                      cpumaplen,
+                                      maxcpu) < 0) {
+            return -1; /* NOTE(review): cpumap is leaked on this path too */
+        }
+    }
+
+    return 0;
+}
+
 /*
  * This entire method assumes that PCI devices in 'info pci'
  * match ordering of devices specified on the command line
@@ -2187,6 +2263,10 @@ int qemuProcessStart(virConnectPtr conn,
     if (qemuProcessDetectVcpuPIDs(driver, vm) < 0)
         goto cleanup;
 
+    VIR_DEBUG0("Setting VCPU affinities");
+    if (qemuProcessSetVcpuAffinites(conn, vm) < 0)
+        goto cleanup;
+
     VIR_DEBUG0("Setting any required VM passwords");
     if (qemuProcessInitPasswords(conn, driver, vm, qemuCaps) < 0)
         goto cleanup;
-- 
1.7.4

--
libvir-list mailing list
libvir-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/libvir-list


[Index of Archives]     [Virt Tools]     [Libvirt Users]     [Lib OS Info]     [Fedora Users]     [Fedora Desktop]     [Fedora SELinux]     [Big List of Linux Books]     [Yosemite News]     [KDE Users]     [Fedora Tools]