[PATCH 07/10] vcpubandwidth: implement the new API in the qemu driver

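Wire up the per-vcpu bandwidth API in the qemu driver:

  * qemuGetVcpuBW queries each vcpu's period/quota, either from the
    persistent config or, via the cgroup CPU controller
    (cfs_period/cfs_quota), from the running domain.
  * qemuSetVcpuBW updates the persistent config and/or the live
    cgroup settings; a period and quota of 0 removes the setting
    for a vcpu.

If the VCPU<->PID mapping is unknown, or all vcpus run in the same
thread, there is no per-vcpu cgroup, so the settings are read from
and applied to the domain cgroup instead.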
---
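For reference, a minimal caller sketch.  This is hypothetical: it
assumes the public entry point virDomainSetVcpuBW and the
virDomainVcpuBWDef struct (vcpuid/period/quota) introduced earlier in
this series, with period/quota in microseconds as in the cgroup
cpu.cfs_period_us/cpu.cfs_quota_us files:

    #include <libvirt/libvirt.h>

    /* Cap vcpu 0 of a running guest at half a CPU:
     * 25ms of runtime in every 50ms period. */
    static int
    cap_vcpu0(virDomainPtr dom)
    {
        virDomainVcpuBWDef bw = {
            .vcpuid = 0,
            .period = 50000,
            .quota  = 25000,
        };

        return virDomainSetVcpuBW(dom, &bw, 1, VIR_DOMAIN_AFFECT_LIVE);
    }
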
 src/qemu/qemu_driver.c |  422 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 422 insertions(+), 0 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 9ddbc0f..03b8b9b 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5942,6 +5942,426 @@ qemuGetSchedulerParameters(virDomainPtr dom,
                                            VIR_DOMAIN_AFFECT_CURRENT);
 }
 
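+/* Fill vcpubw_list from the persistent config; vcpus without an
+ * explicit bandwidth setting report a period and quota of 0. */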
+static int
+qemuGetVcpusBWConfig(struct qemud_driver *driver, virDomainObjPtr vm,
+                     bool active, virDomainVcpuBWDefPtr vcpubw_list, int *nvcpu)
+{
+    int i;
+    virDomainVcpuBWDefPtr vcpubw = NULL;
+    virDomainDefPtr vmdef = NULL;
+
+    if (active) {
+        vmdef = virDomainObjGetPersistentDef(driver->caps, vm);
+        if (!vmdef) {
+            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                            _("can't get persistentDef"));
+            return -1;
+        }
+    } else {
+        vmdef = vm->def;
+    }
+
+    for (i = 0; i < *nvcpu; i++) {
+        if (i >= vmdef->vcpus)
+            break;
+
+        vcpubw = virDomainVcpuBWFindByVcpu(vmdef->cputune.vcpubw,
+                                           vmdef->cputune.nvcpubw,
+                                           i);
+        vcpubw_list[i].vcpuid = i;
+        if (vcpubw) {
+            vcpubw_list[i].quota = vcpubw->quota;
+            vcpubw_list[i].period = vcpubw->period;
+        } else {
+            vcpubw_list[i].quota = 0;
+            vcpubw_list[i].period = 0;
+        }
+    }
+
+    *nvcpu = i;
+    return 0;
+}
+
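+/* Read cfs_period and cfs_quota back from a single cgroup. */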
+static int
+qemuGetVcpuBWLive(virCgroupPtr cgroup, virDomainVcpuBWDefPtr vcpubw)
+{
+    unsigned long long period;
+    long long quota;
+    int rc;
+
+    rc = virCgroupGetCpuCfsPeriod(cgroup, &period);
+    if (rc < 0) {
+        virReportSystemError(-rc, "%s",
+                             _("unable to get cpu bandwidth period tunable"));
+        return -1;
+    }
+
+    rc = virCgroupGetCpuCfsQuota(cgroup, &quota);
+    if (rc < 0) {
+        virReportSystemError(-rc, "%s",
+                             _("unable to get cpu bandwidth tunable"));
+        return -1;
+    }
+
+    vcpubw->period = period;
+    vcpubw->quota = quota;
+
+    return 0;
+}
+
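+/* Read the live per-vcpu bandwidth from the cgroup hierarchy. */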
+static int
+qemuGetVcpusBWLive(struct qemud_driver *driver, virDomainObjPtr vm,
+                   virDomainVcpuBWDefPtr vcpubw_list, int *nvcpu)
+{
+    virCgroupPtr cgroup = NULL;
+    virCgroupPtr cgroup_vcpu = NULL;
+    qemuDomainObjPrivatePtr priv = NULL;
+    int rc;
+    int i;
+    int ret = -1;
+
+    if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU)) {
+        qemuReportError(VIR_ERR_OPERATION_INVALID,
+                        "%s", _("cgroup CPU controller is not mounted"));
+        return -1;
+    }
+
+    if (virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 0) != 0) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                        _("cannot find cgroup for domain %s"), vm->def->name);
+        return -1;
+    }
+
+    priv = vm->privateData;
+    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
+        /* We do not create a sub-directory for each vcpu */
+        for (i = 0; i < *nvcpu; i++) {
+            if (i >= vm->def->vcpus)
+                break;
+
+            vcpubw_list[i].vcpuid = i;
+            rc = qemuGetVcpuBWLive(cgroup, &vcpubw_list[i]);
+            if (rc < 0)
+                goto cleanup;
+        }
+        *nvcpu = i;
+        goto out;
+    }
+
+    for (i = 0; i < *nvcpu; i++) {
+        if (i >= vm->def->vcpus)
+            break;
+
+        rc = virCgroupForVcpu(cgroup, i, &cgroup_vcpu, 0);
+        if (rc < 0) {
+            virReportSystemError(-rc,
+                                 _("Unable to find vcpu cgroup for %s "
+                                   "(vcpu: %d)"),
+                                 vm->def->name, i);
+            goto cleanup;
+        }
+
+        vcpubw_list[i].vcpuid = i;
+        rc = qemuGetVcpuBWLive(cgroup_vcpu, &vcpubw_list[i]);
+        if (rc < 0)
+            goto cleanup;
+        virCgroupFree(&cgroup_vcpu);
+    }
+    *nvcpu = i;
+
+out:
+    ret = 0;
+
+cleanup:
+    virCgroupFree(&cgroup_vcpu);
+    virCgroupFree(&cgroup);
+    return ret;
+}
+
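+/* Driver method backing the domainGetVcpuBW API. */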
+static int
+qemuGetVcpuBW(virDomainPtr dom, virDomainVcpuBWDefPtr vcpubw,
+              int *nvcpu, unsigned int flags)
+{
+    struct qemud_driver *driver = dom->conn->privateData;
+    virDomainObjPtr vm = NULL;
+    int ret = -1;
+    bool isActive;
+
+    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+                  VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+    if ((flags & (VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG)) ==
+        (VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG)) {
+        qemuReportError(VIR_ERR_INVALID_ARG, "%s",
+                        _("cannot query live and config together"));
+        return -1;
+    }
+
+    if (*nvcpu < 1) {
+        qemuReportError(VIR_ERR_INVALID_ARG,
+                        "%s", _("Invalid vcpu count"));
+        return -1;
+    }
+
+    qemuDriverLock(driver);
+    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+
+    if (vm == NULL) {
+        char uuidstr[VIR_UUID_STRING_BUFLEN];
+        virUUIDFormat(dom->uuid, uuidstr);
+        qemuReportError(VIR_ERR_NO_DOMAIN,
+                        _("no domain with matching uuid '%s'"), uuidstr);
+        goto cleanup;
+    }
+
+    isActive = virDomainObjIsActive(vm);
+
+    if (flags == VIR_DOMAIN_AFFECT_CURRENT) {
+        if (isActive)
+            flags = VIR_DOMAIN_AFFECT_LIVE;
+        else
+            flags = VIR_DOMAIN_AFFECT_CONFIG;
+    }
+
+    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+        if (!vm->persistent) {
+            qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                            _("cannot query persistent config of a transient domain"));
+            goto cleanup;
+        }
+
+        ret = qemuGetVcpusBWConfig(driver, vm, isActive, vcpubw, nvcpu);
+        if (ret < 0)
+            goto cleanup;
+    }
+
+    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+        if (!isActive) {
+            qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                            _("domain is not running"));
+            goto cleanup;
+        }
+
+        ret = qemuGetVcpusBWLive(driver, vm, vcpubw, nvcpu);
+        if (ret < 0)
+            goto cleanup;
+    }
+
+    ret = 0;
+
+cleanup:
+    if (vm)
+        virDomainObjUnlock(vm);
+    qemuDriverUnlock(driver);
+    return ret;
+}
+
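+/* Update the bandwidth in a domain definition.  A period and quota
+ * of 0 removes the setting for that vcpu. */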
+static int
+qemuSetVcpuBWConfig(virDomainDefPtr vmdef, virDomainVcpuBWDefPtr vcpubw,
+                    int nvcpu)
+{
+    int i;
+
+    for (i = 0; i < nvcpu; i++) {
+        if (vcpubw[i].vcpuid < 0 || vcpubw[i].vcpuid >= vmdef->vcpus) {
+            qemuReportError(VIR_ERR_INVALID_ARG, "%s", _("Invalid vcpu id"));
+            return -1;
+        }
+
+        if (vcpubw[i].period == 0 && vcpubw[i].quota == 0) {
+            if (virDomainVcpuBWDel(vmdef, vcpubw[i].vcpuid) < 0)
+                return -1;
+        } else {
+            if (virDomainVcpuBWAdd(vmdef, vcpubw[i].period,
+                                   vcpubw[i].quota, vcpubw[i].vcpuid) < 0)
+                return -1;
+        }
+    }
+
+    return 0;
+}
+
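+/* Apply one bandwidth setting to a cgroup and mirror it into the
+ * live definition so the status file stays in sync. */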
+static int
+qemuSetVcpuBWLive(struct qemud_driver *driver, virDomainObjPtr vm,
+                  virCgroupPtr cgroup, virDomainVcpuBWDefPtr vcpubw)
+{
+    if (qemuSetupCgroupVcpuBW(cgroup, vcpubw) < 0)
+        return -1;
+
+    if (qemuSetVcpuBWConfig(vm->def, vcpubw, 1) < 0)
+        return -1;
+
+    if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
+        return -1;
+
+    return 0;
+}
+
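+/* Apply bandwidth settings to the vcpu cgroups of a running domain. */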
+static int
+qemuSetVcpusBWLive(struct qemud_driver *driver, virDomainObjPtr vm,
+                   virCgroupPtr cgroup, virDomainVcpuBWDefPtr vcpubw,
+                   int nvcpubw)
+{
+    int i;
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    virCgroupPtr cgroup_vcpu = NULL;
+    int rc;
+
+    /* Check all the vcpu ids first */
+    for (i = 0; i < nvcpubw; i++) {
+        if (vcpubw[i].vcpuid < 0 || vcpubw[i].vcpuid >= vm->def->vcpus) {
+            qemuReportError(VIR_ERR_INVALID_ARG, "%s", _("Invalid vcpu id"));
+            return -1;
+        }
+    }
+
+    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
+        /* If we do not know the VCPU<->PID mapping, or all vcpus run in
+         * the same thread, we cannot control each vcpu separately, so
+         * just apply the last config. */
+        if (vcpubw[nvcpubw - 1].period || vcpubw[nvcpubw - 1].quota) {
+            return qemuSetVcpuBWLive(driver, vm, cgroup, &vcpubw[nvcpubw - 1]);
+        }
+        return 0;
+    }
+
+    for (i = 0; i < nvcpubw; i++) {
+        if (vcpubw[i].period == 0 && vcpubw[i].quota == 0)
+            continue;
+
+        rc = virCgroupForVcpu(cgroup, vcpubw[i].vcpuid, &cgroup_vcpu, 0);
+        if (rc < 0) {
+            virReportSystemError(-rc,
+                                 _("Unable to find vcpu cgroup for %s "
+                                   "(vcpu: %d)"),
+                                 vm->def->name, vcpubw[i].vcpuid);
+            goto cleanup;
+        }
+
+        if (qemuSetVcpuBWLive(driver, vm, cgroup_vcpu, &vcpubw[i]) < 0)
+            goto cleanup;
+
+        virCgroupFree(&cgroup_vcpu);
+    }
+
+    return 0;
+
+cleanup:
+    virCgroupFree(&cgroup_vcpu);
+    return -1;
+}
+
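+/* Driver method backing the domainSetVcpuBW API. */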
+static int
+qemuSetVcpuBW(virDomainPtr dom, virDomainVcpuBWDefPtr vcpubw,
+              int nvcpu, unsigned int flags)
+{
+    struct qemud_driver *driver = dom->conn->privateData;
+    virCgroupPtr cgroup = NULL;
+    virDomainObjPtr vm = NULL;
+    int ret = -1;
+    bool isActive;
+    virDomainDefPtr vmdef = NULL;
+
+    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+                  VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+    if (nvcpu < 1) {
+        qemuReportError(VIR_ERR_INVALID_ARG, "%s", _("Invalid vcpu count"));
+        return -1;
+    }
+
+    qemuDriverLock(driver);
+    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+
+    if (vm == NULL) {
+        char uuidstr[VIR_UUID_STRING_BUFLEN];
+        virUUIDFormat(dom->uuid, uuidstr);
+        qemuReportError(VIR_ERR_NO_DOMAIN,
+                        _("no domain with matching uuid '%s'"), uuidstr);
+        goto cleanup;
+    }
+
+    isActive = virDomainObjIsActive(vm);
+
+    if (flags == VIR_DOMAIN_AFFECT_CURRENT) {
+        if (isActive)
+            flags = VIR_DOMAIN_AFFECT_LIVE;
+        else
+            flags = VIR_DOMAIN_AFFECT_CONFIG;
+    }
+
+    if ((flags & VIR_DOMAIN_AFFECT_CONFIG) && !vm->persistent) {
+        qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                        _("cannot change persistent config of a transient domain"));
+        goto cleanup;
+    }
+
+    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+        if (!isActive) {
+            qemuReportError(VIR_ERR_OPERATION_INVALID,
+                            "%s", _("domain is not running"));
+            goto cleanup;
+        }
+
+        if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU)) {
+            qemuReportError(VIR_ERR_OPERATION_INVALID,
+                            "%s", _("cgroup CPU controller is not mounted"));
+            goto cleanup;
+        }
+        if (virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 0) != 0) {
+            qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                            _("cannot find cgroup for domain %s"),
+                            vm->def->name);
+            goto cleanup;
+        }
+    }
+
+    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+        /* Make a copy of the persistent definition and update that. */
+        vmdef = virDomainObjCopyPersistentDef(driver->caps, vm);
+        if (!vmdef)
+            goto cleanup;
+
+        if (qemuSetVcpuBWConfig(vmdef, vcpubw, nvcpu) < 0)
+            goto cleanup;
+    }
+
+    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+        if (qemuSetVcpusBWLive(driver, vm, cgroup, vcpubw, nvcpu) < 0)
+            goto cleanup;
+    }
+
+    /* Finally, if no error occurred so far, save the config. */
+    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+        ret = virDomainSaveConfig(driver->configDir, vmdef);
+        if (ret < 0)
+            goto cleanup;
+        virDomainObjAssignDef(vm, vmdef, false);
+        vmdef = NULL;
+    }
+
+    ret = 0;
+
+cleanup:
+    virCgroupFree(&cgroup);
+    virDomainDefFree(vmdef);
+    if (vm)
+        virDomainObjUnlock(vm);
+    qemuDriverUnlock(driver);
+    return ret;
+}
+
 /* This uses the 'info blockstats' monitor command which was
  * integrated into both qemu & kvm in late 2007.  If the command is
  * not supported we detect this and return the appropriate error.
@@ -8477,6 +8897,8 @@ static virDriver qemuDriver = {
     .domainGetSchedulerParametersFlags = qemuGetSchedulerParametersFlags, /* 0.9.2 */
     .domainSetSchedulerParameters = qemuSetSchedulerParameters, /* 0.7.0 */
     .domainSetSchedulerParametersFlags = qemuSetSchedulerParametersFlags, /* 0.9.2 */
+    .domainGetVcpuBW = qemuGetVcpuBW, /* 0.9.4 */
+    .domainSetVcpuBW = qemuSetVcpuBW, /* 0.9.4 */
     .domainMigratePerform = qemudDomainMigratePerform, /* 0.5.0 */
     .domainBlockStats = qemudDomainBlockStats, /* 0.4.1 */
     .domainInterfaceStats = qemudDomainInterfaceStats, /* 0.4.1 */
-- 
1.7.1
