--- src/qemu/qemu_driver.c | 186 +++++++++++++++++++++++++++++++++++++++++++---- src/util/cgroup.c | 4 +- 2 files changed, 173 insertions(+), 17 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index c3555ca..8ba3806 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -12463,19 +12463,142 @@ qemuDomainGetTotalcpuStats(virCgroupPtr group, return nparams; } +/* get the cpu time from cpuacct cgroup group, saving + cpu time value in cpu_time. caller is responsible + for freeing memory allocated for cpu_time. + return 0 on success, -1 otherwise */ +static int getVcpuPercpuStats(virCgroupPtr group, + unsigned long long **cpu_time, + unsigned int *num) +{ + int ret = -1; + unsigned long long *ptime = NULL; + char *buf = NULL; + char *pos; + unsigned long long tmp; + + if (virCgroupGetCpuacctPercpuUsage(group, &buf)) + goto error; + + pos = buf; + *num = 0; + while (virStrToLong_ull(pos, &pos, 10, &tmp) == 0) + (*num)++; + + if (*num > 0) { + int i; + + if (VIR_ALLOC_N(ptime, *num) < 0) { + virReportOOMError(); + goto error; + } + + pos = buf; + for (i = 0; i < *num; i++) + if (virStrToLong_ull(pos, &pos, 10, ptime + i) < 0) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("cpuacct parse error")); + VIR_FREE(ptime); + goto error; + } + *cpu_time = ptime; + ret = 0; + } +error: + VIR_FREE(buf); + return ret; +} + +/* This function gets the sums of cpu time consumed by all vcpus. + * For example, if there are 4 physical cpus, and 2 vcpus in a domain, + * then for each vcpu, the cpuacct.usage_percpu looks like this: + * t0 t1 t2 t3 + * and we have 2 groups of such data: + * v\p 0 1 2 3 + * 0 t00 t01 t02 t03 + * 1 t10 t11 t12 t13 + * for each pcpu, the sum is cpu time consumed by all vcpus. 
+ * s0 = t00 + t10 + * s1 = t01 + t11 + * s2 = t02 + t12 + * s3 = t03 + t13 + */ +static int getSumVcpuPercpuStats(virCgroupPtr group, + unsigned int nvcpu, + unsigned long long **sum_cpu_time, + unsigned int *num) +{ + unsigned long long **cpu_time; + unsigned int *ncpu_time; + unsigned int max = 0; + unsigned long long *tmp = NULL; + int ret = -1; + int i, j; + + if ((VIR_ALLOC_N(cpu_time, nvcpu) < 0) || + (VIR_ALLOC_N(ncpu_time, nvcpu) < 0)) { + virReportOOMError(); + goto error; + } + + for (i = 0; i < nvcpu; i++) { + virCgroupPtr group_vcpu = NULL; + ret = virCgroupForVcpu(group, i, &group_vcpu, 0); + if (ret < 0) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("error on creating cgroup cpuacct for vcpu")); + goto error; + } + ret = getVcpuPercpuStats(group_vcpu, + &cpu_time[i], + &ncpu_time[i]); + virCgroupFree(&group_vcpu); + + if (ret < 0) + goto error; + + if (max < ncpu_time[i]) + max = ncpu_time[i]; + } + + if (max > 0) { + if (VIR_ALLOC_N(tmp, max) < 0) { + virReportOOMError(); + goto error; + } + + for (i = 0; i < nvcpu; i++) { + for (j = 0; j < ncpu_time[i]; j++) + tmp[j] += cpu_time[i][j]; + } + *sum_cpu_time = tmp; + *num = max; + ret = 0; + } + +error: + if (cpu_time) { + for (i = 0; i < nvcpu; i++) + VIR_FREE(cpu_time[i]); + } + + VIR_FREE(cpu_time); + VIR_FREE(ncpu_time); + return ret; +} + static int qemuDomainGetPercpuStats(virDomainPtr domain, + virDomainObjPtr vm, virCgroupPtr group, virTypedParameterPtr params, unsigned int nparams, int start_cpu, - unsigned int ncpus) + unsigned int ncpus, + unsigned int flags) { char *map = NULL; int rv = -1; int i, max_id; char *pos; char *buf = NULL; + qemuDomainObjPrivatePtr priv = vm->privateData; virTypedParameterPtr ent; int param_idx; @@ -12511,22 +12634,52 @@ qemuDomainGetPercpuStats(virDomainPtr domain, if (max_id - start_cpu > ncpus - 1) max_id = start_cpu + ncpus - 1; - for (i = 0; i <= max_id; i++) { + if (flags & VIR_DOMAIN_CPU_STATS_VCPU) { + unsigned long long *sum_cpu_time = NULL; + unsigned int n = 0; unsigned long 
long cpu_time; - if (!map[i]) { - cpu_time = 0; - } else if (virStrToLong_ull(pos, &pos, 10, &cpu_time) < 0) { - qemuReportError(VIR_ERR_INTERNAL_ERROR, - _("cpuacct parse error")); + if (getSumVcpuPercpuStats(group, + priv->nvcpupids, + &sum_cpu_time, + &n) < 0) goto cleanup; + + for (i = 0; i <= max_id && i < n; i++) { + if (i < start_cpu) + continue; + + if (!map[i]) + cpu_time = 0; + else + cpu_time = sum_cpu_time[i]; + if (virTypedParameterAssign(&params[(i - start_cpu) * nparams + param_idx], + VIR_DOMAIN_CPU_STATS_CPUTIME, + VIR_TYPED_PARAM_ULLONG, + cpu_time) < 0) { + VIR_FREE(sum_cpu_time); + goto cleanup; + } + } + VIR_FREE(sum_cpu_time); + } else { + for (i = 0; i <= max_id; i++) { + unsigned long long cpu_time; + + if (!map[i]) { + cpu_time = 0; + } else if (virStrToLong_ull(pos, &pos, 10, &cpu_time) < 0) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("cpuacct parse error")); + goto cleanup; + } + if (i < start_cpu) + continue; + ent = &params[ (i - start_cpu) * nparams + param_idx]; + if (virTypedParameterAssign(ent, VIR_DOMAIN_CPU_STATS_CPUTIME, + VIR_TYPED_PARAM_ULLONG, cpu_time) < 0) + goto cleanup; } - if (i < start_cpu) - continue; - ent = &params[ (i - start_cpu) * nparams + param_idx]; - if (virTypedParameterAssign(ent, VIR_DOMAIN_CPU_STATS_CPUTIME, - VIR_TYPED_PARAM_ULLONG, cpu_time) < 0) - goto cleanup; } rv = param_idx + 1; cleanup: @@ -12550,7 +12703,8 @@ qemuDomainGetCPUStats(virDomainPtr domain, int ret = -1; bool isActive; - virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1); + virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY | + VIR_DOMAIN_CPU_STATS_VCPU, -1); qemuDriverLock(driver); @@ -12583,8 +12737,8 @@ qemuDomainGetCPUStats(virDomainPtr domain, if (start_cpu == -1) ret = qemuDomainGetTotalcpuStats(group, params, nparams); else - ret = qemuDomainGetPercpuStats(domain, group, params, nparams, - start_cpu, ncpus); + ret = qemuDomainGetPercpuStats(domain, vm, group, params, nparams, + start_cpu, ncpus, flags); cleanup: virCgroupFree(&group); if (vm) diff 
--git a/src/util/cgroup.c b/src/util/cgroup.c index ad49bc2..5b32881 100644 --- a/src/util/cgroup.c +++ b/src/util/cgroup.c @@ -530,7 +530,9 @@ static int virCgroupMakeGroup(virCgroupPtr parent, virCgroupPtr group, continue; /* We need to control cpu bandwidth for each vcpu now */ - if ((flags & VIR_CGROUP_VCPU) && (i != VIR_CGROUP_CONTROLLER_CPU)) { + if ((flags & VIR_CGROUP_VCPU) && + (i != VIR_CGROUP_CONTROLLER_CPU && + i != VIR_CGROUP_CONTROLLER_CPUACCT)) { /* treat it as unmounted and we can use virCgroupAddTask */ VIR_FREE(group->controllers[i].mountPoint); continue; -- 1.7.1 -- libvir-list mailing list libvir-list@xxxxxxxxxx https://www.redhat.com/mailman/listinfo/libvir-list