On 13 Dec 2015 at 12:53, "Daniel P. Berrange" <berrange@xxxxxxxxxx> wrote:
>
> The VIR_DOMAIN_STATS_VCPU flag to virDomainListGetStats
> enables reporting of stats about vCPUs. Currently we
> only report the cumulative CPU running time and the
> execution state.
>
> This adds reporting of the wait time - the time the vCPU
> wanted to run, but the host scheduler had something else
> running ahead of it.
>
As I understand it, this is an analog of the well-known steal time that can be viewed inside the VM?
> The data is reported per-vCPU, e.g.
>
> $ virsh domstats --vcpu demo
> Domain: 'demo'
> vcpu.current=4
> vcpu.maximum=4
> vcpu.0.state=1
> vcpu.0.time=1420000000
> vcpu.0.wait=18403928
> vcpu.1.state=1
> vcpu.1.time=130000000
> vcpu.1.wait=10612111
> vcpu.2.state=1
> vcpu.2.time=110000000
> vcpu.2.wait=12759501
> vcpu.3.state=1
> vcpu.3.time=90000000
> vcpu.3.wait=21825087
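>
> For reference, a minimal sketch of how a client could read the new
> field through the public API, using the existing
> virConnectGetAllDomainStats() entry point (error handling trimmed):
>
>     #include <stdio.h>
>     #include <libvirt/libvirt.h>
>
>     int main(void)
>     {
>         virConnectPtr conn = virConnectOpenReadOnly("qemu:///system");
>         virDomainStatsRecordPtr *records = NULL;
>         int nrecords, i, j;
>
>         /* Request only the vCPU stats group for all domains */
>         nrecords = virConnectGetAllDomainStats(conn, VIR_DOMAIN_STATS_VCPU,
>                                                &records, 0);
>         for (i = 0; i < nrecords; i++) {
>             for (j = 0; j < records[i]->nparams; j++) {
>                 virTypedParameterPtr p = &records[i]->params[j];
>                 /* The new field shows up as e.g. "vcpu.0.wait" */
>                 if (p->type == VIR_TYPED_PARAM_ULLONG)
>                     printf("%s=%llu\n", p->field, p->value.ul);
>             }
>         }
>
>         virDomainStatsRecordListFree(records);
>         virConnectClose(conn);
>         return 0;
>     }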
>
> In implementing this I noticed our reporting of CPU execution
> time has very poor granularity, since we are getting it
> from /proc/$PID/stat. As a future enhancement we should
> prefer to get the CPU execution time from /proc/$PID/schedstat
> or /proc/$PID/sched (if either exists on the running kernel).
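>
> For illustration only (not part of this patch): /proc/$PID/schedstat
> holds three space-separated values - time spent on the CPU (ns), time
> spent waiting on a runqueue (ns), and the number of timeslices run -
> so a hypothetical reader for it could look roughly like:
>
>     #include <stdio.h>
>     #include <sys/types.h>
>
>     /* Hypothetical sketch; needs schedstats support in the kernel */
>     static int
>     readSchedstat(pid_t pid, unsigned long long *runNs,
>                   unsigned long long *waitNs)
>     {
>         char path[64];
>         unsigned long long timeslices;
>         FILE *fp;
>         int ok;
>
>         snprintf(path, sizeof(path), "/proc/%d/schedstat", (int)pid);
>         if (!(fp = fopen(path, "r")))
>             return -1;
>         ok = fscanf(fp, "%llu %llu %llu",
>                     runNs, waitNs, &timeslices) == 3;
>         fclose(fp);
>         return ok ? 0 : -1;
>     }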
>
> Signed-off-by: Daniel P. Berrange <berrange@xxxxxxxxxx>
> ---
> src/qemu/qemu_driver.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 96 insertions(+), 4 deletions(-)
>
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index 783a7cd..5293294 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -1361,6 +1361,80 @@ static char *qemuConnectGetCapabilities(virConnectPtr conn) {
>
>
> static int
> +qemuGetSchedInfo(unsigned long long *cpuWait,
> +                 pid_t pid, pid_t tid)
> +{
> +    char *proc = NULL;
> +    char *data = NULL;
> +    char **lines = NULL;
> +    size_t i;
> +    int ret = -1;
> +    double val;
> +
> +    *cpuWait = 0;
> +
> +    /* In general, we cannot assume pid_t fits in int; but /proc parsing
> +     * is specific to Linux where int works fine. */
> +    if (tid)
> +        ret = virAsprintf(&proc, "/proc/%d/task/%d/sched", (int)pid, (int)tid);
> +    else
> +        ret = virAsprintf(&proc, "/proc/%d/sched", (int)pid);
> +    if (ret < 0)
> +        goto cleanup;
> +    ret = -1;
> +
> +    /* The file is not guaranteed to exist (needs CONFIG_SCHED_DEBUG) */
> +    if (access(proc, R_OK) < 0) {
> +        ret = 0;
> +        goto cleanup;
> +    }
> +
> +    if (virFileReadAll(proc, (1 << 16), &data) < 0)
> +        goto cleanup;
> +
> +    lines = virStringSplit(data, "\n", 0);
> +    if (!lines)
> +        goto cleanup;
> +
> +    for (i = 0; lines[i] != NULL; i++) {
> +        const char *line = lines[i];
> +
> +        /* Needs CONFIG_SCHEDSTATS. The second check
> +         * is the old name the kernel used in the past */
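> +        /* Example line (value is in milliseconds):
> +         *   se.statistics.wait_sum : 123456.789012 */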
> +        if (STRPREFIX(line, "se.statistics.wait_sum") ||
> +            STRPREFIX(line, "se.wait_sum")) {
> +            line = strchr(line, ':');
> +            if (!line) {
> +                virReportError(VIR_ERR_INTERNAL_ERROR,
> +                               _("Missing separator in sched info '%s'"),
> +                               lines[i]);
> +                goto cleanup;
> +            }
> +            line++;
> +            while (*line == ' ') {
> +                line++;
> +            }
> +
> +            if (virStrToDouble(line, NULL, &val) < 0) {
> +                virReportError(VIR_ERR_INTERNAL_ERROR,
> +                               _("Unable to parse sched info value '%s'"),
> +                               line);
> +                goto cleanup;
> +            }
> +
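> +            /* wait_sum is in milliseconds; convert to nanoseconds */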
> +            *cpuWait = (unsigned long long)(val * 1000000);
> +            break;
> +        }
> +    }
> +
> +    ret = 0;
> +
> + cleanup:
> +    VIR_FREE(data);
> +    VIR_FREE(proc);
> +    virStringFreeList(lines);
> +    return ret;
> +}
> +
> +
> +static int
> qemuGetProcessInfo(unsigned long long *cpuTime, int *lastCpu, long *vm_rss,
>                    pid_t pid, int tid)
> {
> @@ -1424,6 +1498,7 @@ qemuGetProcessInfo(unsigned long long *cpuTime, int *lastCpu, long *vm_rss,
> static int
> qemuDomainHelperGetVcpus(virDomainObjPtr vm,
>                          virVcpuInfoPtr info,
> +                        unsigned long long *cpuwait,
>                          int maxinfo,
>                          unsigned char *cpumaps,
>                          int maplen)
> @@ -1476,6 +1551,11 @@ qemuDomainHelperGetVcpus(virDomainObjPtr vm,
>             virBitmapFree(map);
>         }
>
> +        if (cpuwait) {
> +            if (qemuGetSchedInfo(&(cpuwait[i]), vm->pid, vcpupid) < 0)
> +                return -1;
> +        }
> +
>         ncpuinfo++;
>     }
>
> @@ -5517,7 +5597,7 @@ qemuDomainGetVcpus(virDomainPtr dom,
>         goto cleanup;
>     }
>
> -    ret = qemuDomainHelperGetVcpus(vm, info, maxinfo, cpumaps, maplen);
> +    ret = qemuDomainHelperGetVcpus(vm, info, NULL, maxinfo, cpumaps, maplen);
>
>  cleanup:
>     virDomainObjEndAPI(&vm);
> @@ -19089,6 +19169,7 @@ qemuDomainGetStatsVcpu(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
>     int ret = -1;
>     char param_name[VIR_TYPED_PARAM_FIELD_LENGTH];
>     virVcpuInfoPtr cpuinfo = NULL;
> +    unsigned long long *cpuwait = NULL;
>
>     if (virTypedParamsAddUInt(&record->params,
>                               &record->nparams,
> @@ -19104,10 +19185,12 @@ qemuDomainGetStatsVcpu(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
>                               virDomainDefGetVcpusMax(dom->def)) < 0)
>         return -1;
>
> -    if (VIR_ALLOC_N(cpuinfo, virDomainDefGetVcpus(dom->def)) < 0)
> -        return -1;
> +    if (VIR_ALLOC_N(cpuinfo, virDomainDefGetVcpus(dom->def)) < 0 ||
> +        VIR_ALLOC_N(cpuwait, virDomainDefGetVcpus(dom->def)) < 0)
> +        goto cleanup;
>
> -    if (qemuDomainHelperGetVcpus(dom, cpuinfo, virDomainDefGetVcpus(dom->def),
> +    if (qemuDomainHelperGetVcpus(dom, cpuinfo, cpuwait,
> +                                 virDomainDefGetVcpus(dom->def),
>                                  NULL, 0) < 0) {
>         virResetLastError();
>         ret = 0; /* it's ok to be silent and go ahead */
> @@ -19136,12 +19219,21 @@ qemuDomainGetStatsVcpu(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
>                                     param_name,
>                                     cpuinfo[i].cpuTime) < 0)
>             goto cleanup;
> +        snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH,
> +                 "vcpu.%zu.wait", i);
> +        if (virTypedParamsAddULLong(&record->params,
> +                                    &record->nparams,
> +                                    maxparams,
> +                                    param_name,
> +                                    cpuwait[i]) < 0)
> +            goto cleanup;
>     }
>
>     ret = 0;
>
>  cleanup:
>     VIR_FREE(cpuinfo);
> +    VIR_FREE(cpuwait);
>     return ret;
> }
>
> --
> 2.5.0
>
--
libvir-list mailing list
libvir-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/libvir-list