On 09/12/14 13:48, Francesco Romani wrote:
> Future patches which will implement more
> bulk stats groups for QEMU will need to access
> the connection object.
>
> To accommodate that, a few changes are needed:
>
> * enrich the internal prototype to pass the QEMU driver object.
> * add a per-group flag to mark whether a collector needs
>   monitor access.
> * if at least one collector of the requested stats needs
>   monitor access, we must start a query job for each domain.
>   The specific collectors will run nested monitor jobs
>   inside that.
> * even if requested, the monitor may not be available.
>   Pass a flag to the workers to signal its availability,
>   in order to gather as much data as possible anyway.
>
> Signed-off-by: Francesco Romani <fromani@xxxxxxxxxx>
> ---
>  src/qemu/qemu_driver.c | 60 +++++++++++++++++++++++++++++++++++++++++++-------
>  1 file changed, 52 insertions(+), 8 deletions(-)
>
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index 917b286..279c8b3 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -17244,7 +17244,8 @@ qemuConnectGetDomainCapabilities(virConnectPtr conn,
>
>
>  static int
> -qemuDomainGetStatsState(virDomainObjPtr dom,
> +qemuDomainGetStatsState(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
> +                        virDomainObjPtr dom,
>                          virDomainStatsRecordPtr record,
>                          int *maxparams,
>                          unsigned int privflags ATTRIBUTE_UNUSED)
> @@ -17267,8 +17268,17 @@ qemuDomainGetStatsState(virDomainObjPtr dom,
>  }
>
>
> +typedef enum {
> +    QEMU_DOMAIN_STATS_HAVE_MONITOR = (1 << 0), /* QEMU monitor available */
> +} qemuDomainStatsFlags;
> +
> +
> +#define HAVE_MONITOR(flags) ((flags) & QEMU_DOMAIN_STATS_HAVE_MONITOR)
> +
> +
>  typedef int
> -(*qemuDomainGetStatsFunc)(virDomainObjPtr dom,
> +(*qemuDomainGetStatsFunc)(virQEMUDriverPtr driver,
> +                          virDomainObjPtr dom,
>                            virDomainStatsRecordPtr record,
>                            int *maxparams,
>                            unsigned int flags);
> @@ -17276,11 +17286,12 @@ typedef int
>  struct qemuDomainGetStatsWorker {
>      qemuDomainGetStatsFunc func;
>      unsigned int stats;
> +    bool monitor;
>  };
>
>  static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
> -    { qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE},
> -    { NULL, 0 }
> +    { qemuDomainGetStatsState, VIR_DOMAIN_STATS_STATE, false },
> +    { NULL, 0, false }
>  };
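Side note for readers of the series: a later collector that does need the
monitor would register with the new third field set to true. A hypothetical
entry (qemuDomainGetStatsBalloon and VIR_DOMAIN_STATS_BALLOON are
illustrative names here, not part of this patch) would look like:

    static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
        { qemuDomainGetStatsState,   VIR_DOMAIN_STATS_STATE,   false },
        /* hypothetical future collector: it talks to the QEMU monitor,
         * so 'monitor' is true and a query job is started for it */
        { qemuDomainGetStatsBalloon, VIR_DOMAIN_STATS_BALLOON, true },
        { NULL, 0, false }
    };

With such an entry, qemuDomainGetStatsNeedMonitor() below would return true
whenever VIR_DOMAIN_STATS_BALLOON is among the requested stats groups.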
> @@ -17312,6 +17323,20 @@ qemuDomainGetStatsCheckSupport(unsigned int *stats,
>  }
>
>
> +static bool
> +qemuDomainGetStatsNeedMonitor(unsigned int stats)
> +{
> +    size_t i;
> +
> +    for (i = 0; qemuDomainGetStatsWorkers[i].func; i++)
> +        if (stats & qemuDomainGetStatsWorkers[i].stats)
> +            if (qemuDomainGetStatsWorkers[i].monitor)
> +                return true;
> +
> +    return false;
> +}
> +
> +
>  static int
>  qemuDomainGetStats(virConnectPtr conn,
>                     virDomainObjPtr dom,
> @@ -17329,8 +17354,8 @@ qemuDomainGetStats(virConnectPtr conn,
>
>      for (i = 0; qemuDomainGetStatsWorkers[i].func; i++) {
>          if (stats & qemuDomainGetStatsWorkers[i].stats) {
> -            if (qemuDomainGetStatsWorkers[i].func(dom, tmp, &maxparams,
> -                                                  flags) < 0)
> +            if (qemuDomainGetStatsWorkers[i].func(conn->privateData, dom, tmp,
> +                                                  &maxparams, flags) < 0)
>                  goto cleanup;
>          }
>      }
> @@ -17369,6 +17394,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
>      int nstats = 0;
>      size_t i;
>      int ret = -1;
> +    unsigned int privflags = 0;
>
>      if (ndoms)
>          virCheckFlags(VIR_CONNECT_GET_ALL_DOMAINS_STATS_ENFORCE_STATS, -1);
> @@ -17403,6 +17429,9 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
>      if (VIR_ALLOC_N(tmpstats, ndoms + 1) < 0)
>          goto cleanup;
>
> +    if (qemuDomainGetStatsNeedMonitor(stats))
> +        privflags |= QEMU_DOMAIN_STATS_HAVE_MONITOR;
> +
>      for (i = 0; i < ndoms; i++) {
>          virDomainStatsRecordPtr tmp = NULL;
>
> @@ -17413,12 +17442,22 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
>              !virConnectGetAllDomainStatsCheckACL(conn, dom->def))
>              continue;
>
> -        if (qemuDomainGetStats(conn, dom, stats, &tmp, flags) < 0)
> -            goto cleanup;
> +        if (HAVE_MONITOR(privflags) &&
> +            qemuDomainObjBeginJob(driver, dom, QEMU_JOB_QUERY) < 0)
> +            /* As it was never requested. Gather as much as possible anyway. */
> +            privflags &= ~QEMU_DOMAIN_STATS_HAVE_MONITOR;

This masks out the monitor flag for every subsequent domain too. If a
single domain is stuck in a job, the others can still work, so this
should be done on a per-domain basis (see the P.S. below for a sketch).

> +
> +        if (qemuDomainGetStats(conn, dom, stats, &tmp, privflags) < 0)
> +            goto endjob;
>
>          if (tmp)
>              tmpstats[nstats++] = tmp;
>
> +        if (HAVE_MONITOR(privflags) && !qemuDomainObjEndJob(driver, dom)) {
> +            dom = NULL;
> +            goto cleanup;
> +        }
> +
>          virObjectUnlock(dom);
>          dom = NULL;
>      }
> @@ -17428,6 +17467,11 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
>
>      ret = nstats;
>
> + endjob:
> +    if (HAVE_MONITOR(privflags) && dom)
> +        if (!qemuDomainObjEndJob(driver, dom))
> +            dom = NULL;
> +
>  cleanup:
>      if (dom)
>          virObjectUnlock(dom);
>

Otherwise looks good. I'll see how the rest of the series goes; if this
turns out to be one of only a few problems, I'll fix it myself.

Peter
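P.S.: a minimal sketch of the per-domain variant suggested above. It assumes
a function-scope 'domflags' (an illustrative name, not from the patch)
declared next to 'privflags' so it is still in scope at the endjob label;
the domain lookup and ACL filtering stay as in the existing loop:

    unsigned int privflags = 0;
    unsigned int domflags = 0;

    for (i = 0; i < ndoms; i++) {
        virDomainStatsRecordPtr tmp = NULL;

        /* reset per domain: a failed job on one domain must not
         * mask the monitor flag for the domains that follow */
        domflags = privflags;

        /* domain lookup and ACL filtering as in the existing loop */

        if (HAVE_MONITOR(domflags) &&
            qemuDomainObjBeginJob(driver, dom, QEMU_JOB_QUERY) < 0)
            /* no job for this domain: gather what we can without the
             * monitor, leaving the remaining domains unaffected */
            domflags &= ~QEMU_DOMAIN_STATS_HAVE_MONITOR;

        if (qemuDomainGetStats(conn, dom, stats, &tmp, domflags) < 0)
            goto endjob;

        if (tmp)
            tmpstats[nstats++] = tmp;

        if (HAVE_MONITOR(domflags) && !qemuDomainObjEndJob(driver, dom)) {
            dom = NULL;
            goto cleanup;
        }

        virObjectUnlock(dom);
        dom = NULL;
    }

 endjob:
    if (HAVE_MONITOR(domflags) && dom)
        if (!qemuDomainObjEndJob(driver, dom))
            dom = NULL;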