`qemu_domain.h` depended on `qemu_migration_params.h` and `qemu_monitor.h` because a few qemu_domainjob structures required them. This dependency is removed by introducing a `void *privateData` pointer, which is handled through a structure of callback functions. Additionally, the patch moves the functions `qemuDomainObjPrivateXMLFormatJob` and `qemuDomainObjPrivateXMLParseJob` out of `qemu_domain` and dispatches them through the same domain-job callback structure.

Signed-off-by: Prathamesh Chavan <pc44800@xxxxxxxxx>
---
(For illustration only, a minimal sketch of the privateData/callback pattern follows the patch.)

 src/qemu/qemu_backup.c           |  13 +-
 src/qemu/qemu_domain.c           | 246 +-------------------
 src/qemu/qemu_domainjob.c        | 371 ++++++++++++++++++++++++++++---
 src/qemu/qemu_domainjob.h        |  62 ++++--
 src/qemu/qemu_driver.c           |  21 +-
 src/qemu/qemu_migration.c        |  45 ++--
 src/qemu/qemu_migration_cookie.c |   7 +-
 src/qemu/qemu_migration_params.c |   9 +-
 src/qemu/qemu_migration_params.h |  28 +++
 src/qemu/qemu_process.c          |  24 +-
 10 files changed, 494 insertions(+), 332 deletions(-)

diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c
index 8dc9d2504d..dc81d8d8de 100644
--- a/src/qemu/qemu_backup.c
+++ b/src/qemu/qemu_backup.c
@@ -528,17 +528,19 @@ qemuBackupJobTerminate(virDomainObjPtr vm,
                        qemuDomainJobStatus jobstatus)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
+    qemuDomainJobInfoPrivatePtr completedJobInfo;
     size_t i;
 
     qemuDomainJobInfoUpdateTime(priv->job.current);
 
     g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);
     priv->job.completed = qemuDomainJobInfoCopy(priv->job.current);
+    completedJobInfo = priv->job.completed->privateData;
 
-    priv->job.completed->stats.backup.total = priv->backup->push_total;
-    priv->job.completed->stats.backup.transferred = priv->backup->push_transferred;
-    priv->job.completed->stats.backup.tmp_used = priv->backup->pull_tmp_used;
-    priv->job.completed->stats.backup.tmp_total = priv->backup->pull_tmp_total;
+    completedJobInfo->stats.backup.total = priv->backup->push_total;
+    completedJobInfo->stats.backup.transferred = priv->backup->push_transferred;
+    completedJobInfo->stats.backup.tmp_used = priv->backup->pull_tmp_used;
+    completedJobInfo->stats.backup.tmp_total = priv->backup->pull_tmp_total;
 
     priv->job.completed->status = jobstatus;
     priv->job.completed->errmsg = g_strdup(priv->backup->errmsg);
@@ -997,7 +999,8 @@ qemuBackupGetJobInfoStats(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuDomainJobInfoPtr jobInfo)
 {
-    qemuDomainBackupStats *stats = &jobInfo->stats.backup;
+    qemuDomainJobInfoPrivatePtr jobInfoPriv = jobInfo->privateData;
+    qemuDomainBackupStats *stats = &jobInfoPriv->stats.backup;
     qemuDomainObjPrivatePtr priv = vm->privateData;
     qemuMonitorJobInfoPtr *blockjobs = NULL;
     size_t nblockjobs = 0;
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index c5b8d91f9a..d8328bb79a 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -2144,103 +2144,6 @@ qemuDomainObjPrivateXMLFormatPR(virBufferPtr buf,
         virBufferAddLit(buf, "<prDaemon/>\n");
 }
 
-
-static int
-qemuDomainObjPrivateXMLFormatNBDMigrationSource(virBufferPtr buf,
-                                                virStorageSourcePtr src,
-                                                virDomainXMLOptionPtr xmlopt)
-{
-    g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
-    g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);
-
-    virBufferAsprintf(&attrBuf, " type='%s' format='%s'",
-                      virStorageTypeToString(src->type),
-                      virStorageFileFormatTypeToString(src->format));
-
-    if (virDomainDiskSourceFormat(&childBuf, src, "source", 0, false,
-                                  VIR_DOMAIN_DEF_FORMAT_STATUS,
-                                  false, false, xmlopt) < 0)
-        return -1;
-
-    virXMLFormatElement(buf, 
"migrationSource", &attrBuf, &childBuf); - - return 0; -} - - -static int -qemuDomainObjPrivateXMLFormatNBDMigration(virBufferPtr buf, - virDomainObjPtr vm) -{ - qemuDomainObjPrivatePtr priv = vm->privateData; - size_t i; - virDomainDiskDefPtr disk; - qemuDomainDiskPrivatePtr diskPriv; - - for (i = 0; i < vm->def->ndisks; i++) { - g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER; - g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf); - disk = vm->def->disks[i]; - diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); - - virBufferAsprintf(&attrBuf, " dev='%s' migrating='%s'", - disk->dst, diskPriv->migrating ? "yes" : "no"); - - if (diskPriv->migrSource && - qemuDomainObjPrivateXMLFormatNBDMigrationSource(&childBuf, - diskPriv->migrSource, - priv->driver->xmlopt) < 0) - return -1; - - virXMLFormatElement(buf, "disk", &attrBuf, &childBuf); - } - - return 0; -} - - -static int -qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf, - virDomainObjPtr vm, - qemuDomainObjPrivatePtr priv) -{ - g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER; - g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf); - qemuDomainJob job = priv->job.active; - - if (!qemuDomainTrackJob(job)) - job = QEMU_JOB_NONE; - - if (job == QEMU_JOB_NONE && - priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) - return 0; - - virBufferAsprintf(&attrBuf, " type='%s' async='%s'", - qemuDomainJobTypeToString(job), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob)); - - if (priv->job.phase) { - virBufferAsprintf(&attrBuf, " phase='%s'", - qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, - priv->job.phase)); - } - - if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE) - virBufferAsprintf(&attrBuf, " flags='0x%lx'", priv->job.apiFlags); - - if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT && - qemuDomainObjPrivateXMLFormatNBDMigration(&childBuf, vm) < 0) - return -1; - - if (priv->job.migParams) - qemuMigrationParamsFormat(&childBuf, priv->job.migParams); - - virXMLFormatElement(buf, "job", &attrBuf, &childBuf); - - return 0; -} - - static bool qemuDomainHasSlirp(virDomainObjPtr vm) { @@ -2376,7 +2279,7 @@ qemuDomainObjPrivateXMLFormat(virBufferPtr buf, if (priv->lockState) virBufferAsprintf(buf, "<lockstate>%s</lockstate>\n", priv->lockState); - if (qemuDomainObjPrivateXMLFormatJob(buf, vm, priv) < 0) + if (priv->job.cb.formatJob(buf, vm, &priv->job) < 0) return -1; if (priv->fakeReboot) @@ -2876,151 +2779,6 @@ qemuDomainObjPrivateXMLParsePR(xmlXPathContextPtr ctxt, } -static int -qemuDomainObjPrivateXMLParseJobNBDSource(xmlNodePtr node, - xmlXPathContextPtr ctxt, - virDomainDiskDefPtr disk, - virDomainXMLOptionPtr xmlopt) -{ - VIR_XPATH_NODE_AUTORESTORE(ctxt); - qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); - g_autofree char *format = NULL; - g_autofree char *type = NULL; - g_autoptr(virStorageSource) migrSource = NULL; - xmlNodePtr sourceNode; - - ctxt->node = node; - - if (!(ctxt->node = virXPathNode("./migrationSource", ctxt))) - return 0; - - if (!(type = virXMLPropString(ctxt->node, "type"))) { - virReportError(VIR_ERR_XML_ERROR, "%s", - _("missing storage source type")); - return -1; - } - - if (!(format = virXMLPropString(ctxt->node, "format"))) { - virReportError(VIR_ERR_XML_ERROR, "%s", - _("missing storage source format")); - return -1; - } - - if (!(migrSource = virDomainStorageSourceParseBase(type, format, NULL))) - return -1; - - /* newer libvirt uses the <source> subelement instead of formatting the - * source directly into <migrationSource> */ - if ((sourceNode = virXPathNode("./source", ctxt))) - 
ctxt->node = sourceNode; - - if (virDomainStorageSourceParse(ctxt->node, ctxt, migrSource, - VIR_DOMAIN_DEF_PARSE_STATUS, xmlopt) < 0) - return -1; - - diskPriv->migrSource = g_steal_pointer(&migrSource); - return 0; -} - - -static int -qemuDomainObjPrivateXMLParseJobNBD(virDomainObjPtr vm, - qemuDomainObjPrivatePtr priv, - xmlXPathContextPtr ctxt) -{ - g_autofree xmlNodePtr *nodes = NULL; - size_t i; - int n; - - if ((n = virXPathNodeSet("./disk[@migrating='yes']", ctxt, &nodes)) < 0) - return -1; - - if (n > 0) { - if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) { - VIR_WARN("Found disks marked for migration but we were not " - "migrating"); - n = 0; - } - for (i = 0; i < n; i++) { - virDomainDiskDefPtr disk; - g_autofree char *dst = NULL; - - if ((dst = virXMLPropString(nodes[i], "dev")) && - (disk = virDomainDiskByTarget(vm->def, dst))) { - QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating = true; - - if (qemuDomainObjPrivateXMLParseJobNBDSource(nodes[i], ctxt, - disk, - priv->driver->xmlopt) < 0) - return -1; - } - } - } - - return 0; -} - - -static int -qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, - qemuDomainObjPrivatePtr priv, - xmlXPathContextPtr ctxt) -{ - VIR_XPATH_NODE_AUTORESTORE(ctxt); - g_autofree char *tmp = NULL; - - if (!(ctxt->node = virXPathNode("./job[1]", ctxt))) - return 0; - - if ((tmp = virXPathString("string(@type)", ctxt))) { - int type; - - if ((type = qemuDomainJobTypeFromString(tmp)) < 0) { - virReportError(VIR_ERR_INTERNAL_ERROR, - _("Unknown job type %s"), tmp); - return -1; - } - VIR_FREE(tmp); - priv->job.active = type; - } - - if ((tmp = virXPathString("string(@async)", ctxt))) { - int async; - - if ((async = qemuDomainAsyncJobTypeFromString(tmp)) < 0) { - virReportError(VIR_ERR_INTERNAL_ERROR, - _("Unknown async job type %s"), tmp); - return -1; - } - VIR_FREE(tmp); - priv->job.asyncJob = async; - - if ((tmp = virXPathString("string(@phase)", ctxt))) { - priv->job.phase = qemuDomainAsyncJobPhaseFromString(async, tmp); - if (priv->job.phase < 0) { - virReportError(VIR_ERR_INTERNAL_ERROR, - _("Unknown job phase %s"), tmp); - return -1; - } - VIR_FREE(tmp); - } - } - - if (virXPathULongHex("string(@flags)", ctxt, &priv->job.apiFlags) == -2) { - virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid job flags")); - return -1; - } - - if (qemuDomainObjPrivateXMLParseJobNBD(vm, priv, ctxt) < 0) - return -1; - - if (qemuMigrationParamsParse(ctxt, &priv->job.migParams) < 0) - return -1; - - return 0; -} - - static int qemuDomainObjPrivateXMLParseSlirpFeatures(xmlNodePtr featuresNode, xmlXPathContextPtr ctxt, @@ -3180,7 +2938,7 @@ qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt, priv->lockState = virXPathString("string(./lockstate)", ctxt); - if (qemuDomainObjPrivateXMLParseJob(vm, priv, ctxt) < 0) + if (priv->job.cb.parseJob(vm, &priv->job, ctxt, priv->driver->xmlopt) < 0) goto error; priv->fakeReboot = virXPathBoolean("boolean(./fakereboot)", ctxt) == 1; diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index 7111acadda..5f59c9c61a 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -121,22 +121,58 @@ qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, return -1; } +static void +qemuJobInfoFreePrivateData(qemuDomainJobInfoPrivatePtr priv) +{ + g_free(&priv->stats); +} + +static void +qemuJobInfoFreePrivate(void *opaque) +{ + qemuDomainJobInfoPtr jobInfo = (qemuDomainJobInfoPtr) opaque; + qemuJobInfoFreePrivateData(jobInfo->privateData); +} void qemuDomainJobInfoFree(qemuDomainJobInfoPtr info) { + 
info->cb.freeJobInfoPrivate(info); g_free(info->errmsg); g_free(info); } +static void * +qemuDomainJobInfoPrivateAlloc(void) +{ + qemuDomainJobInfoPrivatePtr retPriv = g_new0(qemuDomainJobInfoPrivate, 1); + return (void *)retPriv; +} + +static qemuDomainJobInfoPtr +qemuDomainJobInfoAlloc(void) +{ + qemuDomainJobInfoPtr ret = g_new0(qemuDomainJobInfo, 1); + ret->cb.allocJobInfoPrivate = &qemuDomainJobInfoPrivateAlloc; + ret->cb.freeJobInfoPrivate = &qemuJobInfoFreePrivate; + ret->privateData = ret->cb.allocJobInfoPrivate(); + return ret; +} + +static void +qemuDomainCurrentJobInfoInit(qemuDomainJobObjPtr job) +{ + job->current = qemuDomainJobInfoAlloc(); + job->current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; +} qemuDomainJobInfoPtr qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info) { - qemuDomainJobInfoPtr ret = g_new0(qemuDomainJobInfo, 1); + qemuDomainJobInfoPtr ret = qemuDomainJobInfoAlloc(); memcpy(ret, info, sizeof(*info)); - + memcpy(ret->privateData, info->privateData, sizeof(qemuDomainJobInfoPrivate)); ret->errmsg = g_strdup(info->errmsg); return ret; @@ -166,10 +202,43 @@ qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, } +static void * +qemuJobAllocPrivate(void) +{ + qemuDomainJobPrivatePtr priv; + if (VIR_ALLOC(priv) < 0) + return NULL; + return (void *)priv; +} + + +static void +qemuJobFreePrivateData(qemuDomainJobPrivatePtr priv) +{ + priv->spiceMigration = false; + priv->spiceMigrated = false; + priv->dumpCompleted = false; + qemuMigrationParamsFree(priv->migParams); + priv->migParams = NULL; +} + +static void +qemuJobFreePrivate(void *opaque) +{ + qemuDomainJobObjPtr job = (qemuDomainJobObjPtr) opaque; + qemuJobFreePrivateData(job->privateData); +} + + int qemuDomainObjInitJob(qemuDomainJobObjPtr job) { memset(job, 0, sizeof(*job)); + job->cb.allocJobPrivate = &qemuJobAllocPrivate; + job->cb.freeJobPrivate = &qemuJobFreePrivate; + job->cb.formatJob = &qemuDomainObjPrivateXMLFormatJob; + job->cb.parseJob = &qemuDomainObjPrivateXMLParseJob; + job->privateData = job->cb.allocJobPrivate(); if (virCondInit(&job->cond) < 0) return -1; @@ -213,13 +282,9 @@ qemuDomainObjResetAsyncJob(qemuDomainJobObjPtr job) job->phase = 0; job->mask = QEMU_JOB_DEFAULT_MASK; job->abortJob = false; - job->spiceMigration = false; - job->spiceMigrated = false; - job->dumpCompleted = false; VIR_FREE(job->error); g_clear_pointer(&job->current, qemuDomainJobInfoFree); - qemuMigrationParamsFree(job->migParams); - job->migParams = NULL; + job->cb.freeJobPrivate(job); job->apiFlags = 0; } @@ -235,7 +300,7 @@ qemuDomainObjRestoreJob(virDomainObjPtr obj, job->asyncJob = priv->job.asyncJob; job->asyncOwner = priv->job.asyncOwner; job->phase = priv->job.phase; - job->migParams = g_steal_pointer(&priv->job.migParams); + job->privateData = g_steal_pointer(&priv->job.privateData); job->apiFlags = priv->job.apiFlags; qemuDomainObjResetJob(&priv->job); @@ -285,6 +350,7 @@ int qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo) { unsigned long long now; + qemuDomainJobInfoPrivatePtr jobInfoPriv = jobInfo->privateData; if (!jobInfo->stopped) return 0; @@ -298,8 +364,8 @@ qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo) return 0; } - jobInfo->stats.mig.downtime = now - jobInfo->stopped; - jobInfo->stats.mig.downtime_set = true; + jobInfoPriv->stats.mig.downtime = now - jobInfo->stopped; + jobInfoPriv->stats.mig.downtime_set = true; return 0; } @@ -334,38 +400,39 @@ int qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, virDomainJobInfoPtr info) { + qemuDomainJobInfoPrivatePtr jobInfoPriv 
= jobInfo->privateData; info->type = qemuDomainJobStatusToType(jobInfo->status); info->timeElapsed = jobInfo->timeElapsed; switch (jobInfo->statsType) { case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: - info->memTotal = jobInfo->stats.mig.ram_total; - info->memRemaining = jobInfo->stats.mig.ram_remaining; - info->memProcessed = jobInfo->stats.mig.ram_transferred; - info->fileTotal = jobInfo->stats.mig.disk_total + + info->memTotal = jobInfoPriv->stats.mig.ram_total; + info->memRemaining = jobInfoPriv->stats.mig.ram_remaining; + info->memProcessed = jobInfoPriv->stats.mig.ram_transferred; + info->fileTotal = jobInfoPriv->stats.mig.disk_total + jobInfo->mirrorStats.total; - info->fileRemaining = jobInfo->stats.mig.disk_remaining + + info->fileRemaining = jobInfoPriv->stats.mig.disk_remaining + (jobInfo->mirrorStats.total - jobInfo->mirrorStats.transferred); - info->fileProcessed = jobInfo->stats.mig.disk_transferred + + info->fileProcessed = jobInfoPriv->stats.mig.disk_transferred + jobInfo->mirrorStats.transferred; break; case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: - info->memTotal = jobInfo->stats.mig.ram_total; - info->memRemaining = jobInfo->stats.mig.ram_remaining; - info->memProcessed = jobInfo->stats.mig.ram_transferred; + info->memTotal = jobInfoPriv->stats.mig.ram_total; + info->memRemaining = jobInfoPriv->stats.mig.ram_remaining; + info->memProcessed = jobInfoPriv->stats.mig.ram_transferred; break; case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: - info->memTotal = jobInfo->stats.dump.total; - info->memProcessed = jobInfo->stats.dump.completed; + info->memTotal = jobInfoPriv->stats.dump.total; + info->memProcessed = jobInfoPriv->stats.dump.completed; info->memRemaining = info->memTotal - info->memProcessed; break; case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: - info->fileTotal = jobInfo->stats.backup.total; - info->fileProcessed = jobInfo->stats.backup.transferred; + info->fileTotal = jobInfoPriv->stats.backup.total; + info->fileProcessed = jobInfoPriv->stats.backup.transferred; info->fileRemaining = info->fileTotal - info->fileProcessed; break; @@ -387,7 +454,8 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfoPtr jobInfo, virTypedParameterPtr *params, int *nparams) { - qemuMonitorMigrationStats *stats = &jobInfo->stats.mig; + qemuDomainJobInfoPrivatePtr jobInfoPriv = jobInfo->privateData; + qemuMonitorMigrationStats *stats = &jobInfoPriv->stats.mig; qemuDomainMirrorStatsPtr mirrorStats = &jobInfo->mirrorStats; virTypedParameterPtr par = NULL; int maxpar = 0; @@ -564,7 +632,8 @@ qemuDomainDumpJobInfoToParams(qemuDomainJobInfoPtr jobInfo, virTypedParameterPtr *params, int *nparams) { - qemuMonitorDumpStats *stats = &jobInfo->stats.dump; + qemuDomainJobInfoPrivatePtr jobInfoPriv = jobInfo->privateData; + qemuMonitorDumpStats *stats = &jobInfoPriv->stats.dump; virTypedParameterPtr par = NULL; int maxpar = 0; int npar = 0; @@ -607,7 +676,8 @@ qemuDomainBackupJobInfoToParams(qemuDomainJobInfoPtr jobInfo, virTypedParameterPtr *params, int *nparams) { - qemuDomainBackupStats *stats = &jobInfo->stats.backup; + qemuDomainJobInfoPrivatePtr jobInfoPriv = jobInfo->privateData; + qemuDomainBackupStats *stats = &jobInfoPriv->stats.backup; g_autoptr(virTypedParamList) par = g_new0(virTypedParamList, 1); if (virTypedParamListAddInt(par, jobInfo->operation, @@ -782,6 +852,7 @@ qemuDomainObjCanSetJob(qemuDomainJobObjPtr job, /* Give up waiting for mutex after 30 seconds */ #define QEMU_JOB_WAIT_TIME (1000ull * 30) + /** * qemuDomainObjBeginJobInternal: * @driver: qemu driver @@ -890,8 +961,7 @@ 
qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, qemuDomainAsyncJobTypeToString(asyncJob), obj, obj->def->name); qemuDomainObjResetAsyncJob(&priv->job); - priv->job.current = g_new0(qemuDomainJobInfo, 1); - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + qemuDomainCurrentJobInfoInit(&priv->job); priv->job.asyncJob = asyncJob; priv->job.asyncOwner = virThreadSelfID(); priv->job.asyncOwnerAPI = virThreadJobGet(); @@ -1190,3 +1260,248 @@ qemuDomainObjAbortAsyncJob(virDomainObjPtr obj) priv->job.abortJob = true; virDomainObjBroadcast(obj); } + + +static int +qemuDomainObjPrivateXMLFormatNBDMigrationSource(virBufferPtr buf, + virStorageSourcePtr src, + virDomainXMLOptionPtr xmlopt) +{ + g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER; + g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf); + + virBufferAsprintf(&attrBuf, " type='%s' format='%s'", + virStorageTypeToString(src->type), + virStorageFileFormatTypeToString(src->format)); + + if (virDomainDiskSourceFormat(&childBuf, src, "source", 0, false, + VIR_DOMAIN_DEF_FORMAT_STATUS, + false, false, xmlopt) < 0) + return -1; + + virXMLFormatElement(buf, "migrationSource", &attrBuf, &childBuf); + + return 0; +} + + +static int +qemuDomainObjPrivateXMLFormatNBDMigration(virBufferPtr buf, + virDomainObjPtr vm) +{ + qemuDomainObjPrivatePtr priv = vm->privateData; + size_t i; + virDomainDiskDefPtr disk; + qemuDomainDiskPrivatePtr diskPriv; + + for (i = 0; i < vm->def->ndisks; i++) { + g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER; + g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf); + disk = vm->def->disks[i]; + diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); + + virBufferAsprintf(&attrBuf, " dev='%s' migrating='%s'", + disk->dst, diskPriv->migrating ? "yes" : "no"); + + if (diskPriv->migrSource && + qemuDomainObjPrivateXMLFormatNBDMigrationSource(&childBuf, + diskPriv->migrSource, + priv->driver->xmlopt) < 0) + return -1; + + virXMLFormatElement(buf, "disk", &attrBuf, &childBuf); + } + + return 0; +} + + +int +qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf, + virDomainObjPtr vm, + qemuDomainJobObjPtr jobObj) +{ + g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER; + g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf); + qemuDomainJob job = jobObj->active; + qemuDomainJobPrivatePtr jobPriv = jobObj->privateData; + + if (!qemuDomainTrackJob(job)) + job = QEMU_JOB_NONE; + + if (job == QEMU_JOB_NONE && + jobObj->asyncJob == QEMU_ASYNC_JOB_NONE) + return 0; + + virBufferAsprintf(&attrBuf, " type='%s' async='%s'", + qemuDomainJobTypeToString(job), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob)); + + if (jobObj->phase) { + virBufferAsprintf(&attrBuf, " phase='%s'", + qemuDomainAsyncJobPhaseToString(jobObj->asyncJob, + jobObj->phase)); + } + + if (jobObj->asyncJob != QEMU_ASYNC_JOB_NONE) + virBufferAsprintf(&attrBuf, " flags='0x%lx'", jobObj->apiFlags); + + if (jobObj->asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT && + qemuDomainObjPrivateXMLFormatNBDMigration(&childBuf, vm) < 0) + return -1; + + if (jobPriv->migParams) + qemuMigrationParamsFormat(&childBuf, jobPriv->migParams); + + virXMLFormatElement(buf, "job", &attrBuf, &childBuf); + + return 0; +} + + +static int +qemuDomainObjPrivateXMLParseJobNBDSource(xmlNodePtr node, + xmlXPathContextPtr ctxt, + virDomainDiskDefPtr disk, + virDomainXMLOptionPtr xmlopt) +{ + VIR_XPATH_NODE_AUTORESTORE(ctxt); + qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); + g_autofree char *format = NULL; + g_autofree char *type = NULL; + g_autoptr(virStorageSource) 
migrSource = NULL; + xmlNodePtr sourceNode; + + ctxt->node = node; + + if (!(ctxt->node = virXPathNode("./migrationSource", ctxt))) + return 0; + + if (!(type = virXMLPropString(ctxt->node, "type"))) { + virReportError(VIR_ERR_XML_ERROR, "%s", + _("missing storage source type")); + return -1; + } + + if (!(format = virXMLPropString(ctxt->node, "format"))) { + virReportError(VIR_ERR_XML_ERROR, "%s", + _("missing storage source format")); + return -1; + } + + if (!(migrSource = virDomainStorageSourceParseBase(type, format, NULL))) + return -1; + + /* newer libvirt uses the <source> subelement instead of formatting the + * source directly into <migrationSource> */ + if ((sourceNode = virXPathNode("./source", ctxt))) + ctxt->node = sourceNode; + + if (virDomainStorageSourceParse(ctxt->node, ctxt, migrSource, + VIR_DOMAIN_DEF_PARSE_STATUS, xmlopt) < 0) + return -1; + + diskPriv->migrSource = g_steal_pointer(&migrSource); + return 0; +} + + +static int +qemuDomainObjPrivateXMLParseJobNBD(virDomainObjPtr vm, + qemuDomainJobObjPtr job, + xmlXPathContextPtr ctxt, + virDomainXMLOptionPtr xmlopt) +{ + g_autofree xmlNodePtr *nodes = NULL; + size_t i; + int n; + + if ((n = virXPathNodeSet("./disk[@migrating='yes']", ctxt, &nodes)) < 0) + return -1; + + if (n > 0) { + if (job->asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) { + VIR_WARN("Found disks marked for migration but we were not " + "migrating"); + n = 0; + } + for (i = 0; i < n; i++) { + virDomainDiskDefPtr disk; + g_autofree char *dst = NULL; + + if ((dst = virXMLPropString(nodes[i], "dev")) && + (disk = virDomainDiskByTarget(vm->def, dst))) { + QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating = true; + + if (qemuDomainObjPrivateXMLParseJobNBDSource(nodes[i], ctxt, + disk, + xmlopt) < 0) + return -1; + } + } + } + + return 0; +} + + +int +qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, + qemuDomainJobObjPtr job, + xmlXPathContextPtr ctxt, + virDomainXMLOptionPtr xmlopt) +{ + qemuDomainJobPrivatePtr jobPriv = job->privateData; + VIR_XPATH_NODE_AUTORESTORE(ctxt); + g_autofree char *tmp = NULL; + + if (!(ctxt->node = virXPathNode("./job[1]", ctxt))) + return 0; + + if ((tmp = virXPathString("string(@type)", ctxt))) { + int type; + + if ((type = qemuDomainJobTypeFromString(tmp)) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, + _("Unknown job type %s"), tmp); + return -1; + } + VIR_FREE(tmp); + job->active = type; + } + + if ((tmp = virXPathString("string(@async)", ctxt))) { + int async; + + if ((async = qemuDomainAsyncJobTypeFromString(tmp)) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, + _("Unknown async job type %s"), tmp); + return -1; + } + VIR_FREE(tmp); + job->asyncJob = async; + + if ((tmp = virXPathString("string(@phase)", ctxt))) { + job->phase = qemuDomainAsyncJobPhaseFromString(async, tmp); + if (job->phase < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, + _("Unknown job phase %s"), tmp); + return -1; + } + VIR_FREE(tmp); + } + } + + if (virXPathULongHex("string(@flags)", ctxt, &job->apiFlags) == -2) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid job flags")); + return -1; + } + + if (qemuDomainObjPrivateXMLParseJobNBD(vm, job, ctxt, xmlopt) < 0) + return -1; + + if (qemuMigrationParamsParse(ctxt, &jobPriv->migParams) < 0) + return -1; + + return 0; +} diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index 124664354d..73ac9b40d9 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -19,7 +19,6 @@ #pragma once #include <glib-object.h> -#include "qemu_migration_params.h" #define JOB_MASK(job) 
(job == 0 ? 0 : 1 << (job - 1)) #define QEMU_JOB_DEFAULT_MASK \ @@ -99,7 +98,6 @@ typedef enum { QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP, } qemuDomainJobStatsType; - typedef struct _qemuDomainMirrorStats qemuDomainMirrorStats; typedef qemuDomainMirrorStats *qemuDomainMirrorStatsPtr; struct _qemuDomainMirrorStats { @@ -107,12 +105,13 @@ struct _qemuDomainMirrorStats { unsigned long long total; }; -typedef struct _qemuDomainBackupStats qemuDomainBackupStats; -struct _qemuDomainBackupStats { - unsigned long long transferred; - unsigned long long total; - unsigned long long tmp_used; - unsigned long long tmp_total; +typedef void *(*qemuDomainObjJobInfoPrivateAlloc)(void); +typedef void (*qemuDomainObjJobInfoPrivateFree)(void *); + +typedef struct _qemuDomainObjPrivateJobInfoCallbacks qemuDomainObjPrivateJobInfoCallbacks; +struct _qemuDomainObjPrivateJobInfoCallbacks { + qemuDomainObjJobInfoPrivateAlloc allocJobInfoPrivate; + qemuDomainObjJobInfoPrivateFree freeJobInfoPrivate; }; typedef struct _qemuDomainJobInfo qemuDomainJobInfo; @@ -136,16 +135,15 @@ struct _qemuDomainJobInfo { bool timeDeltaSet; /* Raw values from QEMU */ qemuDomainJobStatsType statsType; - union { - qemuMonitorMigrationStats mig; - qemuMonitorDumpStats dump; - qemuDomainBackupStats backup; - } stats; qemuDomainMirrorStats mirrorStats; char *errmsg; /* optional error message for failed completed jobs */ + + void *privateData; /* job specific collection of info */ + qemuDomainObjPrivateJobInfoCallbacks cb; }; + void qemuDomainJobInfoFree(qemuDomainJobInfoPtr info); @@ -156,6 +154,25 @@ qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info); typedef struct _qemuDomainJobObj qemuDomainJobObj; typedef qemuDomainJobObj *qemuDomainJobObjPtr; + +typedef void *(*qemuDomainObjPrivateJobAlloc)(void); +typedef void (*qemuDomainObjPrivateJobFree)(void *); +typedef int (*qemuDomainObjPrivateJobFormat)(virBufferPtr, + virDomainObjPtr, + qemuDomainJobObjPtr); +typedef int (*qemuDomainObjPrivateJobParse)(virDomainObjPtr, + qemuDomainJobObjPtr, + xmlXPathContextPtr, + virDomainXMLOptionPtr); + +typedef struct _qemuDomainObjPrivateJobCallbacks qemuDomainObjPrivateJobCallbacks; +struct _qemuDomainObjPrivateJobCallbacks { + qemuDomainObjPrivateJobAlloc allocJobPrivate; + qemuDomainObjPrivateJobFree freeJobPrivate; + qemuDomainObjPrivateJobFormat formatJob; + qemuDomainObjPrivateJobParse parseJob; +}; + struct _qemuDomainJobObj { virCond cond; /* Use to coordinate jobs */ @@ -182,14 +199,10 @@ struct _qemuDomainJobObj { qemuDomainJobInfoPtr current; /* async job progress data */ qemuDomainJobInfoPtr completed; /* statistics data of a recently completed job */ bool abortJob; /* abort of the job requested */ - bool spiceMigration; /* we asked for spice migration and we - * should wait for it to finish */ - bool spiceMigrated; /* spice migration completed */ char *error; /* job event completion error */ - bool dumpCompleted; /* dump completed */ - - qemuMigrationParamsPtr migParams; unsigned long apiFlags; /* flags passed to the API which started the async job */ + void *privateData; /* job specific collection of data */ + struct _qemuDomainObjPrivateJobCallbacks cb; }; const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, @@ -267,3 +280,14 @@ void qemuDomainObjFreeJob(qemuDomainJobObjPtr job); int qemuDomainObjInitJob(qemuDomainJobObjPtr job); bool qemuDomainJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob); + +int +qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf, + virDomainObjPtr vm, + qemuDomainJobObjPtr jobObj); + +int 
+qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, + qemuDomainJobObjPtr job, + xmlXPathContextPtr ctxt, + virDomainXMLOptionPtr xmlopt); diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index a5b38b3d24..79fb38ae02 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -3701,14 +3701,16 @@ static int qemuDumpWaitForCompletion(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; + qemuDomainJobInfoPrivatePtr jobInfoPriv = priv->job.current->privateData; VIR_DEBUG("Waiting for dump completion"); - while (!priv->job.dumpCompleted && !priv->job.abortJob) { + while (!jobPriv->dumpCompleted && !priv->job.abortJob) { if (virDomainObjWait(vm) < 0) return -1; } - if (priv->job.current->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) { + if (jobInfoPriv->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) { if (priv->job.error) virReportError(VIR_ERR_OPERATION_FAILED, _("memory-only dump failed: %s"), @@ -13554,6 +13556,7 @@ qemuDomainGetJobInfoDumpStats(virQEMUDriverPtr driver, qemuDomainJobInfoPtr jobInfo) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobInfoPrivatePtr jobInfoPriv = jobInfo->privateData; qemuMonitorDumpStats stats = { 0 }; int rc; @@ -13565,33 +13568,33 @@ qemuDomainGetJobInfoDumpStats(virQEMUDriverPtr driver, if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) return -1; - jobInfo->stats.dump = stats; + jobInfoPriv->stats.dump = stats; if (qemuDomainJobInfoUpdateTime(jobInfo) < 0) return -1; - switch (jobInfo->stats.dump.status) { + switch (jobInfoPriv->stats.dump.status) { case QEMU_MONITOR_DUMP_STATUS_NONE: case QEMU_MONITOR_DUMP_STATUS_FAILED: case QEMU_MONITOR_DUMP_STATUS_LAST: virReportError(VIR_ERR_OPERATION_FAILED, _("dump query failed, status=%d"), - jobInfo->stats.dump.status); + jobInfoPriv->stats.dump.status); return -1; break; case QEMU_MONITOR_DUMP_STATUS_ACTIVE: jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; VIR_DEBUG("dump active, bytes written='%llu' remaining='%llu'", - jobInfo->stats.dump.completed, - jobInfo->stats.dump.total - - jobInfo->stats.dump.completed); + jobInfoPriv->stats.dump.completed, + jobInfoPriv->stats.dump.total - + jobInfoPriv->stats.dump.completed); break; case QEMU_MONITOR_DUMP_STATUS_COMPLETED: jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; VIR_DEBUG("dump completed, bytes written='%llu'", - jobInfo->stats.dump.completed); + jobInfoPriv->stats.dump.completed); break; } diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 13427c1203..a45d13aaac 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -1422,12 +1422,13 @@ static int qemuMigrationSrcWaitForSpice(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; - if (!priv->job.spiceMigration) + if (!jobPriv->spiceMigration) return 0; VIR_DEBUG("Waiting for SPICE to finish migration"); - while (!priv->job.spiceMigrated && !priv->job.abortJob) { + while (!jobPriv->spiceMigrated && !priv->job.abortJob) { if (virDomainObjWait(vm) < 0) return -1; } @@ -1438,7 +1439,8 @@ qemuMigrationSrcWaitForSpice(virDomainObjPtr vm) static void qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo) { - switch ((qemuMonitorMigrationStatus) jobInfo->stats.mig.status) { + qemuDomainJobInfoPrivatePtr jobInfoPriv = jobInfo->privateData; + switch ((qemuMonitorMigrationStatus) jobInfoPriv->stats.mig.status) { case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY: 
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_POSTCOPY; break; @@ -1485,6 +1487,7 @@ qemuMigrationAnyFetchStats(virQEMUDriverPtr driver, char **error) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobInfoPrivatePtr jobInfoPriv = jobInfo->privateData; qemuMonitorMigrationStats stats; int rv; @@ -1496,7 +1499,7 @@ qemuMigrationAnyFetchStats(virQEMUDriverPtr driver, if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0) return -1; - jobInfo->stats.mig = stats; + jobInfoPriv->stats.mig = stats; return 0; } @@ -1538,12 +1541,14 @@ qemuMigrationJobCheckStatus(virQEMUDriverPtr driver, { qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainJobInfoPtr jobInfo = priv->job.current; + qemuDomainJobInfoPrivatePtr jobInfoPriv = jobInfo->privateData; + char *error = NULL; bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); int ret = -1; if (!events || - jobInfo->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) { + jobInfoPriv->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) { if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0) return -1; } @@ -1777,6 +1782,7 @@ qemuMigrationSrcGraphicsRelocate(virQEMUDriverPtr driver, const char *graphicsuri) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; int ret = -1; const char *listenAddress = NULL; virSocketAddr addr; @@ -1858,7 +1864,7 @@ qemuMigrationSrcGraphicsRelocate(virQEMUDriverPtr driver, QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress, port, tlsPort, tlsSubject); - priv->job.spiceMigration = !ret; + jobPriv->spiceMigration = !ret; if (qemuDomainObjExitMonitor(driver, vm) < 0) ret = -1; } @@ -1993,6 +1999,7 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm, { virQEMUDriverPtr driver = opaque; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s", vm->def->name, conn, @@ -2018,7 +2025,7 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm, " domain was successfully started on destination or not", vm->def->name); qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, - priv->job.migParams, priv->job.apiFlags); + jobPriv->migParams, priv->job.apiFlags); /* clear the job and let higher levels decide what to do */ qemuDomainObjDiscardAsyncJob(driver, vm); break; @@ -2403,6 +2410,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, bool relabel = false; int rv; char *tlsAlias = NULL; + qemuDomainJobPrivatePtr jobPriv = NULL; virNWFilterReadLockFilterUpdates(); @@ -2410,7 +2418,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, if (flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", - _("offline migration cannot handle " + _("offlineqemuDomainJobPrivatePtr jobPriv = priv->job.privateData;priv migration cannot handle " "non-shared storage")); goto cleanup; } @@ -2519,6 +2527,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, *def = NULL; priv = vm->privateData; + jobPriv = priv->job.privateData; priv->origname = g_strdup(origname); if (taint_hook) { @@ -2726,7 +2735,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, stopjob: qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, - priv->job.migParams, priv->job.apiFlags); + jobPriv->migParams, priv->job.apiFlags); if (stopProcess) { unsigned int stopFlags = VIR_QEMU_PROCESS_STOP_MIGRATED; @@ -3000,6 +3009,7 @@ 
qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver); qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainJobInfoPtr jobInfo = NULL; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, " "flags=0x%x, retcode=%d", @@ -3025,6 +3035,8 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, /* Update times with the values sent by the destination daemon */ if (mig->jobInfo && jobInfo) { + qemuDomainJobInfoPrivatePtr jobInfoPriv = jobInfo->privateData; + qemuDomainJobInfoPrivatePtr migJobInfoPriv = mig->jobInfo->privateData; int reason; /* We need to refresh migration statistics after a completed post-copy @@ -3040,8 +3052,8 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, qemuDomainJobInfoUpdateTime(jobInfo); jobInfo->timeDeltaSet = mig->jobInfo->timeDeltaSet; jobInfo->timeDelta = mig->jobInfo->timeDelta; - jobInfo->stats.mig.downtime_set = mig->jobInfo->stats.mig.downtime_set; - jobInfo->stats.mig.downtime = mig->jobInfo->stats.mig.downtime; + jobInfoPriv->stats.mig.downtime_set = migJobInfoPriv->stats.mig.downtime_set; + jobInfoPriv->stats.mig.downtime = migJobInfoPriv->stats.mig.downtime; } if (flags & VIR_MIGRATE_OFFLINE) @@ -3084,7 +3096,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, qemuMigrationSrcRestoreDomainState(driver, vm); qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, - priv->job.migParams, priv->job.apiFlags); + jobPriv->migParams, priv->job.apiFlags); if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0) VIR_WARN("Failed to save status on vm %s", vm->def->name); @@ -4681,6 +4693,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver, virErrorPtr orig_err = NULL; virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver); qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, flags) < 0) @@ -4738,7 +4751,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver, */ if (!v3proto && ret < 0) qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, - priv->job.migParams, priv->job.apiFlags); + jobPriv->migParams, priv->job.apiFlags); qemuMigrationSrcRestoreDomainState(driver, vm); @@ -4780,6 +4793,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver, unsigned long resource) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; int ret = -1; /* If we didn't start the job in the begin phase, start it now. 
*/ @@ -4814,7 +4828,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver, endjob: if (ret < 0) { qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, - priv->job.migParams, priv->job.apiFlags); + jobPriv->migParams, priv->job.apiFlags); qemuMigrationJobFinish(driver, vm); } else { qemuMigrationJobContinue(vm); @@ -5019,6 +5033,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, virErrorPtr orig_err = NULL; int cookie_flags = 0; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver); unsigned short port; unsigned long long timeReceived = 0; @@ -5272,7 +5287,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, } qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, - priv->job.migParams, priv->job.apiFlags); + jobPriv->migParams, priv->job.apiFlags); qemuMigrationJobFinish(driver, vm); if (!virDomainObjIsActive(vm)) diff --git a/src/qemu/qemu_migration_cookie.c b/src/qemu/qemu_migration_cookie.c index fb8b5bcd92..b5f4647539 100644 --- a/src/qemu/qemu_migration_cookie.c +++ b/src/qemu/qemu_migration_cookie.c @@ -641,7 +641,8 @@ static void qemuMigrationCookieStatisticsXMLFormat(virBufferPtr buf, qemuDomainJobInfoPtr jobInfo) { - qemuMonitorMigrationStats *stats = &jobInfo->stats.mig; + qemuDomainJobInfoPrivatePtr jobInfoPriv = jobInfo->privateData; + qemuMonitorMigrationStats *stats = &jobInfoPriv->stats.mig; virBufferAddLit(buf, "<statistics>\n"); virBufferAdjustIndent(buf, 2); @@ -1044,6 +1045,7 @@ static qemuDomainJobInfoPtr qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt) { qemuDomainJobInfoPtr jobInfo = NULL; + qemuDomainJobInfoPrivatePtr jobInfoPriv = NULL; qemuMonitorMigrationStats *stats; VIR_XPATH_NODE_AUTORESTORE(ctxt); @@ -1051,8 +1053,9 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt) return NULL; jobInfo = g_new0(qemuDomainJobInfo, 1); + jobInfoPriv = jobInfo->privateData; - stats = &jobInfo->stats.mig; + stats = &jobInfoPriv->stats.mig; jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; virXPathULongLong("string(./started[1])", ctxt, &jobInfo->started); diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c index 6953badcfe..ba3eb14831 100644 --- a/src/qemu/qemu_migration_params.c +++ b/src/qemu/qemu_migration_params.c @@ -953,6 +953,7 @@ qemuMigrationParamsEnableTLS(virQEMUDriverPtr driver, qemuMigrationParamsPtr migParams) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; virJSONValuePtr tlsProps = NULL; virJSONValuePtr secProps = NULL; virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver); @@ -965,7 +966,7 @@ qemuMigrationParamsEnableTLS(virQEMUDriverPtr driver, goto error; } - if (!priv->job.migParams->params[QEMU_MIGRATION_PARAM_TLS_CREDS].set) { + if (!jobPriv->migParams->params[QEMU_MIGRATION_PARAM_TLS_CREDS].set) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", _("TLS migration is not supported with this " "QEMU binary")); @@ -1038,8 +1039,9 @@ qemuMigrationParamsDisableTLS(virDomainObjPtr vm, qemuMigrationParamsPtr migParams) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; - if (!priv->job.migParams->params[QEMU_MIGRATION_PARAM_TLS_CREDS].set) + if (!jobPriv->migParams->params[QEMU_MIGRATION_PARAM_TLS_CREDS].set) return 0; if (qemuMigrationParamsSetString(migParams, @@ -1168,6 +1170,7 @@ qemuMigrationParamsCheck(virQEMUDriverPtr driver, 
virBitmapPtr remoteCaps) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; qemuMigrationCapability cap; qemuMigrationParty party; size_t i; @@ -1221,7 +1224,7 @@ qemuMigrationParamsCheck(virQEMUDriverPtr driver, * to ask QEMU for their current settings. */ - return qemuMigrationParamsFetch(driver, vm, asyncJob, &priv->job.migParams); + return qemuMigrationParamsFetch(driver, vm, asyncJob, &jobPriv->migParams); } diff --git a/src/qemu/qemu_migration_params.h b/src/qemu/qemu_migration_params.h index 9aea24725f..381eabbe4a 100644 --- a/src/qemu/qemu_migration_params.h +++ b/src/qemu/qemu_migration_params.h @@ -70,6 +70,34 @@ typedef enum { QEMU_MIGRATION_DESTINATION = (1 << 1), } qemuMigrationParty; +typedef struct _qemuDomainJobPrivate qemuDomainJobPrivate; +typedef qemuDomainJobPrivate *qemuDomainJobPrivatePtr; +struct _qemuDomainJobPrivate { + bool spiceMigration; /* we asked for spice migration and we + * should wait for it to finish */ + bool spiceMigrated; /* spice migration completed */ + bool dumpCompleted; /* dump completed */ + qemuMigrationParamsPtr migParams; +}; + + +typedef struct _qemuDomainBackupStats qemuDomainBackupStats; +struct _qemuDomainBackupStats { + unsigned long long transferred; + unsigned long long total; + unsigned long long tmp_used; + unsigned long long tmp_total; +}; + +typedef struct _qemuDomainJobInfoPrivate qemuDomainJobInfoPrivate; +typedef qemuDomainJobInfoPrivate *qemuDomainJobInfoPrivatePtr; +struct _qemuDomainJobInfoPrivate { + union { + qemuMonitorMigrationStats mig; + qemuMonitorDumpStats dump; + qemuDomainBackupStats backup; + } stats; +}; virBitmapPtr qemuMigrationParamsGetAlwaysOnCaps(qemuMigrationParty party); diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index d36088ba98..97c6b2ec27 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -1608,6 +1608,7 @@ qemuProcessHandleSpiceMigrated(qemuMonitorPtr mon G_GNUC_UNUSED, void *opaque G_GNUC_UNUSED) { qemuDomainObjPrivatePtr priv; + qemuDomainJobPrivatePtr jobPriv; virObjectLock(vm); @@ -1615,12 +1616,13 @@ qemuProcessHandleSpiceMigrated(qemuMonitorPtr mon G_GNUC_UNUSED, vm, vm->def->name); priv = vm->privateData; + jobPriv = priv->job.privateData; if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) { VIR_DEBUG("got SPICE_MIGRATE_COMPLETED event without a migration job"); goto cleanup; } - priv->job.spiceMigrated = true; + jobPriv->spiceMigrated = true; virDomainObjBroadcast(vm); cleanup: @@ -1636,6 +1638,7 @@ qemuProcessHandleMigrationStatus(qemuMonitorPtr mon G_GNUC_UNUSED, void *opaque) { qemuDomainObjPrivatePtr priv; + qemuDomainJobInfoPrivatePtr jobInfoPriv; virQEMUDriverPtr driver = opaque; virObjectEventPtr event = NULL; g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); @@ -1648,12 +1651,13 @@ qemuProcessHandleMigrationStatus(qemuMonitorPtr mon G_GNUC_UNUSED, qemuMonitorMigrationStatusTypeToString(status)); priv = vm->privateData; + jobInfoPriv = priv->job.current->privateData; if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) { VIR_DEBUG("got MIGRATION event without a migration job"); goto cleanup; } - priv->job.current->stats.mig.status = status; + jobInfoPriv->stats.mig.status = status; virDomainObjBroadcast(vm); if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY && @@ -1720,6 +1724,8 @@ qemuProcessHandleDumpCompleted(qemuMonitorPtr mon G_GNUC_UNUSED, void *opaque G_GNUC_UNUSED) { qemuDomainObjPrivatePtr priv; + qemuDomainJobPrivatePtr jobPriv; + qemuDomainJobInfoPrivatePtr 
jobInfoPriv; virObjectLock(vm); @@ -1727,18 +1733,20 @@ qemuProcessHandleDumpCompleted(qemuMonitorPtr mon G_GNUC_UNUSED, vm, vm->def->name, stats, NULLSTR(error)); priv = vm->privateData; + jobPriv = priv->job.privateData; + jobInfoPriv = priv->job.current->privateData; if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) { VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job"); goto cleanup; } - priv->job.dumpCompleted = true; - priv->job.current->stats.dump = *stats; + jobPriv->dumpCompleted = true; + jobInfoPriv->stats.dump = *stats; priv->job.error = g_strdup(error); /* Force error if extracting the DUMP_COMPLETED status failed */ if (!error && status < 0) { priv->job.error = g_strdup(virGetLastErrorMessage()); - priv->job.current->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED; + jobInfoPriv->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED; } virDomainObjBroadcast(vm); @@ -3411,6 +3419,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver, virDomainState state, int reason) { + qemuDomainJobPrivatePtr jobPriv = job->privateData; bool postcopy = (state == VIR_DOMAIN_PAUSED && reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED) || (state == VIR_DOMAIN_RUNNING && @@ -3459,7 +3468,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver, } qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE, - job->migParams, job->apiFlags); + jobPriv->migParams, job->apiFlags); return 0; } @@ -3471,6 +3480,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, int reason, unsigned int *stopFlags) { + qemuDomainJobPrivatePtr jobPriv = job->privateData; bool postcopy = state == VIR_DOMAIN_PAUSED && (reason == VIR_DOMAIN_PAUSED_POSTCOPY || reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED); @@ -3554,7 +3564,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, } qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE, - job->migParams, job->apiFlags); + jobPriv->migParams, job->apiFlags); return 0; } -- 2.17.1
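
For anyone reviewing the refactor, below is a minimal, self-contained C sketch of the pattern this patch introduces: a generic job object that carries only an opaque `void *privateData` plus a table of callbacks for allocating, freeing and formatting that data, so the generic code never needs the headers that define the driver-specific state. All identifiers in the sketch (sampleJobObj, sampleDriverJobPrivate, ...) are made up for illustration and are not libvirt API; the patch's real counterparts are the qemuDomainObjPrivateJobCallbacks table (allocJobPrivate, freeJobPrivate, formatJob, parseJob) wired up in qemuDomainObjInitJob(), plus the qemuDomainJobPrivate and qemuDomainJobInfoPrivate structs that now live in qemu_migration_params.h.

/* Illustration only -- hypothetical names, not libvirt API. A generic
 * "job" object keeps driver-specific state behind a void pointer and a
 * small callback table, so this file needs no driver headers. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct _sampleJobObj sampleJobObj;

/* Callbacks the generic job code uses to manage the opaque data. */
typedef struct _sampleJobCallbacks {
    void *(*allocPrivate)(void);
    void (*freePrivate)(void *priv);
    void (*formatPrivate)(FILE *out, sampleJobObj *job);
} sampleJobCallbacks;

/* Generic job object: does not know what privateData points to. */
struct _sampleJobObj {
    int active;            /* generic job state */
    void *privateData;     /* driver-specific state, opaque here */
    sampleJobCallbacks cb; /* how to handle privateData */
};

/* --- driver-specific part (would live in a separate file) --- */

typedef struct _sampleDriverJobPrivate {
    int spiceMigrated;     /* example of a driver-only flag */
    char *migParams;       /* stand-in for migration parameters */
} sampleDriverJobPrivate;

static void *
sampleDriverJobAllocPrivate(void)
{
    return calloc(1, sizeof(sampleDriverJobPrivate));
}

static void
sampleDriverJobFreePrivate(void *opaque)
{
    sampleDriverJobPrivate *priv = opaque;

    if (!priv)
        return;
    free(priv->migParams);
    free(priv);
}

static void
sampleDriverJobFormatPrivate(FILE *out, sampleJobObj *job)
{
    sampleDriverJobPrivate *priv = job->privateData;

    fprintf(out, "<job spiceMigrated='%d' migParams='%s'/>\n",
            priv->spiceMigrated, priv->migParams ? priv->migParams : "");
}

/* --- generic code: only ever goes through the callbacks --- */

static int
sampleJobInit(sampleJobObj *job, const sampleJobCallbacks *cb)
{
    memset(job, 0, sizeof(*job));
    job->cb = *cb;
    job->privateData = job->cb.allocPrivate();
    return job->privateData ? 0 : -1;
}

static void
sampleJobClear(sampleJobObj *job)
{
    job->cb.freePrivate(job->privateData);
    job->privateData = NULL;
}

int
main(void)
{
    sampleJobCallbacks cb = {
        .allocPrivate = sampleDriverJobAllocPrivate,
        .freePrivate = sampleDriverJobFreePrivate,
        .formatPrivate = sampleDriverJobFormatPrivate,
    };
    sampleJobObj job;

    if (sampleJobInit(&job, &cb) < 0)
        return 1;

    ((sampleDriverJobPrivate *)job.privateData)->spiceMigrated = 1;
    job.cb.formatPrivate(stdout, &job);
    sampleJobClear(&job);
    return 0;
}

The same indirection is what lets qemu_domainjob.h drop its include of qemu_migration_params.h: only the code providing the callbacks has to know the concrete layout behind privateData.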