When a user provides a migration XML via the VIR_MIGRATE_PARAM_DEST_XML
parameter, it's expected that they want to change ABI-compatible aspects
of the XML, such as the disk paths or similar.

If the user requests persisting of the VM but does not provide an
explicit persistent XML, libvirt would take the persistent XML from the
source of the migration as the persistent config. That XML usually still
refers to the old image paths, so starting the VM from it would fail.

It makes more sense to take the XML used for migration and use that as
the base for persisting the config.

Signed-off-by: Peter Krempa <pkrempa@xxxxxxxxxx>
---
 src/qemu/qemu_migration.c | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index a6cfede49f..0ae88eec03 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -4706,6 +4706,7 @@ qemuMigrationSrcCancel(virDomainObj *vm,
 static int
 qemuMigrationSrcRun(virQEMUDriver *driver,
                     virDomainObj *vm,
+                    const char *xmlin,
                     const char *persist_xml,
                     const char *cookiein,
                     int cookieinlen,
@@ -4779,6 +4780,15 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
                                                           persist_xml,
                                                           NULL, NULL)))
                 goto error;
+        } else if (xmlin) {
+            /* if input XML is provided, use that one as template for the
+             * persistent XML. Otherwise user's changes will be thrown away.
+             */
+            if (!(persistDef = qemuMigrationAnyPrepareDef(driver,
+                                                          priv->qemuCaps,
+                                                          xmlin,
+                                                          NULL, NULL)))
+                goto error;
         } else {
             virDomainDef *def = vm->newDef ? vm->newDef : vm->def;
             if (!(persistDef = qemuDomainDefCopy(driver, priv->qemuCaps, def,
@@ -5117,6 +5127,7 @@ qemuMigrationSrcResume(virDomainObj *vm,
 static int
 qemuMigrationSrcPerformNative(virQEMUDriver *driver,
                               virDomainObj *vm,
+                              const char *xmlin,
                               const char *persist_xml,
                               const char *uri,
                               const char *cookiein,
@@ -5202,7 +5213,7 @@ qemuMigrationSrcPerformNative(virQEMUDriver *driver,
         ret = qemuMigrationSrcResume(vm, migParams, cookiein, cookieinlen,
                                      cookieout, cookieoutlen, &spec, flags);
     } else {
-        ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen,
+        ret = qemuMigrationSrcRun(driver, vm, xmlin, persist_xml, cookiein, cookieinlen,
                                   cookieout, cookieoutlen, flags, resource,
                                   &spec, dconn, graphicsuri,
                                   nmigrate_disks, migrate_disks,
@@ -5220,6 +5231,7 @@ static int
 qemuMigrationSrcPerformTunnel(virQEMUDriver *driver,
                               virDomainObj *vm,
                               virStreamPtr st,
+                              const char *xmlin,
                               const char *persist_xml,
                               const char *cookiein,
                               int cookieinlen,
@@ -5266,7 +5278,7 @@ qemuMigrationSrcPerformTunnel(virQEMUDriver *driver,
         goto cleanup;
     }
 
-    ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen,
+    ret = qemuMigrationSrcRun(driver, vm, xmlin, persist_xml, cookiein, cookieinlen,
                               cookieout, cookieoutlen, flags, resource, &spec,
                               dconn, graphicsuri, nmigrate_disks, migrate_disks,
                               migParams, NULL);
@@ -5305,7 +5317,7 @@ qemuMigrationSrcPerformResume(virQEMUDriver *driver,
     virCloseCallbacksDomainRemove(vm, NULL, qemuMigrationAnyConnectionClosed);
     qemuDomainCleanupRemove(vm, qemuProcessCleanupMigrationJob);
 
-    ret = qemuMigrationSrcPerformNative(driver, vm, NULL, uri,
+    ret = qemuMigrationSrcPerformNative(driver, vm, NULL, NULL, uri,
                                         cookiein, cookieinlen,
                                         cookieout, cookieoutlen, flags, 0,
                                         NULL, NULL, 0, NULL, migParams, NULL);
@@ -5407,12 +5419,12 @@ qemuMigrationSrcPerformPeer2Peer2(virQEMUDriver *driver,
     VIR_DEBUG("Perform %p", sconn);
     ignore_value(qemuMigrationJobStartPhase(vm, QEMU_MIGRATION_PHASE_PERFORM2));
     if (flags & VIR_MIGRATE_TUNNELLED)
-        ret = qemuMigrationSrcPerformTunnel(driver, vm, st, NULL,
+        ret = qemuMigrationSrcPerformTunnel(driver, vm, st, NULL, NULL,
                                             NULL, 0, NULL, NULL,
                                             flags, resource, dconn,
                                             NULL, 0, NULL, migParams);
     else
-        ret = qemuMigrationSrcPerformNative(driver, vm, NULL, uri_out,
+        ret = qemuMigrationSrcPerformNative(driver, vm, NULL, NULL, uri_out,
                                             cookie, cookielen,
                                             NULL, NULL, /* No out cookie with v2 migration */
                                             flags, resource, dconn, NULL, 0, NULL,
@@ -5668,14 +5680,14 @@ qemuMigrationSrcPerformPeer2Peer3(virQEMUDriver *driver,
     } else {
         ignore_value(qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PERFORM3));
         if (flags & VIR_MIGRATE_TUNNELLED) {
-            ret = qemuMigrationSrcPerformTunnel(driver, vm, st, persist_xml,
+            ret = qemuMigrationSrcPerformTunnel(driver, vm, st, xmlin, persist_xml,
                                                 cookiein, cookieinlen,
                                                 &cookieout, &cookieoutlen,
                                                 flags, bandwidth, dconn, graphicsuri,
                                                 nmigrate_disks, migrate_disks,
                                                 migParams);
         } else {
-            ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri,
+            ret = qemuMigrationSrcPerformNative(driver, vm, xmlin, persist_xml, uri,
                                                 cookiein, cookieinlen,
                                                 &cookieout, &cookieoutlen,
                                                 flags, bandwidth, dconn, graphicsuri,
@@ -6075,7 +6087,7 @@ qemuMigrationSrcPerformJob(virQEMUDriver *driver,
         if (qemuMigrationJobStartPhase(vm, QEMU_MIGRATION_PHASE_PERFORM2) < 0)
             goto endjob;
 
-        ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
+        ret = qemuMigrationSrcPerformNative(driver, vm, xmlin, persist_xml, uri, cookiein, cookieinlen,
                                             cookieout, cookieoutlen,
                                             flags, resource, NULL, NULL, 0, NULL,
                                             migParams, nbdURI);
@@ -6140,6 +6152,7 @@ static int
 qemuMigrationSrcPerformPhase(virQEMUDriver *driver,
                              virConnectPtr conn,
                              virDomainObj *vm,
+                             const char *xmlin,
                              const char *persist_xml,
                              const char *uri,
                              const char *graphicsuri,
@@ -6177,7 +6190,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriver *driver,
 
     virCloseCallbacksDomainRemove(vm, NULL, qemuMigrationAnyConnectionClosed);
 
-    if (qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
+    if (qemuMigrationSrcPerformNative(driver, vm, xmlin, persist_xml, uri, cookiein, cookieinlen,
                                       cookieout, cookieoutlen,
                                       flags, resource, NULL, graphicsuri,
                                       nmigrate_disks, migrate_disks, migParams, nbdURI) < 0)
@@ -6276,7 +6289,7 @@ qemuMigrationSrcPerform(virQEMUDriver *driver,
     }
 
     if (v3proto) {
-        return qemuMigrationSrcPerformPhase(driver, conn, vm, persist_xml, uri,
+        return qemuMigrationSrcPerformPhase(driver, conn, vm, xmlin, persist_xml, uri,
                                             graphicsuri,
                                             nmigrate_disks, migrate_disks,
                                             migParams,
-- 
2.43.0
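For context, a rough caller-side sketch of the case this patch addresses (not
part of the patch; the connection URIs, domain name and XML string below are
placeholders): the caller sets VIR_MIGRATE_PARAM_DEST_XML and requests
VIR_MIGRATE_PERSIST_DEST, but supplies no VIR_MIGRATE_PARAM_PERSIST_XML. With
this change the provided destination XML, rather than the source's persistent
definition, becomes the base for the config persisted on the destination.

/* Illustrative sketch only; error handling trimmed, names are placeholders. */
#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr src = virConnectOpen("qemu:///system");        /* placeholder */
    virConnectPtr dst = virConnectOpen("qemu+ssh://dst/system"); /* placeholder */
    virDomainPtr dom = virDomainLookupByName(src, "guest");      /* placeholder */
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int maxparams = 0;
    virDomainPtr migrated = NULL;

    /* Destination XML with ABI-compatible tweaks, e.g. new disk paths
     * (shortened here). */
    const char *dest_xml = "<domain type='kvm'>...</domain>";

    virTypedParamsAddString(&params, &nparams, &maxparams,
                            VIR_MIGRATE_PARAM_DEST_XML, dest_xml);

    /* Persistence is requested but no VIR_MIGRATE_PARAM_PERSIST_XML is given:
     * with this patch the destination XML above is used as the base for the
     * persisted config instead of the source's persistent definition, which
     * may still reference the old image paths. */
    migrated = virDomainMigrate3(dom, dst, params, nparams,
                                 VIR_MIGRATE_LIVE | VIR_MIGRATE_PERSIST_DEST);
    if (!migrated)
        fprintf(stderr, "migration failed\n");

    virTypedParamsFree(params, nparams);
    if (migrated)
        virDomainFree(migrated);
    virDomainFree(dom);
    virConnectClose(dst);
    virConnectClose(src);
    return migrated ? 0 : 1;
}

The same situation can be reproduced with virsh, e.g.
"virsh migrate --live --persistent --xml new.xml guest qemu+ssh://dst/system"
with no --persistent-xml given.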