[PATCH V4] implement offline migration

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Allow migration even when the domain isn't active, by
inserting some stubs into the tunnelled migration path.

Signed-off-by: liguang <lig.fnst@xxxxxxxxxxxxxx>
---
 src/qemu/qemu_driver.c    |    2 +-
 src/qemu/qemu_migration.c |  181 +++++++++++++++++++++++++++++++++++++++++++--
 src/qemu/qemu_migration.h |    3 +-
 3 files changed, 178 insertions(+), 8 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index d74bf52..00ca211 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -9779,7 +9779,7 @@ qemuDomainMigratePrepareTunnel3(virConnectPtr dconn,
 
     virCheckFlags(QEMU_MIGRATION_FLAGS, -1);
 
-    if (!dom_xml) {
+    if (!dom_xml && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_INTERNAL_ERROR,
                        "%s", _("no domain XML passed"));
         goto cleanup;
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 1b21ef6..991bcc5 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags {
     QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
     QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
     QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT,
+    QEMU_MIGRATION_COOKIE_FLAG_OFFLINE,
 
     QEMU_MIGRATION_COOKIE_FLAG_LAST
 };
@@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags {
 VIR_ENUM_DECL(qemuMigrationCookieFlag);
 VIR_ENUM_IMPL(qemuMigrationCookieFlag,
               QEMU_MIGRATION_COOKIE_FLAG_LAST,
-              "graphics", "lockstate", "persistent");
+              "graphics", "lockstate", "persistent", "offline");
 
 enum qemuMigrationCookieFeatures {
     QEMU_MIGRATION_COOKIE_GRAPHICS  = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
     QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
     QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT),
+    QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE),
 };
 
 typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
@@ -101,6 +103,10 @@ struct _qemuMigrationCookie {
     unsigned int flags;
     unsigned int flagsMandatory;
 
+    /*offline migration flag*/
+    int offline;
+    char *mig_file;
+
     /* Host properties */
     unsigned char localHostuuid[VIR_UUID_BUFLEN];
     unsigned char remoteHostuuid[VIR_UUID_BUFLEN];
@@ -139,6 +145,8 @@ static void qemuMigrationCookieFree(qemuMigrationCookiePtr mig)
 
     if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS)
         qemuMigrationCookieGraphicsFree(mig->graphics);
+    if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE)
+        VIR_FREE(mig->mig_file);
 
     VIR_FREE(mig->localHostname);
     VIR_FREE(mig->remoteHostname);
@@ -439,6 +447,12 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver,
         virBufferAdjustIndent(buf, -2);
     }
 
+    if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
+        virBufferAsprintf(buf, "  <offline mig_ol='%d' mig_file='%s'>\n",
+                          mig->offline, mig->mig_file ? mig->mig_file : "");
+        virBufferAddLit(buf, "  </offline>\n");
+    }
+
     virBufferAddLit(buf, "</qemu-migration>\n");
     return 0;
 }
@@ -662,6 +676,18 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
         VIR_FREE(nodes);
     }
 
+    if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE) &&
+        virXPathBoolean("count(./offline) > 0", ctxt)) {
+        if (virXPathInt("string(./offline/@mig_ol)", ctxt, &mig->offline) < 0) {
+            virReportError(VIR_ERR_INTERNAL_ERROR,
+                           "%s", _("missing mig_ol attribute in migration data"));
+            goto error;
+        }
+        mig->mig_file = virXPathString("string(./offline/@mig_file)", ctxt);
+        if (mig->mig_file && STREQ(mig->mig_file, ""))
+            VIR_FREE(mig->mig_file);
+    }
+
     return 0;
 
 error:
@@ -721,6 +747,12 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
         qemuMigrationCookieAddPersistent(mig, dom) < 0)
         return -1;
 
+    if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
+        mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
+        mig->offline = 1;
+    }
+
+
     if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig)))
         return -1;
 
@@ -1307,6 +1339,27 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
     /* Domain starts inactive, even if the domain XML had an id field. */
     vm->def->id = -1;
 
+    if (tunnel) {
+        if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
+                                           QEMU_MIGRATION_COOKIE_OFFLINE)))
+            return ret;
+        else if (mig->offline) {
+            char *file, *str, *tmp = NULL;
+            ret = 0;
+            for (str = mig->mig_file; ; str = NULL) {
+                file = strtok_r(str, " ", &tmp);
+                if (file == NULL)
+                    break;
+                if (virFDStreamCreateFile(st, file, 0, 0, O_WRONLY, 0600) < 0) {
+                    virReportSystemError(errno, "%s",
+                                         _("cannot setup stream for tunnelled migration"));
+                    ret = -1;
+                }
+            }
+            goto endjob;
+        }
+    }
+
     if (tunnel &&
         (pipe(dataFD) < 0 || virSetCloseExec(dataFD[1]) < 0)) {
         virReportSystemError(errno, "%s",
@@ -2303,6 +2356,117 @@ cleanup:
     return ret;
 }
 
+static int
+doReadFile(virStreamPtr st ATTRIBUTE_UNUSED,
+                 char *buf, size_t nbytes, void *opaque)
+{
+    int *fd = opaque;
+
+    return saferead(*fd, buf, nbytes);
+}
+
+
+/*
+ * do offline migration
+ */
+static int doMigrateOffline(struct qemud_driver *driver,
+                           virConnectPtr dconn,
+                           virDomainObjPtr vm,
+                           const char *cookiein,
+                           int cookieinlen,
+                           char **cookieout,
+                           int *cookieoutlen,
+                           unsigned long flags,
+                           const char *dom_xml,
+                           const char *dname,
+                           virStreamPtr st,
+                           unsigned long resource)
+{
+    xmlDocPtr xml = NULL;
+    xmlXPathContextPtr ctxt = NULL;
+    xmlNodePtr *disks = NULL;
+    qemuMigrationCookiePtr mig = NULL;
+    virBuffer buf = VIR_BUFFER_INITIALIZER;
+    int i = 0, cn = 0, fd = -1, ret = -1;
+    char *src[] = {NULL}, *files = NULL;
+
+    VIR_DEBUG("driver=%p, vm=%p, st=%p, cookiein=%s, cookieinlen=%d, "
+              "cookieout=%p, cookieoutlen=%p, dom_xml=%s,"
+              "dname=%s, flags=%lx, resource=%lu",
+              driver, vm, st, NULLSTR(cookiein), cookieinlen,
+              cookieout, cookieoutlen, dom_xml, dname, flags, resource);
+
+    xml = virXMLParseStringCtxt(dom_xml, _("(domain_definition)"), &ctxt);
+    if (!xml) {
+         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                       _("can't parse dom_xml for offline migration\n"));
+        goto cleanup;
+    }
+    cn = virXPathNodeSet("./devices/disk", ctxt, &disks);
+    if (cn < 0) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                       _("Fail to get disk node\n"));
+        goto cleanup;
+    }
+    if (cn > 1) cn = 1; /* FIXME: only the first disk is migrated so far */
+
+    for (i = 0 ; i < cn ; i++) {
+        ctxt->node = disks[i];
+        src[i] = virXPathString("string(./source/@file"
+                                "|./source/@dir"
+                                "|./source/@name)", ctxt);
+        virBufferAsprintf(&buf, "%s ", src[i] ? src[i] : "");
+    }
+
+    files = virBufferContentAndReset(&buf);
+
+    if (!(mig = qemuMigrationEatCookie(driver, vm, NULL, 0, 0)))
+        goto cleanup;
+
+    mig->mig_file = files;
+
+    if (qemuMigrationBakeCookie(mig, driver, vm,
+                                cookieout, cookieoutlen,
+                                QEMU_MIGRATION_COOKIE_OFFLINE) < 0)
+        goto cleanup;
+
+    cookiein = *cookieout;
+    cookieinlen = *cookieoutlen;
+    cookieout = NULL;
+    cookieoutlen = 0;
+
+    qemuDomainObjEnterRemoteWithDriver(driver, vm);
+    ret = dconn->driver->domainMigratePrepareTunnel3
+        (dconn, st, cookiein, cookieinlen,
+         cookieout, cookieoutlen,
+         flags, dname, resource, dom_xml);
+    qemuDomainObjExitRemoteWithDriver(driver, vm);
+    if (ret == -1)
+        goto cleanup;
+
+    for (i = 0; i < cn; i++) {
+        if (!src[i] || (fd = open(src[i], O_RDONLY)) < 0) {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s %s",
+                           _("Fail to open file for offline migration"), NULLSTR(src[i]));
+            goto cleanup;
+        }
+        if (virStreamSendAll(st, doReadFile, &fd) < 0)
+            goto cleanup;
+        if (VIR_CLOSE(fd) < 0)
+            goto cleanup;
+    }
+
+    if (virStreamFinish(st) < 0)
+        goto cleanup;
+
+    ret = 0;
+
+cleanup:
+    VIR_FREE(files);
+    xmlXPathFreeContext(ctxt);
+    xmlFreeDoc(xml);
+    return ret;
+}
 
 /* This is essentially a re-impl of virDomainMigrateVersion3
  * from libvirt.c, but running in source libvirtd context,
@@ -2357,7 +2521,13 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
     if (flags & VIR_MIGRATE_TUNNELLED) {
         if (!(st = virStreamNew(dconn, 0)))
             goto cleanup;
-
+        if (flags & VIR_MIGRATE_OFFLINE) {
+            if (doMigrateOffline(driver, dconn, vm, cookiein, cookieinlen,
+                                 &cookieout, &cookieoutlen, flags, dom_xml,
+                                 dname, st, resource) != -1)
+                ret = 0;
+            goto cleanup;
+        }
         qemuDomainObjEnterRemoteWithDriver(driver, vm);
         ret = dconn->driver->domainMigratePrepareTunnel3
             (dconn, st, cookiein, cookieinlen,
@@ -2477,7 +2647,7 @@ finish:
                  vm->def->name);
 
  cleanup:
-    if (ddomain) {
+    if (ddomain || ((flags & VIR_MIGRATE_OFFLINE) && ret == 0)) {
         virObjectUnref(ddomain);
         ret = 0;
     } else {
@@ -2557,7 +2727,9 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver,
     if (!virDomainObjIsActive(vm)) {
-        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                       _("guest unexpectedly quit"));
-        goto cleanup;
+        if (!(flags & VIR_MIGRATE_OFFLINE)) {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("guest unexpectedly quit"));
+            goto cleanup;
+        }
     }
 
     /* Change protection is only required on the source side (us), and
@@ -2573,7 +2743,6 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver,
     else
         ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm,
                                   dconnuri, flags, dname, resource);
-
 cleanup:
     orig_err = virSaveLastError();
     qemuDomainObjEnterRemoteWithDriver(driver, vm);
@@ -2620,7 +2789,9 @@ qemuMigrationPerformJob(struct qemud_driver *driver,
     if (!virDomainObjIsActive(vm)) {
-        virReportError(VIR_ERR_OPERATION_INVALID,
-                       "%s", _("domain is not running"));
-        goto endjob;
+        if (!(flags & VIR_MIGRATE_OFFLINE)) {
+            virReportError(VIR_ERR_OPERATION_INVALID,
+                           "%s", _("domain is not running"));
+            goto endjob;
+        }
     }
 
     if (!qemuMigrationIsAllowed(driver, vm, NULL))
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 1740204..2bcaea0 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -36,7 +36,8 @@
      VIR_MIGRATE_NON_SHARED_DISK |              \
      VIR_MIGRATE_NON_SHARED_INC |               \
      VIR_MIGRATE_CHANGE_PROTECTION |            \
-     VIR_MIGRATE_UNSAFE)
+     VIR_MIGRATE_UNSAFE |                       \
+     VIR_MIGRATE_OFFLINE)
 
 enum qemuMigrationJobPhase {
     QEMU_MIGRATION_PHASE_NONE = 0,
-- 
1.7.2.5

--
libvir-list mailing list
libvir-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/libvir-list


[Index of Archives]     [Virt Tools]     [Libvirt Users]     [Lib OS Info]     [Fedora Users]     [Fedora Desktop]     [Fedora SELinux]     [Big List of Linux Books]     [Yosemite News]     [KDE Users]     [Fedora Tools]