[PATCH v2 09/11] qemu_migration: Implement qemuMigrationDriveMirror

This function does the source part of the NBD magic.
It invokes drive-mirror on each non-shared disk
and waits until the mirroring process completes.
Once it does, we can proceed with the migration.
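
Not part of the patch, just for illustration: a tiny
standalone sketch of the per-disk NBD target that
drive-mirror is pointed at. The host, port and drive
alias below are made-up example values; the patch
builds the same string with virAsprintf from the
migration cookie and the disk alias.

    #include <stdio.h>

    int main(void)
    {
        /* Example values only; not taken from the patch. */
        const char *host = "192.168.122.10";      /* destination host */
        unsigned int port = 49153;                /* NBD port from the cookie */
        const char *drive = "drive-virtio-disk0"; /* QEMU drive alias */
        char nbd_dest[256];

        snprintf(nbd_dest, sizeof(nbd_dest), "nbd:%s:%u:exportname=%s",
                 host, port, drive);
        printf("%s\n", nbd_dest);
        /* -> nbd:192.168.122.10:49153:exportname=drive-virtio-disk0 */
        return 0;
    }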

Currently, active waiting is used: every 50ms
libvirt asks qemu whether the block job has
finished. However, once a job finishes, qemu no
longer reports its progress, so we can only assume
that the job finished successfully. A better
solution would be to listen for the event that is
emitted as soon as the job finishes, since the
event carries the result of the job.
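
Also for illustration (not libvirt code): a self-contained
sketch of the 50ms polling described above. query_job() is
a hypothetical stand-in for qemuMonitorBlockJob() so the
example compiles on its own; it just pretends the mirror
catches up after a few polls.

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical stand-in for qemuMonitorBlockJob(..., BLOCK_JOB_INFO):
     * fills in how much has been copied (cur) and how much there is to
     * copy (end); here it fakes a mirror that catches up after 4 polls. */
    static int query_job(const char *disk, unsigned long long *cur,
                         unsigned long long *end)
    {
        static unsigned long long copied;
        (void)disk;
        *end = 4;
        *cur = copied < 4 ? copied++ : 4;
        return 0;
    }

    /* Poll every 50ms until the mirror catches up (cur == end) or qemu
     * stops reporting the job -- the same shape as the loop in the patch. */
    static void wait_for_mirror(const char *disk)
    {
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };
        unsigned long long cur, end;

        while (true) {
            if (query_job(disk, &cur, &end) < 0)
                break;              /* finished jobs are no longer reported */
            if (cur == end) {
                printf("mirror of '%s' is in sync\n", disk);
                break;
            }
            nanosleep(&ts, NULL);   /* active waiting, 50ms period */
        }
    }

    int main(void)
    {
        wait_for_mirror("drive-virtio-disk0");
        return 0;
    }
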
---
 src/qemu/qemu_migration.c |  190 +++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 184 insertions(+), 6 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 9177777..6ee8f0e 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1166,6 +1166,177 @@ cleanup:
     return ret;
 }
 
+/**
+ * qemuMigrationDriveMirror:
+ * @driver: qemu driver
+ * @vm: domain
+ * @mig: migration cookie
+ * @host: address of the destination host
+ * @speed: bandwidth limit for the mirroring job
+ * @migrate_flags: migrate monitor command flags
+ *
+ * Run drive-mirror to feed the NBD server running on the destination
+ * and wait until the process completes. On success, update
+ * @migrate_flags so we don't tell the 'migrate' command to do the
+ * very same operation again.
+ *
+ * Returns 0 on success (@migrate_flags updated), -1 otherwise.
+ */
+static int
+qemuMigrationDriveMirror(virQEMUDriverPtr driver,
+                         virDomainObjPtr vm,
+                         qemuMigrationCookiePtr mig,
+                         const char *host,
+                         unsigned long speed,
+                         unsigned int *migrate_flags)
+{
+    int ret = -1;
+    int mon_ret;
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    size_t ndisks = 0, i;
+    char **disks = NULL;
+    unsigned int mirror_flags = VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT;
+    char *nbd_dest = NULL;
+
+    if (*migrate_flags & QEMU_MONITOR_MIGRATE_NON_SHARED_DISK) {
+        /* full copy; the default mirror_flags are fine */
+    } else if (*migrate_flags & QEMU_MONITOR_MIGRATE_NON_SHARED_INC) {
+        mirror_flags |= VIR_DOMAIN_BLOCK_REBASE_SHALLOW;
+    } else {
+        /* Nothing to be done here. Claim success */
+        return 0;
+    }
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDefPtr disk = vm->def->disks[i];
+
+        /* skip shared disks */
+        if (disk->shared)
+            continue;
+
+        if (VIR_REALLOC_N(disks, ndisks + 1) < 0) {
+            virReportOOMError();
+            goto cleanup;
+        }
+
+        if (virAsprintf(&disks[ndisks++], "%s%s",
+                        QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0) {
+            virReportOOMError();
+            goto cleanup;
+        }
+    }
+
+    if (!ndisks) {
+        /* Hooray! Nothing to care about */
+        ret = 0;
+        goto cleanup;
+    }
+
+    if (!mig->nbd) {
+        /* Destination doesn't support NBD server.
+         * Fall back to previous implementation.
+         * XXX Or should we report an error here? */
+        VIR_DEBUG("Destination doesn't support NBD server. "
+                  "Falling back to previous implementation.");
+        ret = 0;
+        goto cleanup;
+    }
+
+    for (i = 0; i < ndisks; i++) {
+        virDomainBlockJobInfo info;
+        VIR_FREE(nbd_dest);
+        if (virAsprintf(&nbd_dest, "nbd:%s:%u:exportname=%s",
+                        host, mig->nbd->port, disks[i]) < 0) {
+            virReportOOMError();
+            goto error;
+        }
+
+        if (qemuDomainObjEnterMonitorAsync(driver, vm,
+                                           QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+            goto error;
+        mon_ret = qemuMonitorDriveMirror(priv->mon, disks[i], nbd_dest,
+                                         NULL, speed, mirror_flags);
+        qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+        if (mon_ret < 0)
+            goto error;
+
+        /* wait for completion */
+        while (true) {
+            /* Poll every 50ms for progress & to allow cancellation */
+            struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
+            if (qemuDomainObjEnterMonitorAsync(driver, vm,
+                                               QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+                goto error;
+            if (priv->job.asyncAbort) {
+                /* explicitly do this *after* we have entered the
+                 * monitor; this is a critical section, so we are
+                 * guaranteed priv->job.asyncAbort will not change */
+                qemuDomainObjExitMonitorWithDriver(driver, vm);
+                virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
+                               qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+                               _("canceled by client"));
+                goto cleanup;
+            }
+            mon_ret = qemuMonitorBlockJob(priv->mon, disks[i], NULL, 0,
+                                          &info, BLOCK_JOB_INFO, true);
+            qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+            if (mon_ret < 0) {
+                /* qemu doesn't report finished jobs */
+                VIR_WARN("Unable to query drive-mirror job status. "
+                         "Stop polling on '%s' cur:%llu end:%llu",
+                         disks[i], info.cur, info.end);
+                break;
+            }
+
+            if (info.cur == info.end) {
+                VIR_DEBUG("Drive mirroring of '%s' completed", disks[i]);
+                break;
+            }
+
+            /* XXX Frankly speaking, we should listen for the block job
+             * events instead of polling like this. But this works for
+             * now, and we do something similar in migration itself anyway */
+
+            virDomainObjUnlock(vm);
+            qemuDriverUnlock(driver);
+
+            nanosleep(&ts, NULL);
+
+            qemuDriverLock(driver);
+            virDomainObjLock(vm);
+
+        }
+    }
+
+    /* okay, copied. modify migrate_flags */
+    *migrate_flags &= ~(QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
+                        QEMU_MONITOR_MIGRATE_NON_SHARED_INC);
+    ret = 0;
+
+cleanup:
+    for (i = 0; i < ndisks; i++)
+        VIR_FREE(disks[i]);
+    VIR_FREE(disks);
+    VIR_FREE(nbd_dest);
+    return ret;
+
+error:
+    /* cancel any outstanding jobs */
+    if (qemuDomainObjEnterMonitorAsync(driver, vm,
+                                       QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
+        /* including disks[0]; a job that never started just warns */
+        do {
+            if (qemuMonitorBlockJob(priv->mon, disks[i], NULL, 0,
+                                    NULL, BLOCK_JOB_ABORT, true) < 0)
+                VIR_WARN("Unable to cancel block-job on '%s'", disks[i]);
+        } while (i--);
+        qemuDomainObjExitMonitorWithDriver(driver, vm);
+    } else {
+        VIR_WARN("Unable to enter monitor. No block job cancelled");
+    }
+    goto cleanup;
+}
 
 /* Validate whether the domain is safe to migrate.  If vm is NULL,
  * then this is being run in the v2 Prepare stage on the destination
@@ -2329,6 +2500,12 @@ qemuMigrationRun(virQEMUDriverPtr driver,
               cookieout, cookieoutlen, flags, resource,
               spec, spec->destType, spec->fwdType);
 
+    if (flags & VIR_MIGRATE_NON_SHARED_DISK)
+        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
+
+    if (flags & VIR_MIGRATE_NON_SHARED_INC)
+        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
+
     if (virLockManagerPluginUsesState(driver->lockManager) &&
         !cookieout) {
         virReportError(VIR_ERR_INTERNAL_ERROR,
@@ -2346,6 +2523,13 @@ qemuMigrationRun(virQEMUDriverPtr driver,
     if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0)
         VIR_WARN("unable to provide data for graphics client relocation");
 
+    /* this will update migrate_flags on success */
+    if (qemuMigrationDriveMirror(driver, vm, mig, spec->dest.host.name,
+                                 migrate_speed, &migrate_flags) < 0) {
+        /* error reported by helper func */
+        goto cleanup;
+    }
+
     /* Before EnterMonitor, since qemuMigrationSetOffline already does that */
     if (!(flags & VIR_MIGRATE_LIVE) &&
         virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
@@ -2373,12 +2557,6 @@ qemuMigrationRun(virQEMUDriverPtr driver,
         goto cleanup;
     }
 
-    if (flags & VIR_MIGRATE_NON_SHARED_DISK)
-        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
-
-    if (flags & VIR_MIGRATE_NON_SHARED_INC)
-        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
-
     /* connect to the destination qemu if needed */
     if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
         qemuMigrationConnect(driver, vm, spec) < 0) {
-- 
1.7.8.6
