Re: [PATCH v11] support offline migration

On Wed, Oct 17, 2012 at 7:42 PM, liguang <lig.fnst@xxxxxxxxxxxxxx> wrote:
> The original migration code was not aware of the offline case, so
> try to support offline migration quietly (without disturbing the
> existing migration flow) by passing the VIR_MIGRATE_OFFLINE flag
> to the migration APIs only when the domain is really inactive;
> that way the migration process is not confused by the domain being
> offline and does not exit unexpectedly.
> These changes do not take care of the disk images the domain
> requires, since those can be transferred by other APIs as
> suggested; therefore VIR_MIGRATE_OFFLINE must not be combined
> with VIR_MIGRATE_NON_SHARED_*.
> If you want a persistent migration, you should do
> "virsh migrate --persistent" yourself.
>
> Signed-off-by: liguang <lig.fnst@xxxxxxxxxxxxxx>
> ---
>  include/libvirt/libvirt.h.in |    1 +
>  src/qemu/qemu_driver.c       |   15 ++++++++++
>  src/qemu/qemu_migration.c    |   60 +++++++++++++++++++++++++++++++++++------
>  src/qemu/qemu_migration.h    |    3 +-
>  tools/virsh-domain.c         |    6 ++++
>  5 files changed, 75 insertions(+), 10 deletions(-)
>
> diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
> index 81f12a4..1cebc21 100644
> --- a/include/libvirt/libvirt.h.in
> +++ b/include/libvirt/libvirt.h.in
> @@ -995,6 +995,7 @@ typedef enum {
>                                                 * whole migration process; this will be used automatically
>                                                 * when supported */
>      VIR_MIGRATE_UNSAFE            = (1 << 9), /* force migration even if it is considered unsafe */
> +    VIR_MIGRATE_OFFLINE           = (1 << 10), /* offline migrate */
>  } virDomainMigrateFlags;
>
>  /* Domain migration. */
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index 97ad23e..38bfcab 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -9622,6 +9622,20 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
>      }
>
>      if (!virDomainObjIsActive(vm)) {
> +        if (flags & VIR_MIGRATE_OFFLINE) {
> +            if (flags & (VIR_MIGRATE_NON_SHARED_DISK|
> +                         VIR_MIGRATE_NON_SHARED_INC)) {
> +                virReportError(VIR_ERR_OPERATION_INVALID,
> +                               "%s", _("migrating storage handled by volume APIs"));
> +                goto endjob;
> +            }
> +            if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
> +                virReportError(VIR_ERR_OPERATION_INVALID,
>                                "%s", _("VIR_MIGRATE_OFFLINE should be combined with VIR_MIGRATE_PERSIST_DEST"));

I feel like maybe we should just assume that VIR_MIGRATE_OFFLINE
implies VIR_MIGRATE_PERSIST_DEST and, if it's not supplied, add it
to the flags. Dan, do you agree or disagree?
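
Something along these lines near the top of qemuDomainMigrateBegin3
(a rough, untested sketch) is what I have in mind:

    /* Sketch: offline migration only makes sense if the definition
     * is persisted on the destination, so imply the flag rather
     * than rejecting its absence. */
    if (flags & VIR_MIGRATE_OFFLINE)
        flags |= VIR_MIGRATE_PERSIST_DEST;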


> +                goto endjob;
> +            }
> +            goto offline;
> +        }
>          virReportError(VIR_ERR_OPERATION_INVALID,
>                         "%s", _("domain is not running"));
>          goto endjob;
> @@ -9634,6 +9648,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
>      if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
>          goto endjob;
>
> +offline:
>      if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
>                                     cookieout, cookieoutlen,
>                                     flags)))
> diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
> index db69a0a..b2f921e 100644
> --- a/src/qemu/qemu_migration.c
> +++ b/src/qemu/qemu_migration.c
> @@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags {
>      QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
>      QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
>      QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT,
> +    QEMU_MIGRATION_COOKIE_FLAG_OFFLINE,
>
>      QEMU_MIGRATION_COOKIE_FLAG_LAST
>  };
> @@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags {
>  VIR_ENUM_DECL(qemuMigrationCookieFlag);
>  VIR_ENUM_IMPL(qemuMigrationCookieFlag,
>                QEMU_MIGRATION_COOKIE_FLAG_LAST,
> -              "graphics", "lockstate", "persistent");
> +              "graphics", "lockstate", "persistent", "offline");
>
>  enum qemuMigrationCookieFeatures {
>      QEMU_MIGRATION_COOKIE_GRAPHICS  = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
>      QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
>      QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT),
> +    QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE),
>  };
>
>  typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
> @@ -439,6 +441,9 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver,
>          virBufferAdjustIndent(buf, -2);
>      }
>
> +    if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE)
> +        virBufferAsprintf(buf, "  <offline/>\n");
> +
>      virBufferAddLit(buf, "</qemu-migration>\n");
>      return 0;
>  }
> @@ -662,6 +667,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
>          VIR_FREE(nodes);
>      }
>
> +    if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE)) {
> +        if (virXPathBoolean("count(./offline) > 0", ctxt))
> +            mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
> +    }
> +
>      return 0;
>
>  error:
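
For reference, with the format and parse changes above, the cookie
for an offline migration would carry an empty element, roughly like
this (abbreviated; the other cookie elements are elided):

    <qemu-migration>
      ...
      <offline/>
    </qemu-migration>
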
> @@ -721,6 +731,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
>          qemuMigrationCookieAddPersistent(mig, dom) < 0)
>          return -1;
>
> +    if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
> +        mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
> +    }
> +
>      if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig)))
>          return -1;
>
> @@ -1168,6 +1182,13 @@ char *qemuMigrationBegin(struct qemud_driver *driver,
>                                  QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
>          goto cleanup;
>
> +    if (flags & VIR_MIGRATE_OFFLINE) {
> +        if (qemuMigrationBakeCookie(mig, driver, vm,
> +                                    cookieout, cookieoutlen,
> +                                    QEMU_MIGRATION_COOKIE_OFFLINE) < 0)
> +            goto cleanup;
> +    }
> +
>      if (xmlin) {
>          if (!(def = virDomainDefParseString(driver->caps, xmlin,
>                                              QEMU_EXPECTED_VIRT_TYPES,
> @@ -1331,6 +1352,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
>          goto endjob;
>      }
>
> +    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
> +                                       QEMU_MIGRATION_COOKIE_OFFLINE)))
> +        return ret;
> +
> +    if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
> +        ret = 0;
> +        goto cleanup;
> +    }
> +
>      /* Start the QEMU daemon, with the same command-line arguments plus
>       * -incoming $migrateFrom
>       */
> @@ -1873,7 +1903,8 @@ qemuMigrationRun(struct qemud_driver *driver,
>                         virLockManagerPluginGetName(driver->lockManager));
>          return -1;
>      }
> -
> +    if (flags & VIR_MIGRATE_OFFLINE)
> +        return 0;

This feels wrong since we're checking the state with the locking
manager. But really, since we're not dealing with the disks at all
(the domain is being moved over offline), we shouldn't check the
state; we should just migrate. So it seems this should be above the
locking manager check. But again I defer to Dan as he's more
knowledgeable about this than I am.
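
Concretely, something like this at the top of qemuMigrationRun (a
rough sketch; the lock-manager check is paraphrased from the
existing code shown in the context above):

    /* Offline migration moves only the domain definition; no disk
     * or runtime state is transferred, so there is nothing for the
     * lock manager to track and nothing for QEMU to do here. */
    if (flags & VIR_MIGRATE_OFFLINE)
        return 0;

    if (virLockManagerPluginUsesState(driver->lockManager) &&
        !cookieout) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Migration with lock driver %s requires"
                         " cookie support"),
                       virLockManagerPluginGetName(driver->lockManager));
        return -1;
    }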

>      if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
>                                         QEMU_MIGRATION_COOKIE_GRAPHICS)))
>          goto cleanup;
> @@ -2389,6 +2420,8 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
>          qemuDomainObjExitRemoteWithDriver(driver, vm);
>      }
>      VIR_FREE(dom_xml);
> +    if (flags & VIR_MIGRATE_OFFLINE)
> +        goto cleanup;
>      if (ret == -1)
>          goto cleanup;
>
> @@ -2494,7 +2527,7 @@ finish:
>                   vm->def->name);
>
>   cleanup:
> -    if (ddomain) {
> +    if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) {
>          virObjectUnref(ddomain);
>          ret = 0;
>      } else {
> @@ -2571,7 +2604,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver,
>      }
>
>      /* domain may have been stopped while we were talking to remote daemon */
> -    if (!virDomainObjIsActive(vm)) {
> +    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
>          virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>                         _("guest unexpectedly quit"));
>          goto cleanup;
> @@ -2634,7 +2667,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver,
>      if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
>          goto cleanup;
>
> -    if (!virDomainObjIsActive(vm)) {
> +    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
>          virReportError(VIR_ERR_OPERATION_INVALID,
>                         "%s", _("domain is not running"));
>          goto endjob;
> @@ -2958,6 +2991,8 @@ qemuMigrationFinish(struct qemud_driver *driver,
>       */
>      if (retcode == 0) {
>          if (!virDomainObjIsActive(vm)) {
> +            if (flags & VIR_MIGRATE_OFFLINE)
> +                goto offline;
>              virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>                             _("guest unexpectedly quit"));
>              goto endjob;
> @@ -2973,6 +3008,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
>              goto endjob;
>          }
>
> +    offline:
>          if (flags & VIR_MIGRATE_PERSIST_DEST) {
>              virDomainDefPtr vmdef;
>              if (vm->persistent)
> @@ -3020,7 +3056,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
>              event = NULL;
>          }
>
> -        if (!(flags & VIR_MIGRATE_PAUSED)) {
> +        if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) {
>              /* run 'cont' on the destination, which allows migration on qemu
>               * >= 0.10.6 to work properly.  This isn't strictly necessary on
>               * older qemu's, but it also doesn't hurt anything there
> @@ -3069,9 +3105,11 @@ qemuMigrationFinish(struct qemud_driver *driver,
>                                               VIR_DOMAIN_EVENT_SUSPENDED,
>                                               VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
>          }
> -        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
> -            VIR_WARN("Failed to save status on vm %s", vm->def->name);
> -            goto endjob;
> +        if (virDomainObjIsActive(vm)) {
> +            if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
> +                VIR_WARN("Failed to save status on vm %s", vm->def->name);
> +                goto endjob;
> +            }
>          }
>
>          /* Guest is successfully running, so cancel previous auto destroy */
> @@ -3091,6 +3129,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
>  endjob:
>      if (qemuMigrationJobFinish(driver, vm) == 0) {
>          vm = NULL;
> +    } else if (flags & VIR_MIGRATE_OFFLINE) {
>      } else if (!vm->persistent && !virDomainObjIsActive(vm)) {
>          qemuDomainRemoveInactive(driver, vm);
>          vm = NULL;
> @@ -3137,6 +3176,8 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
>
>      if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
>          return -1;
> +    if (flags & VIR_MIGRATE_OFFLINE)
> +        goto offline;
>
>      /* Did the migration go as planned?  If yes, kill off the
>       * domain object, but if no, resume CPUs
> @@ -3173,6 +3214,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
>          }
>      }
>
> +offline:
>      qemuMigrationCookieFree(mig);
>      rv = 0;
>
> diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
> index 7a2269a..b4f6a77 100644
> --- a/src/qemu/qemu_migration.h
> +++ b/src/qemu/qemu_migration.h
> @@ -36,7 +36,8 @@
>       VIR_MIGRATE_NON_SHARED_DISK |              \
>       VIR_MIGRATE_NON_SHARED_INC |               \
>       VIR_MIGRATE_CHANGE_PROTECTION |            \
> -     VIR_MIGRATE_UNSAFE)
> +     VIR_MIGRATE_UNSAFE |                       \
> +     VIR_MIGRATE_OFFLINE)
>
>  enum qemuMigrationJobPhase {
>      QEMU_MIGRATION_PHASE_NONE = 0,
> diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
> index 505169b..2218379 100644
> --- a/tools/virsh-domain.c
> +++ b/tools/virsh-domain.c
> @@ -6647,6 +6647,7 @@ static const vshCmdOptDef opts_migrate[] = {
>      {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")},
>      {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")},
>      {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")},
> +    {"offline", VSH_OT_BOOL, 0, N_("for offline migration")},
>      {NULL, 0, 0, NULL}
>  };
>
> @@ -6713,6 +6714,11 @@ doMigrate(void *opaque)
>      if (vshCommandOptBool(cmd, "unsafe"))
>          flags |= VIR_MIGRATE_UNSAFE;
>
> +    if (vshCommandOptBool(cmd, "offline")) {
> +        if (!virDomainIsActive(dom))
> +            flags |= VIR_MIGRATE_OFFLINE;
> +    }
> +
>      if (xmlfile &&
>          virFileReadAll(xmlfile, 8192, &xml) < 0) {
>          vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
> --
> 1.7.2.5
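
With the new virsh option, an offline migration would then be
invoked along these lines (illustrative; the domain name and the
destination URI are placeholders):

    virsh migrate --offline --persistent guest qemu+ssh://dst.example.com/system

Note that --persistent is required here, since the patch rejects
VIR_MIGRATE_OFFLINE without VIR_MIGRATE_PERSIST_DEST.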



-- 
Doug Goldstein

--
libvir-list mailing list
libvir-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/libvir-list

