Re: [PATCH v9] support offline migration

ping ...

On Tue, 2012-09-25 at 15:45 +0800, liguang wrote:
> The original migration code was not aware of the offline case,
> so this patch tries to support offline migration quietly
> (without disturbing the existing migration paths) by passing the
> VIR_MIGRATE_OFFLINE flag to the migration APIs only when the
> domain is really inactive; the migration process is then not
> confused by the domain being offline and does not exit
> unexpectedly.
> These changes do not take care of the disk images the domain
> requires, since those can be transferred by other APIs as
> suggested; therefore VIR_MIGRATE_OFFLINE must not be combined
> with VIR_MIGRATE_NON_SHARED_*.
> The result of this migration is simply that the domain
> definition is made available on the target side.
> 
> Signed-off-by: liguang <lig.fnst@xxxxxxxxxxxxxx>
> ---
>  include/libvirt/libvirt.h.in |    1 +
>  src/qemu/qemu_driver.c       |   15 ++++++++++++
>  src/qemu/qemu_migration.c    |   53 ++++++++++++++++++++++++++++++++++++-----
>  src/qemu/qemu_migration.h    |    3 +-
>  tools/virsh-domain.c         |    6 ++++
>  5 files changed, 70 insertions(+), 8 deletions(-)
> 
> diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
> index 79c7689..627e9d4 100644
> --- a/include/libvirt/libvirt.h.in
> +++ b/include/libvirt/libvirt.h.in
> @@ -995,6 +995,7 @@ typedef enum {
>                                                 * whole migration process; this will be used automatically
>                                                 * when supported */
>      VIR_MIGRATE_UNSAFE            = (1 << 9), /* force migration even if it is considered unsafe */
> +    VIR_MIGRATE_OFFLINE           = (1 << 10), /* offline migrate */
>  } virDomainMigrateFlags;
>  
>  /* Domain migration. */
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index 6848924..105febf 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -9631,6 +9631,15 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
>      }
>  
>      if (!virDomainObjIsActive(vm)) {
> +        if (flags & VIR_MIGRATE_OFFLINE) {
> +            if (flags & (VIR_MIGRATE_NON_SHARED_DISK|
> +                         VIR_MIGRATE_NON_SHARED_INC)) {
> +                virReportError(VIR_ERR_OPERATION_INVALID,
> +                               "%s", _("migrating storage handled by volume APIs"));
> +                goto endjob;
> +            }
> +            goto offline;
> +        }
>          virReportError(VIR_ERR_OPERATION_INVALID,
>                         "%s", _("domain is not running"));
>          goto endjob;
> @@ -9643,6 +9652,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
>      if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
>          goto endjob;
>  
> +offline:
>      if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
>                                     cookieout, cookieoutlen,
>                                     flags)))
> @@ -9878,6 +9888,11 @@ qemuDomainMigrateConfirm3(virDomainPtr domain,
>          goto cleanup;
>      }
>  
> +    if (flags & VIR_MIGRATE_OFFLINE) {
> +        ret = 0;
> +        goto cleanup;
> +    }
> +
>      if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
>          goto cleanup;
>  
> diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
> index 8e85875..411a4c2 100644
> --- a/src/qemu/qemu_migration.c
> +++ b/src/qemu/qemu_migration.c
> @@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags {
>      QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
>      QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
>      QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT,
> +    QEMU_MIGRATION_COOKIE_FLAG_OFFLINE,
>  
>      QEMU_MIGRATION_COOKIE_FLAG_LAST
>  };
> @@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags {
>  VIR_ENUM_DECL(qemuMigrationCookieFlag);
>  VIR_ENUM_IMPL(qemuMigrationCookieFlag,
>                QEMU_MIGRATION_COOKIE_FLAG_LAST,
> -              "graphics", "lockstate", "persistent");
> +              "graphics", "lockstate", "persistent", "offline");
>  
>  enum qemuMigrationCookieFeatures {
>      QEMU_MIGRATION_COOKIE_GRAPHICS  = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
>      QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
>      QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT),
> +    QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE),
>  };
>  
>  typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
> @@ -439,6 +441,9 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver,
>          virBufferAdjustIndent(buf, -2);
>      }
>  
> +    if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE)
> +        virBufferAsprintf(buf, "  <offline/>\n");
> +
>      virBufferAddLit(buf, "</qemu-migration>\n");
>      return 0;
>  }
> @@ -662,6 +667,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
>          VIR_FREE(nodes);
>      }
>  
> +    if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE)) {
> +        if (virXPathBoolean("count(./offline) > 0", ctxt))
> +            mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
> +    }
> +
>      return 0;
>  
>  error:
> @@ -721,6 +731,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
>          qemuMigrationCookieAddPersistent(mig, dom) < 0)
>          return -1;
>  
> +    if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
> +        mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
> +    }
> +
>      if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig)))
>          return -1;
>  
> @@ -1151,6 +1165,13 @@ char *qemuMigrationBegin(struct qemud_driver *driver,
>                                  QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
>          goto cleanup;
>  
> +    if (flags & VIR_MIGRATE_OFFLINE) {
> +        if (qemuMigrationBakeCookie(mig, driver, vm,
> +                                    cookieout, cookieoutlen,
> +                                    QEMU_MIGRATION_COOKIE_OFFLINE) < 0)
> +            goto cleanup;
> +    }
> +
>      if (xmlin) {
>          if (!(def = virDomainDefParseString(driver->caps, xmlin,
>                                              QEMU_EXPECTED_VIRT_TYPES,
> @@ -1314,6 +1335,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
>          goto endjob;
>      }
>  
> +    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
> +                                       QEMU_MIGRATION_COOKIE_OFFLINE)))
> +        return ret;
> +
> +    if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
> +        ret = 0;
> +        goto cleanup;
> +    }
> +
>      /* Start the QEMU daemon, with the same command-line arguments plus
>       * -incoming $migrateFrom
>       */
> @@ -1856,7 +1886,8 @@ qemuMigrationRun(struct qemud_driver *driver,
>                         virLockManagerPluginGetName(driver->lockManager));
>          return -1;
>      }
> -
> +    if (flags & VIR_MIGRATE_OFFLINE)
> +        return 0;
>      if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
>                                         QEMU_MIGRATION_COOKIE_GRAPHICS)))
>          goto cleanup;
> @@ -2372,6 +2403,8 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
>          qemuDomainObjExitRemoteWithDriver(driver, vm);
>      }
>      VIR_FREE(dom_xml);
> +    if (flags & VIR_MIGRATE_OFFLINE)
> +        goto cleanup;
>      if (ret == -1)
>          goto cleanup;
>  
> @@ -2477,7 +2510,7 @@ finish:
>                   vm->def->name);
>  
>   cleanup:
> -    if (ddomain) {
> +    if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) {
>          virObjectUnref(ddomain);
>          ret = 0;
>      } else {
> @@ -2554,7 +2587,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver,
>      }
>  
>      /* domain may have been stopped while we were talking to remote daemon */
> -    if (!virDomainObjIsActive(vm)) {
> +    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
>          virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>                         _("guest unexpectedly quit"));
>          goto cleanup;
> @@ -2617,7 +2650,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver,
>      if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
>          goto cleanup;
>  
> -    if (!virDomainObjIsActive(vm)) {
> +    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
>          virReportError(VIR_ERR_OPERATION_INVALID,
>                         "%s", _("domain is not running"));
>          goto endjob;
> @@ -2941,6 +2974,8 @@ qemuMigrationFinish(struct qemud_driver *driver,
>       */
>      if (retcode == 0) {
>          if (!virDomainObjIsActive(vm)) {
> +            if (flags & VIR_MIGRATE_OFFLINE)
> +                goto offline;
>              virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>                             _("guest unexpectedly quit"));
>              goto endjob;
> @@ -3038,7 +3073,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
>                  goto endjob;
>              }
>          }
> -
> +    offline:
>          dom = virGetDomain (dconn, vm->def->name, vm->def->uuid);
>  
>          event = virDomainEventNewFromObj(vm,
> @@ -3074,6 +3109,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
>  endjob:
>      if (qemuMigrationJobFinish(driver, vm) == 0) {
>          vm = NULL;
> +    } else if (flags & VIR_MIGRATE_OFFLINE) {
>      } else if (!vm->persistent && !virDomainObjIsActive(vm)) {
>          qemuDomainRemoveInactive(driver, vm);
>          vm = NULL;
> @@ -3120,7 +3156,10 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
>  
>      if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
>          return -1;
> -
> +    if (flags & VIR_MIGRATE_OFFLINE) {
> +        rv = 0;
> +        goto cleanup;
> +    }
>      /* Did the migration go as planned?  If yes, kill off the
>       * domain object, but if no, resume CPUs
>       */
> diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
> index 7a2269a..b4f6a77 100644
> --- a/src/qemu/qemu_migration.h
> +++ b/src/qemu/qemu_migration.h
> @@ -36,7 +36,8 @@
>       VIR_MIGRATE_NON_SHARED_DISK |              \
>       VIR_MIGRATE_NON_SHARED_INC |               \
>       VIR_MIGRATE_CHANGE_PROTECTION |            \
> -     VIR_MIGRATE_UNSAFE)
> +     VIR_MIGRATE_UNSAFE |                       \
> +     VIR_MIGRATE_OFFLINE)
>  
>  enum qemuMigrationJobPhase {
>      QEMU_MIGRATION_PHASE_NONE = 0,
> diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
> index 505169b..2218379 100644
> --- a/tools/virsh-domain.c
> +++ b/tools/virsh-domain.c
> @@ -6647,6 +6647,7 @@ static const vshCmdOptDef opts_migrate[] = {
>      {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")},
>      {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")},
>      {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")},
> +    {"offline", VSH_OT_BOOL, 0, N_("for offline migration")},
>      {NULL, 0, 0, NULL}
>  };
>  
> @@ -6713,6 +6714,11 @@ doMigrate(void *opaque)
>      if (vshCommandOptBool(cmd, "unsafe"))
>          flags |= VIR_MIGRATE_UNSAFE;
>  
> +    if (vshCommandOptBool(cmd, "offline")) {
> +        if (!virDomainIsActive(dom))
> +            flags |= VIR_MIGRATE_OFFLINE;
> +    }
> +
>      if (xmlfile &&
>          virFileReadAll(xmlfile, 8192, &xml) < 0) {
>          vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
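
For reference, here is a minimal sketch of how a management application
could request this offline migration through the public API once the
patch is applied. The connection URIs and domain name are hypothetical,
and it assumes virDomainMigrate() simply passes VIR_MIGRATE_OFFLINE
through for an inactive domain, as the patch above intends:

#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    /* Hypothetical URIs and domain name, for illustration only. */
    virConnectPtr src = virConnectOpen("qemu:///system");
    virConnectPtr dst = virConnectOpen("qemu+ssh://dest/system");
    virDomainPtr dom = NULL, ddom = NULL;
    int ret = 1;

    if (!src || !dst)
        goto cleanup;

    /* The domain is expected to be inactive (shut off). */
    if (!(dom = virDomainLookupByName(src, "guest1")))
        goto cleanup;

    /* VIR_MIGRATE_OFFLINE transfers only the domain definition;
     * disk images must be moved separately, which is why combining
     * it with VIR_MIGRATE_NON_SHARED_* is rejected by the patch. */
    ddom = virDomainMigrate(dom, dst, VIR_MIGRATE_OFFLINE, NULL, NULL, 0);
    if (!ddom) {
        fprintf(stderr, "offline migration failed\n");
        goto cleanup;
    }
    ret = 0;

cleanup:
    if (ddom)
        virDomainFree(ddom);
    if (dom)
        virDomainFree(dom);
    if (dst)
        virConnectClose(dst);
    if (src)
        virConnectClose(src);
    return ret;
}

The equivalent virsh invocation with the new option from this patch
would be "virsh migrate --offline guest1 qemu+ssh://dest/system", since
--offline only sets the flag when the domain is inactive.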

-- 
liguang    lig.fnst@xxxxxxxxxxxxxx
FNST linux kernel team




