Re: [PATCH v13] support offline migration

Hi Jiri, Daniel,
Any comments on this patch?


On Mon, 2012-11-05 at 12:17 +0800, liguang wrote:
> The original migration code was not aware of the offline case, so
> this patch adds support for offline migration quietly (without
> disturbing the existing migration flow) by passing the
> VIR_MIGRATE_OFFLINE flag to the migration APIs, and only when the
> domain is really inactive, so that the migration process is not
> confused by an offline domain and does not exit unexpectedly.
> These changes do not take care of the disk images the domain
> requires, since those can be transferred by other APIs as suggested;
> therefore VIR_MIGRATE_OFFLINE must not be combined with
> VIR_MIGRATE_NON_SHARED_*.
> If you want a persistent migration, you have to request it yourself
> with "virsh migrate --persistent".
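> 
> For illustration only (not part of the patch): a minimal sketch of
> how a client could request an offline migration once this is applied.
> The domain name, connection handles, and helper name are placeholders.
> 
>   #include <libvirt/libvirt.h>
> 
>   /* Migrate the inactive domain "guest" from src to dst.
>    * VIR_MIGRATE_PERSIST_DEST is mandatory here, and the
>    * VIR_MIGRATE_NON_SHARED_* flags must not be set. */
>   static int
>   migrate_offline(virConnectPtr src, virConnectPtr dst)
>   {
>       unsigned long flags = VIR_MIGRATE_OFFLINE | VIR_MIGRATE_PERSIST_DEST;
>       virDomainPtr dom, ddom;
>       int ret = -1;
> 
>       if (!(dom = virDomainLookupByName(src, "guest")))
>           return -1;
>       if ((ddom = virDomainMigrate(dom, dst, flags, NULL, NULL, 0))) {
>           virDomainFree(ddom);  /* definition now exists on dst */
>           ret = 0;
>       }
>       virDomainFree(dom);
>       return ret;
>   }
> 
> The virsh equivalent would be something like
> "virsh migrate --offline --persistent guest qemu+ssh://dst/system".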
> 
> v12:
> rebased to resolve conflicts with commit 2f3e2c0c434218a3d656c08779cb98b327170e11,
> and incorporated some wording from Doug Goldstein's patch
> https://www.redhat.com/archives/libvir-list/2012-October/msg00957.html
> 
> v13:
> addressed review comments from Jiri Denemark
> https://www.redhat.com/archives/libvir-list/2012-November/msg00153.html
> 
> Signed-off-by: liguang <lig.fnst@xxxxxxxxxxxxxx>
> ---
>  include/libvirt/libvirt.h.in |    1 +
>  src/qemu/qemu_driver.c       |    8 ++--
>  src/qemu/qemu_migration.c    |   99 +++++++++++++++++++++++++++++++++---------
>  src/qemu/qemu_migration.h    |    3 +-
>  tools/virsh-domain.c         |   10 ++++
>  5 files changed, 95 insertions(+), 26 deletions(-)
> 
> diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
> index fe58c08..1e0500d 100644
> --- a/include/libvirt/libvirt.h.in
> +++ b/include/libvirt/libvirt.h.in
> @@ -1090,6 +1090,7 @@ typedef enum {
>                                                 * whole migration process; this will be used automatically
>                                                 * when supported */
>      VIR_MIGRATE_UNSAFE            = (1 << 9), /* force migration even if it is considered unsafe */
> +    VIR_MIGRATE_OFFLINE           = (1 << 10), /* offline migrate */
>  } virDomainMigrateFlags;
>  
>  /* Domain migration. */
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index 978af57..6c2bf98 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -9796,7 +9796,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
>          asyncJob = QEMU_ASYNC_JOB_NONE;
>      }
>  
> -    if (!virDomainObjIsActive(vm)) {
> +    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
>          virReportError(VIR_ERR_OPERATION_INVALID,
>                         "%s", _("domain is not running"));
>          goto endjob;
> @@ -9805,9 +9805,9 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
>      /* Check if there is any ejected media.
>       * We don't want to require them on the destination.
>       */
> -
> -    if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
> -        goto endjob;
> +    if (virDomainObjIsActive(vm))
> +        if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
> +            goto endjob;
>  
>      if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
>                                     cookieout, cookieoutlen,
> diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
> index 5f8a9c5..66fbc02 100644
> --- a/src/qemu/qemu_migration.c
> +++ b/src/qemu/qemu_migration.c
> @@ -72,6 +72,7 @@ enum qemuMigrationCookieFlags {
>      QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
>      QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT,
>      QEMU_MIGRATION_COOKIE_FLAG_NETWORK,
> +    QEMU_MIGRATION_COOKIE_FLAG_OFFLINE,
>  
>      QEMU_MIGRATION_COOKIE_FLAG_LAST
>  };
> @@ -79,13 +80,14 @@ enum qemuMigrationCookieFlags {
>  VIR_ENUM_DECL(qemuMigrationCookieFlag);
>  VIR_ENUM_IMPL(qemuMigrationCookieFlag,
>                QEMU_MIGRATION_COOKIE_FLAG_LAST,
> -              "graphics", "lockstate", "persistent", "network");
> +              "graphics", "lockstate", "persistent", "network", "offline");
>  
>  enum qemuMigrationCookieFeatures {
>      QEMU_MIGRATION_COOKIE_GRAPHICS  = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
>      QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
>      QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT),
>      QEMU_MIGRATION_COOKIE_NETWORK = (1 << QEMU_MIGRATION_COOKIE_FLAG_NETWORK),
> +    QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE),
>  };
>  
>  typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
> @@ -594,6 +596,9 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver,
>      if ((mig->flags & QEMU_MIGRATION_COOKIE_NETWORK) && mig->network)
>          qemuMigrationCookieNetworkXMLFormat(buf, mig->network);
>  
> +    if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE)
> +        virBufferAddLit(buf, "  <offline/>\n");
> +
>      virBufferAddLit(buf, "</qemu-migration>\n");
>      return 0;
>  }
> @@ -874,6 +879,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
>          (!(mig->network = qemuMigrationCookieNetworkXMLParse(ctxt))))
>          goto error;
>  
> +    if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
> +        if (virXPathBoolean("count(./offline) > 0", ctxt) > 0)
> +            mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
> +    }
> +
>      return 0;
>  
>  error:
> @@ -938,6 +948,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
>          return -1;
>      }
>  
> +    if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
> +        mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
> +    }
> +
>      if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig)))
>          return -1;
>  
> @@ -1443,6 +1457,24 @@ char *qemuMigrationBegin(struct qemud_driver *driver,
>                                  QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
>          goto cleanup;
>  
> +    if (flags & VIR_MIGRATE_OFFLINE) {
> +        if (flags & (VIR_MIGRATE_NON_SHARED_DISK|
> +                     VIR_MIGRATE_NON_SHARED_INC)) {
> +            virReportError(VIR_ERR_OPERATION_INVALID,
> +                           "%s", _("offline migration cannot handle non-shared storage"));
> +            goto cleanup;
> +        }
> +        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
> +            virReportError(VIR_ERR_OPERATION_INVALID,
> +                           "%s", _("offline migration must be specified with the persistent flag set"));
> +            goto cleanup;
> +        }
> +        if (qemuMigrationBakeCookie(mig, driver, vm,
> +                                    cookieout, cookieoutlen,
> +                                    QEMU_MIGRATION_COOKIE_OFFLINE) < 0)
> +            goto cleanup;
> +    }
> +
>      if (xmlin) {
>          if (!(def = virDomainDefParseString(driver->caps, xmlin,
>                                              QEMU_EXPECTED_VIRT_TYPES,
> @@ -1607,6 +1639,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
>          goto endjob;
>      }
>  
> +    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
> +                                       QEMU_MIGRATION_COOKIE_OFFLINE)))
> +        goto endjob;
> +
> +    if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
> +        ret = 0;
> +        goto done;
> +    }
> +
>      /* Start the QEMU daemon, with the same command-line arguments plus
>       * -incoming $migrateFrom
>       */
> @@ -1658,6 +1699,7 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
>                                       VIR_DOMAIN_EVENT_STARTED,
>                                       VIR_DOMAIN_EVENT_STARTED_MIGRATED);
>  
> +done:
>      /* We keep the job active across API calls until the finish() call.
>       * This prevents any other APIs being invoked while incoming
>       * migration is taking place.
> @@ -2150,6 +2192,9 @@ qemuMigrationRun(struct qemud_driver *driver,
>          return -1;
>      }
>  
> +    if (flags & VIR_MIGRATE_OFFLINE)
> +        return 0;
> +
>      if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
>                                         QEMU_MIGRATION_COOKIE_GRAPHICS)))
>          goto cleanup;
> @@ -2665,7 +2710,12 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
>               uri, &uri_out, flags, dname, resource, dom_xml);
>          qemuDomainObjExitRemoteWithDriver(driver, vm);
>      }
> +
>      VIR_FREE(dom_xml);
> +
> +    if (flags & VIR_MIGRATE_OFFLINE)
> +        goto cleanup;
> +
>      if (ret == -1)
>          goto cleanup;
>  
> @@ -2771,7 +2821,7 @@ finish:
>                   vm->def->name);
>  
>   cleanup:
> -    if (ddomain) {
> +    if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) {
>          virObjectUnref(ddomain);
>          ret = 0;
>      } else {
> @@ -2848,7 +2898,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver,
>      }
>  
>      /* domain may have been stopped while we were talking to remote daemon */
> -    if (!virDomainObjIsActive(vm)) {
> +    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
>          virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>                         _("guest unexpectedly quit"));
>          goto cleanup;
> @@ -2911,7 +2961,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver,
>      if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
>          goto cleanup;
>  
> -    if (!virDomainObjIsActive(vm)) {
> +    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
>          virReportError(VIR_ERR_OPERATION_INVALID,
>                         "%s", _("domain is not running"));
>          goto endjob;
> @@ -3235,26 +3285,27 @@ qemuMigrationFinish(struct qemud_driver *driver,
>       * object, but if no, clean up the empty qemu process.
>       */
>      if (retcode == 0) {
> -        if (!virDomainObjIsActive(vm)) {
> +        if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
>              virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>                             _("guest unexpectedly quit"));
>              goto endjob;
>          }
>  
> -        if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
> -            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
> -                            VIR_QEMU_PROCESS_STOP_MIGRATED);
> -            virDomainAuditStop(vm, "failed");
> -            event = virDomainEventNewFromObj(vm,
> -                                             VIR_DOMAIN_EVENT_STOPPED,
> -                                             VIR_DOMAIN_EVENT_STOPPED_FAILED);
> -            goto endjob;
> +        if (!(flags & VIR_MIGRATE_OFFLINE)) {
> +            if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
> +                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
> +                                VIR_QEMU_PROCESS_STOP_MIGRATED);
> +                virDomainAuditStop(vm, "failed");
> +                event = virDomainEventNewFromObj(vm,
> +                                                 VIR_DOMAIN_EVENT_STOPPED,
> +                                                 VIR_DOMAIN_EVENT_STOPPED_FAILED);
> +                goto endjob;
> +            }
> +            if (mig->network)
> +                if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
> +                    VIR_WARN("unable to provide network data for relocation");
>          }
>  
> -        if (mig->network)
> -            if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
> -                VIR_WARN("unable to provide network data for relocation");
> -
>          if (flags & VIR_MIGRATE_PERSIST_DEST) {
>              virDomainDefPtr vmdef;
>              if (vm->persistent)
> @@ -3302,7 +3353,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
>              event = NULL;
>          }
>  
> -        if (!(flags & VIR_MIGRATE_PAUSED)) {
> +        if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) {
>              /* run 'cont' on the destination, which allows migration on qemu
>               * >= 0.10.6 to work properly.  This isn't strictly necessary on
>               * older qemu's, but it also doesn't hurt anything there
> @@ -3351,9 +3402,11 @@ qemuMigrationFinish(struct qemud_driver *driver,
>                                               VIR_DOMAIN_EVENT_SUSPENDED,
>                                               VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
>          }
> -        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
> -            VIR_WARN("Failed to save status on vm %s", vm->def->name);
> -            goto endjob;
> +        if (virDomainObjIsActive(vm)) {
> +            if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
> +                VIR_WARN("Failed to save status on vm %s", vm->def->name);
> +                goto endjob;
> +            }
>          }
>  
>          /* Guest is successfully running, so cancel previous auto destroy */
> @@ -3420,6 +3473,9 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
>      if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
>          return -1;
>  
> +    if (flags & VIR_MIGRATE_OFFLINE)
> +        goto done;
> +
>      /* Did the migration go as planned?  If yes, kill off the
>       * domain object, but if no, resume CPUs
>       */
> @@ -3455,6 +3511,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
>          }
>      }
>  
> +done:
>      qemuMigrationCookieFree(mig);
>      rv = 0;
>  
> diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
> index 7a2269a..b4f6a77 100644
> --- a/src/qemu/qemu_migration.h
> +++ b/src/qemu/qemu_migration.h
> @@ -36,7 +36,8 @@
>       VIR_MIGRATE_NON_SHARED_DISK |              \
>       VIR_MIGRATE_NON_SHARED_INC |               \
>       VIR_MIGRATE_CHANGE_PROTECTION |            \
> -     VIR_MIGRATE_UNSAFE)
> +     VIR_MIGRATE_UNSAFE |                       \
> +     VIR_MIGRATE_OFFLINE)
>  
>  enum qemuMigrationJobPhase {
>      QEMU_MIGRATION_PHASE_NONE = 0,
> diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
> index 393b67b..54ba63a 100644
> --- a/tools/virsh-domain.c
> +++ b/tools/virsh-domain.c
> @@ -6644,6 +6644,7 @@ static const vshCmdInfo info_migrate[] = {
>  
>  static const vshCmdOptDef opts_migrate[] = {
>      {"live", VSH_OT_BOOL, 0, N_("live migration")},
> +    {"offline", VSH_OT_BOOL, 0, N_("offline (domain's inactive) migration")},
>      {"p2p", VSH_OT_BOOL, 0, N_("peer-2-peer migration")},
>      {"direct", VSH_OT_BOOL, 0, N_("direct migration")},
>      {"tunneled", VSH_OT_ALIAS, 0, "tunnelled"},
> @@ -6729,6 +6730,15 @@ doMigrate(void *opaque)
>      if (vshCommandOptBool(cmd, "unsafe"))
>          flags |= VIR_MIGRATE_UNSAFE;
>  
> +    if (vshCommandOptBool(cmd, "offline")) {
> +        flags |= VIR_MIGRATE_OFFLINE;
> +    }
> +
> +    if ((flags & VIR_MIGRATE_OFFLINE) && virDomainIsActive(dom) == 1) {
> +        vshError(ctl, "%s", _("offline migration is only possible for an inactive domain"));
> +        goto out;
> +    }
> +
>      if (xmlfile &&
>          virFileReadAll(xmlfile, 8192, &xml) < 0) {
>          vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
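
For reference, with this patch applied the begin-phase migration
cookie simply gains an empty <offline/> element inside the existing
<qemu-migration> document (all other cookie fields elided here):

  <qemu-migration>
    ...
    <offline/>
  </qemu-migration>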

-- 
liguang    lig.fnst@xxxxxxxxxxxxxx
FNST linux kernel team




