Re: [libvirt] KVM migration patch

On Tue, Jun 03, 2008 at 09:17:01AM +0100, Richard W.M. Jones wrote:
> Index: qemud/remote.c
> ===================================================================
> RCS file: /data/cvs/libvirt/qemud/remote.c,v
> retrieving revision 1.35
> diff -u -p -r1.35 remote.c
> --- qemud/remote.c	23 May 2008 08:24:44 -0000	1.35
> +++ qemud/remote.c	3 Jun 2008 08:17:52 -0000
> @@ -1346,6 +1346,66 @@ remoteDispatchDomainMigrateFinish (struc
>  }
>  
>  static int
> +remoteDispatchDomainMigratePrepare2 (struct qemud_server *server ATTRIBUTE_UNUSED,
> +                                     struct qemud_client *client,
> +                                     remote_message_header *req,
> +                                     remote_domain_migrate_prepare2_args *args,
> +                                     remote_domain_migrate_prepare2_ret *ret)
> +{
> +    int r;
> +    char *cookie = NULL;
> +    int cookielen = 0;
> +    char *uri_in;
> +    char **uri_out;
> +    char *dname;
> +    CHECK_CONN (client);
> +
> +    uri_in = args->uri_in == NULL ? NULL : *args->uri_in;
> +    dname = args->dname == NULL ? NULL : *args->dname;
> +
> +    /* Wacky world of XDR ... */
> +    uri_out = calloc (1, sizeof (*uri_out));

This ought to be updated to use

    if (VIR_ALLOC(uri_out) < 0) {
        remoteDispatchSendError(client, req, VIR_ERR_NO_MEMORY, NULL);
        return -2;
    }

> +
> +    r = __virDomainMigratePrepare2 (client->conn, &cookie, &cookielen,
> +                                    uri_in, uri_out,
> +                                    args->flags, dname, args->resource,
> +                                    args->dom_xml);
> +    if (r == -1) return -1;
> +
> +    /* remoteDispatchClientRequest will free cookie, uri_out and
> +     * the string if there is one.
> +     */
> +    ret->cookie.cookie_len = cookielen;
> +    ret->cookie.cookie_val = cookie;
> +    ret->uri_out = *uri_out == NULL ? NULL : uri_out;

This will leak uri_out in the first half of the conditional, since the
container allocated above is never freed when *uri_out is NULL.
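One way to avoid that, as a rough sketch using the VIR_FREE macro:

    if (*uri_out == NULL) {
        /* nothing to return; drop the container allocated above */
        VIR_FREE (uri_out);
        ret->uri_out = NULL;
    } else {
        ret->uri_out = uri_out;
    }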

> +static int
> +remoteDispatchDomainMigrateFinish2 (struct qemud_server *server ATTRIBUTE_UNUSED,
> +                                    struct qemud_client *client,
> +                                    remote_message_header *req,
> +                                    remote_domain_migrate_finish2_args *args,
> +                                    remote_domain_migrate_finish2_ret *ret)
> +{
> +    virDomainPtr ddom;
> +    CHECK_CONN (client);
> +
> +    ddom = __virDomainMigrateFinish2 (client->conn, args->dname,
> +                                      args->cookie.cookie_val,
> +                                      args->cookie.cookie_len,
> +                                      args->uri,
> +                                      args->flags,
> +                                      args->retcode);
> +    if (ddom == NULL) return -1;
> +
> +    make_nonnull_domain (&ret->ddom, ddom);

Need to call  virDomainFree(ddom) after this.
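Since make_nonnull_domain copies what it needs into the XDR struct, the
reference can be dropped straight away, e.g.:

    make_nonnull_domain (&ret->ddom, ddom);
    /* drop the reference returned by __virDomainMigrateFinish2 */
    virDomainFree (ddom);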

> diff -u -p -r1.143 libvirt.c
> --- src/libvirt.c	29 May 2008 19:23:17 -0000	1.143
> +++ src/libvirt.c	3 Jun 2008 08:17:57 -0000
> +    else /* if (version == 2) */ {
> +        /* In version 2 of the protocol, the prepare step is slightly
> +         * different.  We fetch the domain XML of the source domain
> +         * and pass it to Prepare2.
> +         */
> +        if (!conn->driver->domainDumpXML) {
> +            virLibConnError (conn, VIR_ERR_INTERNAL_ERROR, __FUNCTION__);
> +            return NULL;
> +        }
> +        dom_xml = conn->driver->domainDumpXML (domain,
> +                                               VIR_DOMAIN_XML_SECURE);
> +
> +        if (!dom_xml)
> +            return NULL;
> +
> +        ret = dconn->driver->domainMigratePrepare2
> +            (dconn, &cookie, &cookielen, uri, &uri_out, flags, dname,
> +             bandwidth, dom_xml);
> +        free (dom_xml);

Can use  VIR_FREE now
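i.e. simply:

    VIR_FREE (dom_xml);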

> @@ -2190,18 +2230,28 @@ virDomainMigrate (virDomainPtr domain,
>      ret = conn->driver->domainMigratePerform
>          (domain, cookie, cookielen, uri, flags, dname, bandwidth);
>  
> -    if (ret == -1) goto done;
> -
> -    /* Get the destination domain and return it or error.
> -     * 'domain' no longer actually exists at this point (or so we hope), but
> -     * we still use the object in memory in order to get the name.
> -     */
> -    dname = dname ? dname : domain->name;
> -    if (dconn->driver->domainMigrateFinish)
> -        ddomain = dconn->driver->domainMigrateFinish
> -            (dconn, dname, cookie, cookielen, uri, flags);
> -    else
> -        ddomain = virDomainLookupByName (dconn, dname);
> +    if (version == 1) {
> +        if (ret == -1) goto done;
> +        /* Get the destination domain and return it or error.
> +         * 'domain' no longer actually exists at this point
> +         * (or so we hope), but we still use the object in memory
> +         * in order to get the name.
> +         */
> +        dname = dname ? dname : domain->name;
> +        if (dconn->driver->domainMigrateFinish)
> +            ddomain = dconn->driver->domainMigrateFinish
> +                (dconn, dname, cookie, cookielen, uri, flags);
> +        else
> +            ddomain = virDomainLookupByName (dconn, dname);

So this code suggests that  domainMigrateFinish is optional in v1
of the migration protocol...

> +    } else /* if (version == 2) */ {
> +        /* In version 2 of the migration protocol, we pass the
> +         * status code from the sender to the destination host,
> +         * so it can do any cleanup if the migration failed.
> +         */
> +        dname = dname ? dname : domain->name;
> +        ddomain = dconn->driver->domainMigrateFinish2
> +            (dconn, dname, cookie, cookielen, uri, flags, ret);
> +    }

And compulsory in v2? Is that the correct understanding? What if we wanted
to make the Xen driver use v2, but it doesn't require the finish method?
I guess we could just no-op it.
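For instance, a purely hypothetical no-op Finish2 for the Xen driver
(signature copied from the Finish2 driver entry point in this patch) might
be no more than:

    static virDomainPtr
    xenDomainMigrateFinish2 (virConnectPtr dconn,
                             const char *dname,
                             const char *cookie ATTRIBUTE_UNUSED,
                             int cookielen ATTRIBUTE_UNUSED,
                             const char *uri ATTRIBUTE_UNUSED,
                             unsigned long flags ATTRIBUTE_UNUSED,
                             int retcode)
    {
        /* nothing to clean up; just resolve the migrated domain on success */
        if (retcode != 0)
            return NULL;
        return virDomainLookupByName (dconn, dname);
    }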

> +/* Migration support. */
> +
> +/* Prepare is the first step, and it runs on the destination host.
> + *
> + * This starts an empty VM listening on a TCP port.
> +*/
> +static int
> +qemudDomainMigratePrepare2 (virConnectPtr dconn,
> +                            char **cookie ATTRIBUTE_UNUSED,
> +                            int *cookielen ATTRIBUTE_UNUSED,
> +                            const char *uri_in,
> +                            char **uri_out,
> +                            unsigned long flags ATTRIBUTE_UNUSED,
> +                            const char *dname,
> +                            unsigned long resource ATTRIBUTE_UNUSED,
> +                            const char *dom_xml)
> +{
> +    static int port = 0;
> +    struct qemud_driver *driver = (struct qemud_driver *)dconn->privateData;
> +    struct qemud_vm_def *def;
> +    struct qemud_vm *vm;
> +    int this_port;
> +    char hostname [HOST_NAME_MAX+1];
> +    const char *p;
> +    const int uri_length_max =
> +        6 /* "tcp://" */ + 
> +        sizeof (hostname) /* hostname */ +
> +        1 + 5 + 1 /* :port\0 */;
> +
> +    if (!dom_xml) {
> +        qemudReportError (dconn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
> +                          "%s", _("no domain XML passed"));
> +        return -1;
> +    }
> +
> +    /* The URI passed in may be NULL or a string "tcp://somehostname:port".
> +     *
> +     * If the URI passed in is NULL then we allocate a port number
> +     * from our pool of port numbers and return a URI of
> +     * "tcp://ourhostname:port".
> +     *
> +     * If the URI passed in is not NULL then we try to parse out the
> +     * port number and use that (note that the hostname is assumed
> +     * to be a correct hostname which refers to the target machine).
> +     */
> +    if (uri_in == NULL) {
> +        this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
> +        if (port == QEMUD_MIGRATION_NUM_PORTS) port = 0;

This is a little fragile - although the pool of 64 numbers is unlikely to
wrap around and collide in 'regular' use, if some client of libvirt were
misbehaving it's conceivable that it could issue a large number of migrate
requests and cause a collision. I think we ought to explicitly track the
ports in use (and auto-expire them if a client crashes part way through).
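As a rough sketch of the kind of tracking I mean (the migrationPorts field
and both helpers are made up, they don't exist in the current driver):

    /* hypothetical addition to struct qemud_driver:
     *     unsigned char migrationPorts[QEMUD_MIGRATION_NUM_PORTS];
     */

    static int
    qemudAllocateMigrationPort (struct qemud_driver *driver)
    {
        int i;
        for (i = 0; i < QEMUD_MIGRATION_NUM_PORTS; i++) {
            if (!driver->migrationPorts[i]) {
                driver->migrationPorts[i] = 1;
                return QEMUD_MIGRATION_FIRST_PORT + i;
            }
        }
        return -1; /* every migration port is currently in use */
    }

    static void
    qemudReleaseMigrationPort (struct qemud_driver *driver, int port)
    {
        int i = port - QEMUD_MIGRATION_FIRST_PORT;
        if (i >= 0 && i < QEMUD_MIGRATION_NUM_PORTS)
            driver->migrationPorts[i] = 0;
    }

The release side would need to be hooked into Finish2 / VM shutdown to get
the auto-expiry behaviour.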

> +        /* Caller frees */
> +        *uri_out = malloc (uri_length_max);

Can use  VIR_ALLOC_N(*uri_out, uri_length_max) here
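e.g. (with the usual OOM report):

    if (VIR_ALLOC_N (*uri_out, uri_length_max) < 0) {
        qemudReportError (dconn, NULL, NULL, VIR_ERR_NO_MEMORY,
                          "%s", _("out of memory allocating migration URI"));
        return -1;
    }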

> +    } else {
> +        /* Check the URI starts with "tcp://".  We will escape the
> +         * URI when passing it to the qemu monitor, so bad
> +         * characters in hostname part don't matter.
> +         */
> +        if (!STREQLEN (uri_in, "tcp://", 6)) {
> +            qemudReportError (dconn, NULL, NULL, VIR_ERR_INVALID_ARG,
> +                  "%s", _("only tcp URIs are supported for KVM migrations"));
> +            return -1;
> +        }
> +
> +        /* Get the port number. */
> +        p = strrchr (uri_in, ':');
> +        p++; /* definitely has a ':' in it, see above */

I think this should use the libxml2  URI parsing code instead.
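Rough sketch of what that could look like, using xmlParseURI / xmlFreeURI
from <libxml/uri.h> (the error messages are just placeholders):

    xmlURIPtr parsed;

    parsed = xmlParseURI (uri_in);
    if (parsed == NULL) {
        qemudReportError (dconn, NULL, NULL, VIR_ERR_INVALID_ARG,
                          "%s", _("unable to parse migration URI"));
        return -1;
    }
    if (parsed->scheme == NULL ||
        strcmp (parsed->scheme, "tcp") != 0 ||
        parsed->port == 0) {
        qemudReportError (dconn, NULL, NULL, VIR_ERR_INVALID_ARG,
              "%s", _("only tcp://host:port URIs are supported for KVM migrations"));
        xmlFreeURI (parsed);
        return -1;
    }
    this_port = parsed->port;
    xmlFreeURI (parsed);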

> +    /* Parse the domain XML. */
> +    if (!(def = qemudParseVMDef (dconn, driver, dom_xml, NULL))) {
> +        qemudReportError (dconn, NULL, NULL, VIR_ERR_OPERATION_FAILED,
> +                          "%s", _("failed to parse XML"));
> +        return -1;
> +    }
> +
> +    /* Target domain name, maybe renamed. */
> +    dname = dname ? dname : def->name;
> +
> +    /* Ensure the name and UUID don't already exist in an active VM */
> +    vm = qemudFindVMByUUID (driver, def->uuid);
> +    if (!vm) vm = qemudFindVMByName (driver, dname);
> +    if (vm) {
> +        qemudReportError (dconn, NULL, NULL, VIR_ERR_OPERATION_FAILED,
> +                          _("domain is already active as '%s'"), vm->def->name);
> +        return -1;
> +    }
> +
> +    if (!(vm = qemudAssignVMDef (dconn, driver, def))) {
> +        qemudReportError (dconn, NULL, NULL, VIR_ERR_OPERATION_FAILED,
> +                          "%s", _("failed to assign new VM"));
> +        qemudFreeVMDef (def);
> +        return -1;
> +    }
> +
> +    /* Start the QEMU daemon, with the same command-line arguments plus
> +     * -incoming tcp://0:port
> +     */
> +    snprintf (vm->migrateFrom, sizeof (vm->migrateFrom),
> +              "tcp://0:%d", this_port);
> +    if (qemudStartVMDaemon (dconn, driver, vm) < 0) {
> +        qemudReportError (dconn, NULL, NULL, VIR_ERR_OPERATION_FAILED,
> +                          "%s", _("failed to start listening VM"));
> +        if (!vm->configFile[0])
> +            qemudRemoveInactiveVM(driver, vm);
> +        return -1;
> +    }

I don't think we're handling persistent config files correctly in this
code. There are several scenarios we need to deal with:

  Source: transient     Target: none                   -> transient
  Source: transient     Target: transient, running     -> error
  Source: transient     Target: persistent, inactive   -> migrate, update active config
  Source: transient     Target: persistent, running    -> error
  Source: persistent    Target: none                   -> migrate & save persistent config on success
  Source: persistent    Target: transient, running     -> error
  Source: persistent    Target: persistent, inactive   -> migrate, update active config
  Source: persistent    Target: persistent, running    -> error

In particular, if the target already has a persistent config we need to be
careful not to remove that config upon migration failure. We also need to
make sure that if we create a persistent config, SaveVMDef is called
to actually flush it to disk.

> +/* Perform is the second step, and it runs on the source host. */
> +static int
> +qemudDomainMigratePerform (virDomainPtr dom,
> +                           const char *cookie ATTRIBUTE_UNUSED,
> +                           int cookielen ATTRIBUTE_UNUSED,
> +                           const char *uri,
> +                           unsigned long flags ATTRIBUTE_UNUSED,
> +                           const char *dname ATTRIBUTE_UNUSED,
> +                           unsigned long resource)
> +{
> +    struct qemud_driver *driver = (struct qemud_driver *)dom->conn->privateData;
> +    struct qemud_vm *vm = qemudFindVMByID (driver, dom->id);
> +    char *safe_uri;
> +    char cmd[HOST_NAME_MAX+50];
> +    char *info;
> +
> +    if (!vm) {
> +        qemudReportError (dom->conn, dom, NULL, VIR_ERR_INVALID_DOMAIN,
> +                          _("no domain with matching id %d"), dom->id);
> +        return -1;
> +    }
> +
> +    if (!qemudIsActiveVM (vm)) {
> +        qemudReportError (dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
> +                          "%s", _("domain is not running"));
> +        return -1;
> +    }
> +
> +    if (resource > 0) {
> +        /* Issue migrate_set_speed command.  Don't worry if it fails. */
> +        snprintf (cmd, sizeof cmd, "migrate_set_speed %lum", resource);
> +        qemudMonitorCommand (driver, vm, cmd, &info);
> +
> +        DEBUG ("migrate_set_speed reply: %s", info);
> +        free (info);
> +    }
> +
> +    /* Issue the migrate command. */
> +    safe_uri = qemudEscapeMonitorArg (uri);
> +    if (!safe_uri) {
> +        qemudReportError (dom->conn, dom, NULL, VIR_ERR_SYSTEM_ERROR,
> +                          "%s", strerror (errno));
> +        return -1;
> +    }
> +    snprintf (cmd, sizeof cmd, "migrate \"%s\"", safe_uri);
> +    free (safe_uri);
> +
> +    if (qemudMonitorCommand (driver, vm, cmd, &info) < 0) {
> +        qemudReportError (dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
> +                          "%s", _("migrate operation failed"));
> +        return -1;
> +    }
> +
> +    DEBUG ("migrate reply: %s", info);
> +    free (info);
> +
> +    /* Clean up the source domain. */
> +    qemudShutdownVMDaemon (dom->conn, driver, vm);
> +    if (!vm->configFile[0])
> +        qemudRemoveInactiveVM (driver, vm);
> +
> +    return 0;
> +}

Need to use  VIR_FREE() in various places here.
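e.g. the monitor replies and the escaped URI:

    VIR_FREE (info);
    VIR_FREE (safe_uri);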


Dan,
-- 
|: Red Hat, Engineering, London   -o-   http://people.redhat.com/berrange/ :|
|: http://libvirt.org  -o-  http://virt-manager.org  -o-  http://ovirt.org :|
|: http://autobuild.org       -o-         http://search.cpan.org/~danberr/ :|
|: GnuPG: 7D3B9505  -o-  F3C9 553F A1DA 4AC2 5648 23C1 B3DF F742 7D3B 9505 :|

--
Libvir-list mailing list
Libvir-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/libvir-list
