On Tue, Jul 21, 2009 at 01:12:04PM +0200, Miloslav Trmač wrote:
> Also fix a potential infinite loop in qemuDomainCoreDump() if sending
> cont repeatedly fails.
> ---
>  src/qemu_driver.c |   43 ++++++++++++++++++-------------------------
>  1 files changed, 18 insertions(+), 25 deletions(-)

Looks good to me.

Daniel

> 
> diff --git a/src/qemu_driver.c b/src/qemu_driver.c
> index 12079f8..9ead5fd 100644
> --- a/src/qemu_driver.c
> +++ b/src/qemu_driver.c
> @@ -128,6 +128,7 @@ static int qemudMonitorCommandExtra(const virDomainObjPtr vm,
>                                      const char *extra,
>                                      const char *extraPrompt,
>                                      char **reply);
> +static int qemudMonitorSendCont(const virDomainObjPtr vm);
>  static int qemudDomainSetMemoryBalloon(virConnectPtr conn,
>                                         virDomainObjPtr vm,
>                                         unsigned long newmem);
> @@ -1199,7 +1200,6 @@ static int
>  qemudInitCpus(virConnectPtr conn,
>                virDomainObjPtr vm,
>                const char *migrateFrom) {
> -    char *info = NULL;
>  #if HAVE_SCHED_GETAFFINITY
>      cpu_set_t mask;
>      int i, maxcpu = QEMUD_CPUMASK_LEN;
> @@ -1235,12 +1235,11 @@ qemudInitCpus(virConnectPtr conn,
>  
>      if (migrateFrom == NULL) {
>          /* Allow the CPUS to start executing */
> -        if (qemudMonitorCommand(vm, "cont", &info) < 0) {
> +        if (qemudMonitorSendCont(vm) < 0) {
>              qemudReportError(conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
>                               "%s", _("resume operation failed"));
>              return -1;
>          }
> -        VIR_FREE(info);
>      }
>  
>      return 0;
> @@ -2172,6 +2171,17 @@ qemudMonitorCommand(const virDomainObjPtr vm,
>      return qemudMonitorCommandExtra(vm, cmd, NULL, NULL, reply);
>  }
>  
> +static int
> +qemudMonitorSendCont(const virDomainObjPtr vm) {
> +    char *reply;
> +
> +    if (qemudMonitorCommand(vm, "cont", &reply) < 0)
> +        return -1;
> +    qemudDebug ("%s: cont reply: %s", vm->def->name, reply);
> +    VIR_FREE(reply);
> +    return 0;
> +}
> +
>  
>  
>  static virDrvOpenStatus qemudOpen(virConnectPtr conn,
> @@ -2633,7 +2643,6 @@ cleanup:
>  
>  static int qemudDomainResume(virDomainPtr dom) {
>      struct qemud_driver *driver = dom->conn->privateData;
> -    char *info;
>      virDomainObjPtr vm;
>      int ret = -1;
>      virDomainEventPtr event = NULL;
> @@ -2654,17 +2663,15 @@ static int qemudDomainResume(virDomainPtr dom) {
>          goto cleanup;
>      }
>      if (vm->state == VIR_DOMAIN_PAUSED) {
> -        if (qemudMonitorCommand(vm, "cont", &info) < 0) {
> +        if (qemudMonitorSendCont(vm) < 0) {
>              qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
>                               "%s", _("resume operation failed"));
>              goto cleanup;
>          }
>          vm->state = VIR_DOMAIN_RUNNING;
> -        qemudDebug("Reply %s", info);
>          event = virDomainEventNewFromObj(vm,
>                                           VIR_DOMAIN_EVENT_RESUMED,
>                                           VIR_DOMAIN_EVENT_RESUMED_UNPAUSED);
> -        VIR_FREE(info);
>      }
>      if (virDomainSaveStatus(dom->conn, driver->stateDir, vm) < 0)
>          goto cleanup;
> @@ -3349,13 +3356,9 @@ cleanup:
>         will support synchronous operations so we always get here after
>         the migration is complete. */
>      if (resume && paused) {
> -        if (qemudMonitorCommand(vm, "cont", &info) < 0) {
> +        if (qemudMonitorSendCont(vm) < 0)
>              qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
>                               "%s", _("resuming after dump failed"));
> -            goto cleanup;
> -        }
> -        DEBUG ("%s: cont reply: %s", vm->def->name, info);
> -        VIR_FREE(info);
>      }
>      if (vm)
>          virDomainObjUnlock(vm);
> @@ -3824,13 +3827,11 @@ static int qemudDomainRestore(virConnectPtr conn,
>  
>      /* If it was running before, resume it now. */
>      if (header.was_running) {
> -        char *info;
> -        if (qemudMonitorCommand(vm, "cont", &info) < 0) {
> +        if (qemudMonitorSendCont(vm) < 0) {
>              qemudReportError(conn, NULL, NULL, VIR_ERR_OPERATION_FAILED,
>                               "%s", _("failed to resume domain"));
>              goto cleanup;
>          }
> -        VIR_FREE(info);
>          vm->state = VIR_DOMAIN_RUNNING;
>      }
>      ret = 0;
> @@ -5645,14 +5646,9 @@ qemudDomainMigratePerform (virDomainPtr dom,
>      ret = 0;
>  
>  cleanup:
> -    /* Note that we have to free info *first*, since we are re-using the
> -     * variable below (and otherwise might cause a memory leak)
> -     */
> -    VIR_FREE(info);
> -
>      if (paused) {
>          /* we got here through some sort of failure; start the domain again */
> -        if (qemudMonitorCommand (vm, "cont", &info) < 0) {
> +        if (qemudMonitorSendCont(vm) < 0) {
>              /* Hm, we already know we are in error here.  We don't want to
>               * overwrite the previous error, though, so we just throw something
>               * to the logs and hope for the best
> @@ -5660,16 +5656,13 @@ cleanup:
>              VIR_ERROR(_("Failed to resume guest %s after failure\n"),
>                        vm->def->name);
>          }
> -        else {
> -            DEBUG ("%s: cont reply: %s", vm->def->name, info);
> -            VIR_FREE(info);
> -        }
>  
>          event = virDomainEventNewFromObj(vm,
>                                           VIR_DOMAIN_EVENT_RESUMED,
>                                           VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
>      }
>  
> +    VIR_FREE(info);
>      if (vm)
>          virDomainObjUnlock(vm);
>      if (event)
> -- 
> 1.6.2.5
> 
> --
> Libvir-list mailing list
> Libvir-list@xxxxxxxxxx
> https://www.redhat.com/mailman/listinfo/libvir-list

-- 
|: Red Hat, Engineering, London   -o-   http://people.redhat.com/berrange/ :|
|: http://libvirt.org  -o-  http://virt-manager.org  -o-  http://ovirt.org :|
|: http://autobuild.org       -o-       http://search.cpan.org/~danberr/   :|
|: GnuPG: 7D3B9505  -o-  F3C9 553F A1DA 4AC2 5648 23C1 B3DF F742 7D3B 9505 :|

--
Libvir-list mailing list
Libvir-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/libvir-list
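
For readers following the refactoring rather than the libvirt internals, a minimal, self-contained sketch of the same pattern is below: before the patch every call site ran the monitor "cont" command itself and freed the reply string, and the patch folds that into one helper (qemudMonitorSendCont() in the diff above). The names here -- struct vm, monitor_command(), monitor_send_cont() -- are stand-ins invented for the sketch, not the real libvirt API.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in VM handle; the real code passes a virDomainObjPtr. */
struct vm {
    const char *name;
};

/* Stand-in for qemudMonitorCommand(): runs a monitor command and hands back
 * a heap-allocated reply that the caller must free.  Returns 0 on success,
 * -1 on failure. */
static int
monitor_command(struct vm *vm, const char *cmd, char **reply)
{
    (void)vm;
    (void)cmd;
    *reply = malloc(8);
    if (*reply == NULL)
        return -1;
    snprintf(*reply, 8, "ok");      /* pretend the monitor answered */
    return 0;
}

/* The consolidated helper, mirroring qemudMonitorSendCont() in the patch:
 * send "cont", log the reply, free it, and report only success or failure,
 * so callers no longer need their own reply variable. */
static int
monitor_send_cont(struct vm *vm)
{
    char *reply;

    if (monitor_command(vm, "cont", &reply) < 0)
        return -1;
    fprintf(stderr, "%s: cont reply: %s\n", vm->name, reply);
    free(reply);
    return 0;
}

int
main(void)
{
    struct vm vm = { "demo" };

    /* Each call site reduces to a single failure check, which is what the
     * patch does in qemudInitCpus(), qemudDomainResume(), qemuDomainCoreDump(),
     * qemudDomainRestore() and qemudDomainMigratePerform(). */
    if (monitor_send_cont(&vm) < 0) {
        fprintf(stderr, "resume operation failed\n");
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}

Freeing the reply inside the helper is what lets the patch drop the per-caller "char *info" variables and the matching VIR_FREE() calls at every resume path.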