Re: [PATCH 11/11] qemu: Refactor qemuDomainBlockJobAbort()

On Thu, Apr 09, 2015 at 20:22:43 +1000, Michael Chapman wrote:
> On Wed, 1 Apr 2015, Peter Krempa wrote:
> > Change few variable names and refactor the code flow. As an additional
> > bonus the function now fails if the event state is not as expected.
> > ---
> > src/qemu/qemu_driver.c | 108 ++++++++++++++++++++++++-------------------------
> > 1 file changed, 52 insertions(+), 56 deletions(-)
> >
> > diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> > index e9431ad..ee5bf8b 100644
> > --- a/src/qemu/qemu_driver.c
> > +++ b/src/qemu/qemu_driver.c
> > @@ -16279,13 +16279,13 @@ qemuDomainBlockJobAbort(virDomainPtr dom,
> > {
> >     virQEMUDriverPtr driver = dom->conn->privateData;
> >     char *device = NULL;
> > -    virObjectEventPtr event = NULL;
> > -    virObjectEventPtr event2 = NULL;
> >     virDomainDiskDefPtr disk;
> >     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
> >     bool save = false;
> >     int idx;
> > -    bool async;
> > +    bool modern;
> > +    bool pivot = !!(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT);
> > +    bool async = !!(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC);
> >     virDomainObjPtr vm;
> >     int ret = -1;
> >
> > @@ -16298,7 +16298,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom,
> >     if (virDomainBlockJobAbortEnsureACL(dom->conn, vm->def) < 0)
> >         goto cleanup;
> >
> > -    if (qemuDomainSupportsBlockJobs(vm, &async) < 0)
> > +    if (qemuDomainSupportsBlockJobs(vm, &modern) < 0)
> >         goto cleanup;
> >
> >     if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
> > @@ -16314,19 +16314,14 @@ qemuDomainBlockJobAbort(virDomainPtr dom,
> >         goto endjob;
> >     disk = vm->def->disks[idx];
> >
> > -    if (async && !(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)) {
> > -        /* prepare state for event delivery */
> > +    if (modern && !async) {
> > +        /* prepare state for event delivery. Since qemuDomainBlockPivot is
> > +         * synchronous, but the event is delivered asynchronously we need to
> > +         * wait too */
> >         disk->blockJobStatus = -1;
> >         disk->blockJobSync = true;
> >     }
> >
> > -    if ((flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) &&
> > -        !(async && disk->mirror)) {
> > -        virReportError(VIR_ERR_OPERATION_INVALID,
> > -                       _("pivot of disk '%s' requires an active copy job"),
> > -                       disk->dst);
> > -        goto endjob;
> > -    }
> >     if (disk->mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_NONE &&
> >         disk->mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_READY) {
> >         virReportError(VIR_ERR_OPERATION_INVALID,
> > @@ -16335,29 +16330,29 @@ qemuDomainBlockJobAbort(virDomainPtr dom,
> >         goto endjob;
> >     }
> >
> > -    if (disk->mirror && (flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)) {
> > -        ret = qemuDomainBlockPivot(driver, vm, device, disk);
> > -        if (ret < 0 && async)
> > +    if (pivot) {
> > +        if (qemuDomainBlockPivot(driver, vm, device, disk) < 0)
> >             goto endjob;
> > -        goto waitjob;
> > -    }
> 
> This still needs to assign to ret here, otherwise we won't return 0 on 
> success.

Indeed. I'm actually puzzled that it worked when I was testing it; I
don't remember whether I made changes and forgot to push them, or just
forgot to restart the daemon after compiling.
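
For what it's worth, a minimal sketch of the pivot branch with the return
value recorded (based purely on the hunk quoted above, so not necessarily
what will end up being pushed):

    if (pivot) {
        /* qemuDomainBlockPivot() is synchronous; keeping its result in
         * ret means a successful pivot makes this API return 0 rather
         * than the initial -1 */
        if ((ret = qemuDomainBlockPivot(driver, vm, device, disk)) < 0)
            goto endjob;
    } else {
        ...     /* cancellation path unchanged from the hunk above */
    }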

> 
> > -    if (disk->mirror) {
> > -        disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_ABORT;
> > -        save = true;
> > -    }
> > +    } else {
> > +        if (disk->mirror) {
> > +            disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_ABORT;
> > +            save = true;
> > +        }
> >
> > -    qemuDomainObjEnterMonitor(driver, vm);
> > -    ret = qemuMonitorBlockJobCancel(qemuDomainGetMonitor(vm), device, async);
> > -    if (qemuDomainObjExitMonitor(driver, vm) < 0)
> > -        ret = -1;
> > +        qemuDomainObjEnterMonitor(driver, vm);
> > +        ret = qemuMonitorBlockJobCancel(qemuDomainGetMonitor(vm), device, modern);
> > +        if (qemuDomainObjExitMonitor(driver, vm) < 0) {
> > +            ret = -1;
> > +            goto endjob;
> > +        }
> >
> > -    if (ret < 0) {
> > -        if (disk->mirror)
> > -            disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
> > -        goto endjob;
> > +        if (ret < 0) {
> > +            if (disk->mirror)
> > +                disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
> > +            goto endjob;
> > +        }
> >     }
> >
> > - waitjob:
> >     /* If we have made changes to XML due to a copy job, make a best
> >      * effort to save it now.  But we can ignore failure, since there
> >      * will be further changes when the event marks completion.  */
> > @@ -16372,33 +16367,38 @@ qemuDomainBlockJobAbort(virDomainPtr dom,
> >      * while still holding the VM job, to prevent newly scheduled
> >      * block jobs from confusing us.  */
> >     if (!async) {
> > -        /* Older qemu that lacked async reporting also lacked
> > -         * blockcopy and active commit, so we can hardcode the
> > -         * event to pull, and we know the XML doesn't need
> > -         * updating.  We have to generate two event variants.  */
> > -        int type = VIR_DOMAIN_BLOCK_JOB_TYPE_PULL;
> > -        int status = VIR_DOMAIN_BLOCK_JOB_CANCELED;
> > -        event = virDomainEventBlockJobNewFromObj(vm, disk->src->path, type,
> > -                                                 status);
> > -        event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type,
> > -                                                   status);
> > -    } else if (disk->blockJobSync) {
> > -        /* XXX If the event reports failure, we should reflect
> > -         * that back into the return status of this API call.  */
> > -
> > -        while (disk->blockJobStatus == -1 && disk->blockJobSync) {
> > -            if (virCondWait(&disk->blockJobSyncCond, &vm->parent.lock) < 0) {
> > -                virReportSystemError(errno, "%s",
> > -                                     _("Unable to wait on block job sync "
> > -                                       "condition"));
> > -                disk->blockJobSync = false;
> > -                goto endjob;
> > +        if (!modern) {
> > +            /* Older qemu that lacked async reporting also lacked
> > +             * blockcopy and active commit, so we can hardcode the
> > +             * event to pull and let qemuBlockJobEventProcess() handle
> > +             * the rest as usual */
> > +            disk->blockJobType = VIR_DOMAIN_BLOCK_JOB_TYPE_PULL;
> > +            disk->blockJobStatus = VIR_DOMAIN_BLOCK_JOB_CANCELED;
> > +        } else {
> > +            while (disk->blockJobStatus == -1 && disk->blockJobSync) {
> > +                if (virCondWait(&disk->blockJobSyncCond, &vm->parent.lock) < 0) {
> > +                    virReportSystemError(errno, "%s",
> > +                                         _("Unable to wait on block job sync "
> > +                                           "condition"));
> > +                    disk->blockJobSync = false;
> > +                    goto endjob;
> > +                }
> >             }
> >         }
> >
> >         qemuBlockJobEventProcess(driver, vm, disk,
> >                                  disk->blockJobType,
> >                                  disk->blockJobStatus);
> > +
> > +        /* adjust the return code */
> > +        if ((pivot && disk->blockJobStatus != VIR_DOMAIN_BLOCK_JOB_COMPLETED) ||
> > +            (!pivot && disk->blockJobStatus != VIR_DOMAIN_BLOCK_JOB_CANCELED)) {
> 
> I'm wondering whether it is simpler to examine disk->mirrorState here 
> rather than disk->blockJobStatus. The second test doesn't look right: QEMU

Actually, mirrorState won't be enough for block jobs that don't use the
mirroring approach, namely block commit to a non-active layer or block
stream (block pull).

> signals COMPLETED if a drive mirror is canceled after it's fully synced.

Hmm, I might want to go re-read the code then ...
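
In case it helps the discussion, a rough sketch of a relaxed check, based
on the hunk above and on your observation (whether COMPLETED really is
delivered for a cancelled, fully synced mirror still needs to be confirmed
against the qemu code):

    /* sketch only: a plain abort may end in either CANCELED or COMPLETED
     * (e.g. a mirror that was already in sync when cancelled), so only
     * treat other states as failure; a pivot still has to end in
     * COMPLETED */
    if ((pivot && disk->blockJobStatus != VIR_DOMAIN_BLOCK_JOB_COMPLETED) ||
        (!pivot && disk->blockJobStatus != VIR_DOMAIN_BLOCK_JOB_CANCELED &&
         disk->blockJobStatus != VIR_DOMAIN_BLOCK_JOB_COMPLETED)) {
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("failed to terminate block job on disk '%s'"),
                       disk->dst);
        ret = -1;
    }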

> 
> > +            virReportError(VIR_ERR_OPERATION_FAILED,
> > +                           _("failed to terminate block job on disk '%s'"),
> > +                           disk->dst);
> > +           ret = -1;
> > +        }
> > +
> >         disk->blockJobSync = false;
> >     }
> >

Thanks for the input.

Peter


