On Mon, Dec 09, 2019 at 11:52:33 -0600, Eric Blake wrote:
> On 12/3/19 11:17 AM, Peter Krempa wrote:
> > This allows to start and manage the backup job.
> >
> > Signed-off-by: Peter Krempa <pkrempa@xxxxxxxxxx>
> > ---
>
> resuming where I left off last time

[...]

> > +    if (!reuse_external) {
> > +        if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData,
> > +                                                   dd->store,
> > +                                                   dd->domdisk->src) < 0)
> > +            return -1;
> > +
> > +        if (qemuBlockStorageSourceCreate(vm, dd->store, NULL, NULL,
> > +                                         dd->crdata->srcdata[0],
> > +                                         QEMU_ASYNC_JOB_BACKUP) < 0)
> > +            return -1;
> > +    } else {
> > +        if (qemuDomainObjEnterMonitorAsync(priv->driver, vm,
> > +                                           QEMU_ASYNC_JOB_BACKUP) < 0)
> > +            return -1;
> > +
> > +        rc = qemuBlockStorageSourceAttachApply(priv->mon,
> > +                                               dd->crdata->srcdata[0]);
> > +
> > +        if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0)
> > +            return -1;
>
> Offlist, we were wondering if this should blindly trust whatever is
> already in the external file, or blindly pass backing:null. It may
> depend on whether it is push mode (probably trust the image) vs. pull
> mode (we will be hooking up the backing file ourselves when we create
> the sync:none job, so if the scratch disk already has a backing file
> that gets in the way, which argues we want a blind backing:null in
> that case).

I'll add the following diff:

diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c
index caef12cf94..1ca9f8dfe0 100644
--- a/src/qemu/qemu_backup.c
+++ b/src/qemu/qemu_backup.c
@@ -283,6 +283,7 @@ qemuBackupDiskPrepareDataOne(virDomainObjPtr vm,
                              bool removeStore)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
+    g_autoptr(virStorageSource) terminator = NULL;

     /* set data structure */
     dd->backupdisk = backupdisk;
@@ -311,13 +312,17 @@ qemuBackupDiskPrepareDataOne(virDomainObjPtr vm,
             return -1;
     }

+    /* install terminator to prevent qemu from opening backing images */
+    if (!(terminator = virStorageSourceNew()))
+        return -1;
+
     if (!(dd->blockjob = qemuBlockJobDiskNewBackup(vm, dd->domdisk, dd->store,
                                                    removeStore,
                                                    dd->incrementalBitmap)))
         return -1;

     if (!(dd->crdata = qemuBuildStorageSourceChainAttachPrepareBlockdevTop(dd->store,
-                                                                           NULL,
+                                                                           terminator,
                                                                            priv->qemuCaps)))
         return -1;

> > +/**
> > + * qemuBackupBeginCollectIncrementalCheckpoints:
> > + * @vm: domain object
> > + * @incrFrom: name of checkpoint representing starting point of incremental backup
> > + *
> > + * Returns a NULL terminated list of pointers to checkpoints in chronological
> > + * order starting from the 'current' checkpoint until reaching @incrFrom.
> > + */
> > +static virDomainMomentObjPtr *
> > +qemuBackupBeginCollectIncrementalCheckpoints(virDomainObjPtr vm,
> > +                                             const char *incrFrom)
> > +{
> > +    virDomainMomentObjPtr n = virDomainCheckpointGetCurrent(vm->checkpoints);
> > +    g_autofree virDomainMomentObjPtr *incr = NULL;
> > +    size_t nincr = 0;
> > +
> > +    while (n) {
> > +        if (VIR_APPEND_ELEMENT_COPY(incr, nincr, n) < 0)
>
> Are there better glib functions for doing this string management?

Probably yes. We haven't adopted them yet, though, and I don't feel
brave enough to do it as part of this series.

> > +            return NULL;
> > +
> > +        if (STREQ(n->def->name, incrFrom)) {
> > +            virDomainMomentObjPtr terminator = NULL;
> > +            if (VIR_APPEND_ELEMENT_COPY(incr, nincr, terminator) < 0)

[...]
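For reference, glib's GPtrArray would be the natural replacement for
the open-coded VIR_APPEND_ELEMENT_COPY pattern. A rough sketch of how
the loop above could look with it follows; the function name is made
up and the 'parent' step is an assumption (the real iteration is
trimmed above), so treat this as an illustration, not something this
series does:

static virDomainMomentObjPtr *
collectIncrementalCheckpointsSketch(virDomainObjPtr vm,
                                    const char *incrFrom)
{
    virDomainMomentObjPtr n = virDomainCheckpointGetCurrent(vm->checkpoints);
    GPtrArray *incr = g_ptr_array_new();

    while (n) {
        g_ptr_array_add(incr, n);

        if (STREQ(n->def->name, incrFrom)) {
            /* NULL terminator expected by the callers */
            g_ptr_array_add(incr, NULL);

            /* free only the wrapper object and hand back the raw array */
            return (virDomainMomentObjPtr *)g_ptr_array_free(incr, FALSE);
        }

        /* assumed: step to the previous (parent) checkpoint */
        n = n->parent;
    }

    /* @incrFrom was not found; release the wrapper and its contents */
    g_ptr_array_free(incr, TRUE);
    return NULL;
}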
> > +
> > +    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm,
> > +                                       QEMU_ASYNC_JOB_BACKUP) < 0)
> > +        goto endjob;
> > +    blockNamedNodeData = qemuMonitorBlockGetNamedNodeData(priv->mon);
> > +    if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || !blockNamedNodeData)
> > +        goto endjob;
>
> Do you still need to ask the monitor for named node data, or should
> you already have access to all that information since backups require
> -blockdev support?

Yes. This is for detecting the state of bitmaps and the sizing of the
images. When we are pre-creating the images, we need to know the
current size.

> > +
> > +    if (qemuBackupDiskPrepareStorage(vm, dd, ndd, blockNamedNodeData, reuse) < 0)
> > +        goto endjob;
> > +
> > +    priv->backup = g_steal_pointer(&def);

[...]

> > +    if (chk &&
> > +        qemuCheckpointCreateFinalize(priv->driver, vm, cfg, chk, true) < 0)
> > +        goto endjob;
> > +
> > +    if (pull) {
> > +        if (qemuDomainObjEnterMonitorAsync(priv->driver, vm,
> > +                                           QEMU_ASYNC_JOB_BACKUP) < 0)
> > +            goto endjob;
> > +        /* note that if the export fails we've already created the checkpoint
> > +         * and we will not delete it */
>
> Why not?

It would require merging of bitmaps. That operation is too complex and
error-prone to attempt on a cleanup path.

> > +        rc = qemuBackupBeginPullExportDisks(vm, dd, ndd);
> > +        if (qemuDomainObjExitMonitor(priv->driver, vm) < 0)
> > +            goto endjob;
> > +
> > +        if (rc < 0) {
> > +            qemuBackupJobCancelBlockjobs(vm, priv->backup, false);
> > +            goto endjob;
> > +        }
> > +    }

[...]

> > +void
> > +qemuBackupNotifyBlockjobEnd(virDomainObjPtr vm,
> > +                            virDomainDiskDefPtr disk,
> > +                            qemuBlockjobState state,
> > +                            unsigned long long cur,
> > +                            unsigned long long end)
> > +{
> > +    qemuDomainObjPrivatePtr priv = vm->privateData;
> > +    bool has_running = false;
> > +    bool has_cancelling = false;
> > +    bool has_cancelled = false;
> > +    bool has_failed = false;
> > +    qemuDomainJobStatus jobstatus = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
> > +    virDomainBackupDefPtr backup = priv->backup;
> > +    size_t i;
> > +
> > +    VIR_DEBUG("vm: '%s', disk:'%s', state:'%d'",
> > +              vm->def->name, disk->dst, state);
> > +
> > +    if (!backup)
> > +        return;
> > +
> > +    if (backup->type == VIR_DOMAIN_BACKUP_TYPE_PULL) {
> > +        qemuDomainObjEnterMonitor(priv->driver, vm);
> > +        ignore_value(qemuMonitorNBDServerStop(priv->mon));
> > +        if (qemuDomainObjExitMonitor(priv->driver, vm) < 0)
> > +            return;
> > +
> > +        /* update the final statistics with the current job's data */
> > +        backup->pull_tmp_used += cur;
> > +        backup->pull_tmp_total += end;
> > +    } else {
> > +        backup->push_transferred += cur;
> > +        backup->push_total += end;
> > +    }
> > +
>
> If I understand, this is no longer an API entry point, but now a
> helper function called by the existing virDomainAbortJob API, so this
> one does not need an ACL check (as that would already have been
> performed).

This is (later) called from the block job event handler. I've included
it in this patch because IMO it's semantically closer to this code than
to the blockjob code.

> > +    for (i = 0; i < backup->ndisks; i++) {
> > +        virDomainBackupDiskDefPtr backupdisk = backup->disks + i;
> > +
> > +        if (!backupdisk->store)
> > +            continue;
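The quote is cut short here, but the has_* flags declared above hint
at the rest of the handler: it walks every backup disk and folds the
per-disk block job states into a single overall job status. Below is a
self-contained sketch of how such folding could work; every name in it
is a hypothetical stand-in for illustration, not the patch's actual
code (the real code uses qemuBlockjobState and qemuDomainJobStatus):

#include <stdbool.h>
#include <stddef.h>

typedef enum {
    DISK_RUNNING,
    DISK_CANCELLING,
    DISK_CANCELLED,
    DISK_FAILED,
    DISK_COMPLETED,
} exampleDiskState;

typedef enum {
    JOB_ACTIVE,
    JOB_COMPLETED,
    JOB_FAILED,
    JOB_CANCELED,
} exampleJobStatus;

static exampleJobStatus
exampleAggregateStatus(const exampleDiskState *states, size_t nstates)
{
    bool has_running = false;
    bool has_cancelling = false;
    bool has_cancelled = false;
    bool has_failed = false;
    size_t i;

    /* record which states are present across all disks */
    for (i = 0; i < nstates; i++) {
        switch (states[i]) {
        case DISK_RUNNING:    has_running = true;    break;
        case DISK_CANCELLING: has_cancelling = true; break;
        case DISK_CANCELLED:  has_cancelled = true;  break;
        case DISK_FAILED:     has_failed = true;     break;
        case DISK_COMPLETED:                          break;
        }
    }

    /* any disk still in flight keeps the whole job active */
    if (has_running || has_cancelling)
        return JOB_ACTIVE;

    /* a hard failure outweighs a cancellation, which outweighs success */
    if (has_failed)
        return JOB_FAILED;
    if (has_cancelled)
        return JOB_CANCELED;

    return JOB_COMPLETED;
}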