[PATCH 3/4] qemu snapshot: use QMP snapshot-save/delete for internal snapshots

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



The usage of HMP commands is highly discouraged by QEMU. Moreover,
the current snapshot creation routine does not provide flexibility in
choosing the target device for the VM state snapshot.

This patch makes use of the QMP commands snapshot-save/snapshot-delete
and by default chooses the first writable disk (if present) as the target
for the VM state, falling back to the NVRAM image otherwise.

Signed-off-by: Nikolai Barybin <nikolai.barybin@xxxxxxxxxxxxx>
---
 src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 148 insertions(+), 10 deletions(-)

diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index f5260c4a22..83949a9a27 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -308,6 +308,96 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
     return ret;
 }
 
+/* Fill @wrdevs (a caller-allocated, NULL-terminated string list) with the
+ * node names of all writable disks of @vm.  The first entry is later used
+ * by callers as the target device for the VM state.  If no writable disk
+ * exists, fall back to the NVRAM image.
+ *
+ * Returns 0 on success, -1 (with an error reported) if no suitable
+ * writable device exists. */
+static int
+qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
+                                             char **wrdevs)
+{
+    size_t wrdevCount = 0;
+    size_t i = 0;
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDef *disk = vm->def->disks[i];
+        if (!disk->src->readonly) {
+            wrdevs[wrdevCount] = g_strdup(disk->src->nodenameformat);
+            wrdevCount++;
+        }
+    }
+
+    if (wrdevCount == 0) {
+        /* os.loader may be NULL for domains without firmware configuration;
+         * check it before dereferencing to avoid a crash */
+        if (vm->def->os.loader && vm->def->os.loader->nvram) {
+            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
+        } else {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("no writable device for internal snapshot creation/deletion"));
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+
+/* Poll the state of the 'snapsave<id>' blockjob started by
+ * qemuSnapshotCreateActiveInternalStart().
+ *
+ * Returns 1 when the job completed, 0 when it is still running, and
+ * -1 (with an error reported) when the job failed or can't be found. */
+static int
+qemuSnapshotCreateActiveInternalDone(virDomainObj *vm)
+{
+    qemuBlockJobData *job = NULL;
+    qemuDomainObjPrivate *priv = vm->privateData;
+    /* keep the lookup key in an autofree'd variable; passing the result of
+     * g_strdup_printf() straight to virHashLookup() leaks it on every poll */
+    g_autofree char *jobname = g_strdup_printf("snapsave%d", vm->def->id);
+
+    if (!(job = virHashLookup(priv->blockjobs, jobname))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("failed to lookup blockjob 'snapsave%1$d'"), vm->def->id);
+        return -1;
+    }
+
+    qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
+    if (job->state == VIR_DOMAIN_BLOCK_JOB_FAILED) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("snapshot-save job failed: %1$s"), NULLSTR(job->errmsg));
+        return -1;
+    }
+
+    return job->state == VIR_DOMAIN_BLOCK_JOB_COMPLETED ? 1 : 0;
+}
+
+
+/* Start a QMP 'snapshot-save' job named 'snapsave<id>' storing the VM state
+ * into the first writable device of @vm under snapshot name @name.
+ *
+ * Returns 0 when the job was started, -1 (with an error reported) otherwise.
+ * Completion must be tracked via qemuSnapshotCreateActiveInternalDone(). */
+static int
+qemuSnapshotCreateActiveInternalStart(virDomainObj *vm,
+                                      const char *name)
+{
+    qemuBlockJobData *job = NULL;
+    /* g_auto(GStrv) frees the strings as well; a plain g_autofree on the
+     * array would leak every g_strdup()'d node name */
+    g_auto(GStrv) wrdevs = NULL;
+    g_autofree char *jobname = NULL;
+    int ret = -1;
+    int rc = 0;
+
+    /* +2: room for all disks plus the NVRAM fallback slot while keeping the
+     * list NULL-terminated even when ndisks == 0 */
+    wrdevs = g_new0(char *, vm->def->ndisks + 2);
+    if (qemuSnapshotActiveInternalGetWrdevListHelper(vm, wrdevs) < 0)
+        return -1;
+
+    jobname = g_strdup_printf("snapsave%d", vm->def->id);
+    if (!(job = qemuBlockJobDiskNew(vm, NULL, QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE,
+                                    jobname))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("failed to create new blockjob"));
+        return -1;
+    }
+
+    qemuBlockJobSyncBegin(job);
+    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0)
+        goto cleanup;
+
+    /* wrdevs[0] is the device receiving the VM state */
+    rc = qemuMonitorSnapshotSave(qemuDomainGetMonitor(vm), job->name,
+                                 name, wrdevs[0], wrdevs);
+    qemuDomainObjExitMonitor(vm);
+    if (rc == 0) {
+        qemuBlockJobStarted(job, vm);
+        ret = 0;
+    }
+
+ cleanup:
+    qemuBlockJobStartupFinalize(vm, job);
+    return ret;
+}
+
 
 /* The domain is expected to be locked and active. */
 static int
@@ -316,11 +406,11 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
                                  virDomainMomentObj *snap,
                                  unsigned int flags)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
     virObjectEvent *event = NULL;
     bool resume = false;
     virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snap);
     int ret = -1;
+    int rv = 0;
 
     if (!qemuMigrationSrcIsAllowed(vm, false, VIR_ASYNC_JOB_SNAPSHOT, 0))
         goto cleanup;
@@ -342,15 +432,17 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
         }
     }
 
-    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
+    if ((ret = qemuSnapshotCreateActiveInternalStart(vm, snap->def->name)) < 0) {
         resume = false;
         goto cleanup;
     }
 
-    ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
-    qemuDomainObjExitMonitor(vm);
-    if (ret < 0)
-        goto cleanup;
+    while ((rv = qemuSnapshotCreateActiveInternalDone(vm)) != 1) {
+        if (rv < 0 || qemuDomainObjWait(vm) < 0) {
+            ret = -1;
+            goto cleanup;
+        }
+    }
 
     if (!(snapdef->cookie = (virObject *) qemuDomainSaveCookieNew(vm)))
         goto cleanup;
@@ -3603,6 +3695,55 @@ qemuSnapshotDiscardMetadata(virDomainObj *vm,
 }
 
 
+/* Delete internal snapshot @name from all writable devices of the running
+ * @vm via a QMP 'snapshot-delete' job named 'snapdelete<id>', waiting for
+ * the job to finish.
+ *
+ * Returns 0 on success, -1 (with an error reported) on failure. */
+static int
+qemuSnapshotDiscardActiveInternal(virDomainObj *vm,
+                                  const char *name)
+{
+    qemuBlockJobData *job = NULL;
+    /* g_auto(GStrv) frees the strings as well; a plain g_autofree on the
+     * array would leak every g_strdup()'d node name */
+    g_auto(GStrv) wrdevs = NULL;
+    g_autofree char *jobname = NULL;
+    int ret = -1;
+    int rc = 0;
+
+    /* +2: room for all disks plus the NVRAM fallback slot while keeping the
+     * list NULL-terminated even when ndisks == 0 */
+    wrdevs = g_new0(char *, vm->def->ndisks + 2);
+    if (qemuSnapshotActiveInternalGetWrdevListHelper(vm, wrdevs) < 0)
+        return -1;
+
+    jobname = g_strdup_printf("snapdelete%d", vm->def->id);
+    if (!(job = qemuBlockJobDiskNew(vm, NULL, QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE,
+                                    jobname))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("failed to create new blockjob"));
+        return -1;
+    }
+
+    qemuBlockJobSyncBegin(job);
+    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0)
+        goto cleanup;
+
+    rc = qemuMonitorSnapshotDelete(qemuDomainGetMonitor(vm), job->name, name, wrdevs);
+    qemuDomainObjExitMonitor(vm);
+    if (rc < 0)
+        goto cleanup;
+
+    qemuBlockJobStarted(job, vm);
+    ret = 0;
+
+    while (job->state != VIR_DOMAIN_BLOCK_JOB_COMPLETED) {
+        /* bail out if waiting fails (e.g. the domain went away) instead of
+         * spinning forever on an ignored return value */
+        if (qemuDomainObjWait(vm) < 0) {
+            ret = -1;
+            break;
+        }
+        qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
+        if (job->state == VIR_DOMAIN_BLOCK_JOB_FAILED) {
+            virReportError(VIR_ERR_INTERNAL_ERROR,
+                           _("snapshot-delete job failed: %1$s"), NULLSTR(job->errmsg));
+            ret = -1;
+            break;
+        }
+    }
+
+ cleanup:
+    qemuBlockJobStartupFinalize(vm, job);
+    return ret;
+}
+
+
 /* Discard one snapshot (or its metadata), without reparenting any children.  */
 static int
 qemuSnapshotDiscardImpl(virQEMUDriver *driver,
@@ -3645,11 +3786,8 @@ qemuSnapshotDiscardImpl(virQEMUDriver *driver,
                 /* Similarly as internal snapshot creation we would use a regular job
                  * here so set a mask to forbid any other job. */
                 qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_NONE);
-                if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0)
-                    return -1;
                 /* we continue on even in the face of error */
-                qemuMonitorDeleteSnapshot(qemuDomainGetMonitor(vm), snap->def->name);
-                qemuDomainObjExitMonitor(vm);
+                qemuSnapshotDiscardActiveInternal(vm, snap->def->name);
                 qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_DEFAULT_MASK);
             }
         }
-- 
2.43.5



[Index of Archives]     [Virt Tools]     [Libvirt Users]     [Lib OS Info]     [Fedora Users]     [Fedora Desktop]     [Fedora SELinux]     [Big List of Linux Books]     [Yosemite News]     [KDE Users]     [Fedora Tools]

  Powered by Linux