Re: [PATCH 3/4] qemu snapshot: use QMP snapshot-save/delete for internal snapshots

On 7/16/24 00:42, Nikolai Barybin wrote:
The usage of HMP commands is highly discouraged by QEMU. Moreover, the
current snapshot creation routine does not provide any flexibility in
choosing the target device for the VM state.

This patch makes use of the QMP commands snapshot-save/delete and by
default chooses the first writable disk (if present) as the target for
the VM state, falling back to the NVRAM image otherwise; a rough sketch
of the resulting QMP call follows the diffstat.

Signed-off-by: Nikolai Barybin <nikolai.barybin@xxxxxxxxxxxxx>
---
  src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++---
  1 file changed, 148 insertions(+), 10 deletions(-)
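
For context (not part of the patch): with a domain whose only writable disk
has the node name "libvirt-1-format" and a snapshot tagged "snap1" (both names
made up for illustration), the call made by the new code path below is roughly:

    /*
     * Expected to issue a QMP command along the lines of:
     *   { "execute": "snapshot-save",
     *     "arguments": { "job-id": "snapsave1", "tag": "snap1",
     *                    "vmstate": "libvirt-1-format",
     *                    "devices": [ "libvirt-1-format" ] } }
     */
    char *wrdevs[] = { (char *) "libvirt-1-format", NULL };

    qemuMonitorSnapshotSave(qemuDomainGetMonitor(vm),
                            "snapsave1",  /* job->name, "snapsave<domain id>" */
                            "snap1",      /* snapshot tag */
                            wrdevs[0],    /* vmstate target: first writable disk */
                            wrdevs);      /* devices included in the snapshot */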

diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index f5260c4a22..83949a9a27 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -308,6 +308,96 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
      return ret;
  }

+static int
+qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
+                                             char **wrdevs)
+{
+    size_t wrdevCount = 0;
+    size_t i = 0;
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDef *disk = vm->def->disks[i];
+        if (!disk->src->readonly) {
+            wrdevs[wrdevCount] = g_strdup(disk->src->nodenameformat);
+            wrdevCount++;
+        }
+    }
+
+    if (wrdevCount == 0) {
+        if (vm->def->os.loader->nvram) {
+            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
+        } else {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("no writable device for internal snapshot creation/deletion"));
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+
+static int
+qemuSnapshotCreateActiveInternalDone(virDomainObj *vm)
+{
+    qemuBlockJobData *job = NULL;
+    qemuDomainObjPrivate *priv = vm->privateData;
+
+    if (!(job = virHashLookup(priv->blockjobs, g_strdup_printf("snapsave%d", vm->def->id)))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("failed to lookup blockjob 'snapsave%1$d'"), vm->def->id);
+        return -1;
+    }
+
+    qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
+    if (job->state == VIR_DOMAIN_BLOCK_JOB_FAILED) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("snapshot-save job failed: %1$s"), NULLSTR(job->errmsg));
+        return -1;
+    }
+
+    return job->state == VIR_DOMAIN_BLOCK_JOB_COMPLETED ? 1 : 0;
+}
+
+
+static int
+qemuSnapshotCreateActiveInternalStart(virDomainObj *vm,
+                                      const char *name)
+{
+    qemuBlockJobData *job = NULL;
+    g_autofree char** wrdevs = NULL;
+    int ret = -1;
+    int rc = 0;
+
+    wrdevs = g_new0(char *, vm->def->ndisks + 1);
+    if (qemuSnapshotActiveInternalGetWrdevListHelper(vm, wrdevs) < 0)
+        return -1;
+
+    if (!(job = qemuBlockJobDiskNew(vm, NULL, QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE,
+                                    g_strdup_printf("snapsave%d", vm->def->id)))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("failed to create new blockjob"));
+        return -1;
+    }
+
+    qemuBlockJobSyncBegin(job);
+    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
+        ret = -1;
+        goto cleanup;
+    }
+
+    rc = qemuMonitorSnapshotSave(qemuDomainGetMonitor(vm), job->name,
+                                 name, wrdevs[0], wrdevs);
+    qemuDomainObjExitMonitor(vm);
+    if (rc == 0) {
+        qemuBlockJobStarted(job, vm);
+        ret = 0;
+    }
+
+ cleanup:
+    qemuBlockJobStartupFinalize(vm, job);
+    return ret;
+}
+

 /* The domain is expected to be locked and active. */
  static int
@@ -316,11 +406,11 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
                                   virDomainMomentObj *snap,
                                   unsigned int flags)
  {
-    qemuDomainObjPrivate *priv = vm->privateData;
      virObjectEvent *event = NULL;
      bool resume = false;
      virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snap);
      int ret = -1;
+    int rv = 0;

     if (!qemuMigrationSrcIsAllowed(vm, false, VIR_ASYNC_JOB_SNAPSHOT, 0))
          goto cleanup;
@@ -342,15 +432,17 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
          }
      }

-    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
+    if ((ret = qemuSnapshotCreateActiveInternalStart(vm, snap->def->name)) < 0) {
          resume = false;
          goto cleanup;
      }

-    ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
-    qemuDomainObjExitMonitor(vm);
-    if (ret < 0)
-        goto cleanup;

'snapshot-save' was added in QEMU 6.0. Right now we are at 8.2.

Should we still support pre-6.0 versions, i.e. check the availability of
the command via a proper capability?
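
Something along these lines, perhaps (a minimal sketch; QEMU_CAPS_SNAPSHOT_SAVE
is a hypothetical flag name, it would have to be added and probed from
query-qmp-schema like the existing capabilities):

    qemuDomainObjPrivate *priv = vm->privateData;

    /* hypothetical capability gate before taking the QMP code path */
    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SNAPSHOT_SAVE)) {
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                       _("this QEMU binary lacks the 'snapshot-save' command"));
        return -1;
    }

qemuSnapshotCreateActiveInternalStart() (or an earlier validation step) would
be the natural place for such a check.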

Den


