[v2] qemu: Introduce two new job types

Currently, domain save, dump, managed save, and migration all use the
same function, "qemuDomainWaitForMigrationComplete", to wait for the
job to finish, but the error messages it reports all refer to
"migration". For example, when a domain save job is cancelled by the
user, the error reported is "Migration was cancelled by client", which
is confusing for the user.

As a solution, introduce two new job types (QEMU_JOB_SAVE and
QEMU_JOB_DUMP), and set "priv->jobActive" to QEMU_JOB_SAVE before
saving and to QEMU_JOB_DUMP before dumping, so that
"qemuDomainWaitForMigrationComplete" can tell which job is actually
running and report a clearer message.

Since the exact job type is not important in the DEBUG and WARN logs,
and log messages do not need to be translated, simply replace
"migration" with "job" in those statements.
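
For illustration only (this sketch is not part of the patch, and the
helper name is hypothetical), the job-label selection added below could
equally be factored into a small helper:

    /* Hypothetical helper: map the active job type to the label that
     * prefixes the error messages in qemuDomainWaitForMigrationComplete. */
    static const char *
    qemuDomainJobLabel(enum qemuDomainJob jobActive)
    {
        switch (jobActive) {
        case QEMU_JOB_MIGRATION_OUT:
            return _("migration job");
        case QEMU_JOB_SAVE:
            return _("domain save job");
        case QEMU_JOB_DUMP:
            return _("domain core dump job");
        default:
            return _("job");
        }
    }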

* src/qemu/qemu_driver.c: add QEMU_JOB_SAVE and QEMU_JOB_DUMP job
  types, set them in qemudDomainSaveFlag and qemudDomainCoreDump, and
  report job-specific error messages from
  qemuDomainWaitForMigrationComplete.
---
 src/qemu/qemu_driver.c |   40 +++++++++++++++++++++++++++++++---------
 1 files changed, 31 insertions(+), 9 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 19ce9a6..d5af0df 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -104,6 +104,8 @@ enum qemuDomainJob {
     QEMU_JOB_UNSPECIFIED,
     QEMU_JOB_MIGRATION_OUT,
     QEMU_JOB_MIGRATION_IN,
+    QEMU_JOB_SAVE,
+    QEMU_JOB_DUMP,
 };

 enum qemuDomainJobSignals {
@@ -5389,21 +5391,37 @@ qemuDomainWaitForMigrationComplete(struct qemud_driver *driver, virDomainObjPtr
         struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
         struct timeval now;
         int rc;
+        const char *job;
+
+        switch (priv->jobActive) {
+            case QEMU_JOB_MIGRATION_OUT:
+                job = _("migration job");
+                break;
+            case QEMU_JOB_SAVE:
+                job = _("domain save job");
+                break;
+            case QEMU_JOB_DUMP:
+                job = _("domain core dump job");
+                break;
+            default:
+                job = _("job");
+        }
+

         if (!virDomainObjIsActive(vm)) {
-            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                            _("guest unexpectedly quit during migration"));
+            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s: %s",
+                            job, _("guest unexpectedly quit"));
             goto cleanup;
         }

         if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) {
             priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL;
-            VIR_DEBUG0("Cancelling migration at client request");
+            VIR_DEBUG0("Cancelling job at client request");
             qemuDomainObjEnterMonitorWithDriver(driver, vm);
             rc = qemuMonitorMigrateCancel(priv->mon);
             qemuDomainObjExitMonitorWithDriver(driver, vm);
             if (rc < 0) {
-                VIR_WARN0("Unable to cancel migration");
+                VIR_WARN0("Unable to cancel job");
             }
         } else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
             priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND;
@@ -5427,8 +5445,8 @@ qemuDomainWaitForMigrationComplete(struct qemud_driver *driver, virDomainObjPtr
          * guest to die
          */
         if (!virDomainObjIsActive(vm)) {
-            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                            _("guest unexpectedly quit during migration"));
+            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s: %s",
+                            job, _("guest unexpectedly quit"));
             goto cleanup;
         }

@@ -5459,7 +5477,7 @@ qemuDomainWaitForMigrationComplete(struct qemud_driver *driver, virDomainObjPtr
         case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
             priv->jobInfo.type = VIR_DOMAIN_JOB_NONE;
             qemuReportError(VIR_ERR_OPERATION_FAILED,
-                            "%s", _("Migration is not active"));
+                            "%s: %s", job, _("is not active"));
             break;

         case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
@@ -5480,13 +5498,13 @@ qemuDomainWaitForMigrationComplete(struct qemud_driver *driver, virDomainObjPtr
         case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
             priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
             qemuReportError(VIR_ERR_OPERATION_FAILED,
-                            "%s", _("Migration unexpectedly failed"));
+                            "%s: %s", job, _("unexpectedly failed"));
             break;

         case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
             priv->jobInfo.type = VIR_DOMAIN_JOB_CANCELLED;
             qemuReportError(VIR_ERR_OPERATION_FAILED,
-                            "%s", _("Migration was cancelled by client"));
+                            "%s: %s", job, _("canceled by client"));
             break;
         }

@@ -5606,6 +5624,8 @@ static int qemudDomainSaveFlag(struct qemud_driver *driver, virDomainPtr dom,
         goto endjob;
     }

+    priv->jobActive = QEMU_JOB_SAVE;
+
     memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
     priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;

@@ -6198,6 +6218,8 @@ static int qemudDomainCoreDump(virDomainPtr dom,
         goto endjob;
     }

+    priv->jobActive = QEMU_JOB_DUMP;
+
     /* Migrate will always stop the VM, so the resume condition is
        independent of whether the stop command is issued.  */
     resume = (vm->state == VIR_DOMAIN_RUNNING);
--
1.7.3.2


