[PATCH 1/2] Parallels: add domainGetVcpus()

OpenStack Nova requires this function to start a VM instance. The cpumask
information is obtained via the prlctl utility. Unlike KVM, Parallels Cloud
Server cannot set a CPU affinity mask for each vCPU individually: a single
mask is shared by all vCPUs. It can be set with the
'prlctl set <vm_id|vm_name> --cpumask <{n[,n,n1-n2]|all}>' command. For
example, 'prlctl set SomeDomain --cpumask 0, 1, 5-7' sets the mask to
yy---yyy.

From: A.Burluka <aburluka@xxxxxxxxxxxxx>

---
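As a reviewer's note, here is a minimal standalone sketch (not part of the
patch; the sample mask string and the NCPUS value are illustrative
assumptions) showing how a prlctl-style mask such as '0, 1, 5-7' maps onto
the yy---yyy notation used above:

    /* Parse "0, 1, 5-7" and print the mask in yy---yyy form. */
    #include <stdio.h>
    #include <string.h>

    #define NCPUS 8  /* assumed host CPU count, for illustration only */

    int main(void)
    {
        const char *mask = "0, 1, 5-7";
        char out[NCPUS + 1];
        int cpu, prev = -1, off, range = 0;

        memset(out, '-', NCPUS);      /* '-' marks an unused CPU */
        out[NCPUS] = '\0';

        while (sscanf(mask, "%d%n", &cpu, &off) == 1) {
            int i;
            /* for "a-b" ranges, fill from the previous number up to cpu */
            for (i = range ? prev : cpu; i <= cpu && i < NCPUS; i++)
                out[i] = 'y';
            prev = cpu;
            mask += off;
            range = 0;
            if (*mask == ',') {
                mask++;
            } else if (*mask == '-') {
                mask++;
                range = 1;
            }
            while (*mask == ' ')
                mask++;
        }

        printf("%s\n", out);          /* prints yy---yyy */
        return 0;
    }

parallelsNodeGetCpuMask() in the patch below performs the analogous
conversion, but into a virBitmap.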
 src/parallels/parallels_driver.c |  169 +++++++++++++++++++++++++++++++++++++-
 src/parallels/parallels_utils.h  |    1 +
 2 files changed, 167 insertions(+), 3 deletions(-)
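Since Parallels Cloud Server keeps one affinity mask shared by all vCPUs,
the new domainGetVcpus() copies the same byte-packed map into every vCPU
row of the caller's cpumaps buffer (the row address is what
VIR_GET_CPUMAP(cpumaps, maplen, v) computes). A small standalone sketch of
that layout follows; the mask byte and vCPU count are assumptions for
illustration:

    /* Copy one shared mask into every per-vCPU row of cpumaps. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char mask[1] = { 0xE3 }; /* bits 0,1,5,6,7 set: yy---yyy */
        int maplen = sizeof(mask);        /* bytes per vCPU row */
        int nvcpus = 2;                   /* assumed vCPU count */
        unsigned char cpumaps[2];         /* nvcpus * maplen bytes */
        int v;

        for (v = 0; v < nvcpus; v++)      /* same bytes for every vCPU */
            memcpy(cpumaps + v * maplen, mask, maplen);

        for (v = 0; v < nvcpus; v++)
            printf("vcpu %d: 0x%02x\n", v, cpumaps[v * maplen]);
        return 0;
    }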

diff --git a/src/parallels/parallels_driver.c b/src/parallels/parallels_driver.c
index ab59599..f1d5ecc 100644
--- a/src/parallels/parallels_driver.c
+++ b/src/parallels/parallels_driver.c
@@ -108,6 +108,7 @@ parallelsDomObjFreePrivate(void *p)
     if (!pdom)
         return;
 
+    VIR_FREE(pdom->cpumask);
     VIR_FREE(pdom->uuid);
     VIR_FREE(pdom->home);
     VIR_FREE(p);
@@ -654,6 +655,9 @@ parallelsLoadDomain(parallelsConnPtr privconn, virJSONValuePtr jobj)
     if (VIR_ALLOC(def) < 0)
         goto cleanup;
 
+    if (VIR_ALLOC(pdom) < 0)
+        goto cleanup;
+
     def->virtType = VIR_DOMAIN_VIRT_PARALLELS;
     def->id = -1;
 
@@ -716,6 +720,17 @@ parallelsLoadDomain(parallelsConnPtr privconn, virJSONValuePtr jobj)
         goto cleanup;
     }
 
+    if (!(tmp = virJSONValueObjectGetString(jobj3, "mask"))) {
+        /* Absence of this field means that all of the domain's CPUs are available */
+        if (VIR_STRDUP(pdom->cpumask, "all") < 0) {
+            goto cleanup;
+        }
+    } else {
+        if (VIR_STRDUP(pdom->cpumask, tmp) < 0) {
+            goto cleanup;
+        }
+    }
+
     if (!(jobj3 = virJSONValueObjectGet(jobj2, "memory"))) {
         parallelsParseError();
         goto cleanup;
@@ -757,9 +772,6 @@ parallelsLoadDomain(parallelsConnPtr privconn, virJSONValuePtr jobj)
 
     def->os.arch = VIR_ARCH_X86_64;
 
-    if (VIR_ALLOC(pdom) < 0)
-        goto cleanup;
-
     if (virJSONValueObjectGetNumberUint(jobj, "EnvID", &x) < 0)
         goto cleanup;
     pdom->id = x;
@@ -2300,6 +2312,156 @@ static int parallelsConnectIsAlive(virConnectPtr conn ATTRIBUTE_UNUSED)
 }
 
 
+static int
+parallelsNodeGetCpuMask(parallelsDomObjPtr privatedomdata,
+                        virBitmapPtr *cpumask,
+                        int hostcpus)
+{
+    int ret = -1;
+    int cpunum = -1;
+    int prevcpunum = -1;
+    int offset = 0;
+    const char *it = privatedomdata->cpumask;
+    bool isrange = false;
+    size_t i;
+    int cpunums[512] = { 0 };
+    size_t count = 0;
+
+    if (STREQ(it, "all")) {
+        if (!(*cpumask = virBitmapNew(hostcpus)))
+            goto cleanup;
+        virBitmapSetAll(*cpumask);
+    } else {
+        while (sscanf(it, "%d%n", &cpunum, &offset) == 1) {
+            char delim = 0;
+            if (isrange) {
+                for (i = prevcpunum + 1; i <= cpunum; ++i) {
+                    cpunums[count++] = i;
+                }
+            } else {
+                cpunums[count++] = cpunum;
+            }
+
+            it += offset;
+
+            if (sscanf(it, "%c%n", &delim, &offset) == EOF) {
+                break;
+            } else {
+                it += offset;
+                switch (delim) {
+                case ',':
+                    isrange = false;
+                    break;
+                case '-':
+                    isrange = true;
+                    prevcpunum = cpunum;
+                    break;
+                default:
+                    virReportError(VIR_ERR_INVALID_ARG,
+                                   _("Invalid cpumask format '%s'"),
+                                   privatedomdata->cpumask);
+                    goto cleanup;
+                    break;
+                }
+            }
+        }
+        if (!(*cpumask = virBitmapNew(cpunums[count-1] + 1)))
+            goto cleanup;
+        virBitmapClearAll(*cpumask);
+        for (i = 0; i < count; ++i) {
+            if (virBitmapSetBit(*cpumask, cpunums[i]) == -1) {
+                virReportError(VIR_ERR_INTERNAL_ERROR,
+                               _("cannot set %d bit in cpumask"),
+                               cpunums[i]);
+                goto cleanup;
+            }
+        }
+    }
+
+    return 0;
+ cleanup:
+    virBitmapFree(*cpumask);
+    return ret;
+}
+
+static int
+parallelsDomainGetVcpus(virDomainPtr domain,
+                        virVcpuInfoPtr info,
+                        int maxinfo,
+                        unsigned char *cpumaps,
+                        int maplen)
+{
+    parallelsConnPtr privconn = domain->conn->privateData;
+    parallelsDomObjPtr privdomdata = NULL;
+    virDomainObjPtr privdom = NULL;
+    size_t i;
+    int v, maxcpu, hostcpus;
+    int ret = -1;
+
+    parallelsDriverLock(privconn);
+    privdom = virDomainObjListFindByUUID(privconn->domains, domain->uuid);
+    parallelsDriverUnlock(privconn);
+
+    if (privdom == NULL) {
+        parallelsDomNotFoundError(domain);
+        goto cleanup;
+    }
+
+    if (!virDomainObjIsActive(privdom)) {
+        virReportError(VIR_ERR_OPERATION_INVALID,
+                       "%s",
+                       _("cannot list vcpu pinning for an inactive domain"));
+        goto cleanup;
+    }
+
+    privdomdata = privdom->privateData;
+    if ((hostcpus = nodeGetCPUCount()) < 0)
+        goto cleanup;
+
+    maxcpu = maplen * 8;
+    if (maxcpu > hostcpus)
+        maxcpu = hostcpus;
+
+    if (maxinfo >= 1) {
+        if (info != NULL) {
+            memset(info, 0, sizeof(*info) * maxinfo);
+            for (i = 0; i < maxinfo; i++) {
+                info[i].number = i;
+                info[i].state = VIR_VCPU_RUNNING;
+            }
+        }
+        if (cpumaps != NULL) {
+            unsigned char *tmpmap = NULL;
+            int tmpmapLen = 0;
+            virBitmapPtr map = NULL;
+
+            memset(cpumaps, 0, maplen * maxinfo);
+            if (parallelsNodeGetCpuMask(privdomdata, &map, hostcpus) == -1) {
+                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                               _("Unable to get cpu affinity mask "));
+                goto cleanup;
+            }
+            virBitmapToData(map, &tmpmap, &tmpmapLen);
+            if (tmpmapLen > maplen)
+                tmpmapLen = maplen;
+
+            for (v = 0; v < maxinfo; v++) {
+                unsigned char *cpumap = VIR_GET_CPUMAP(cpumaps, maplen, v);
+                memcpy(cpumap, tmpmap, tmpmapLen);
+            }
+            VIR_FREE(tmpmap);
+            virBitmapFree(map);
+        }
+    }
+    ret = maxinfo;
+
+ cleanup:
+    if (privdom)
+        virObjectUnlock(privdom);
+    return ret;
+}
+
+
 static virDriver parallelsDriver = {
     .no = VIR_DRV_PARALLELS,
     .name = "Parallels",
@@ -2323,6 +2485,7 @@ static virDriver parallelsDriver = {
     .domainGetXMLDesc = parallelsDomainGetXMLDesc,    /* 0.10.0 */
     .domainIsPersistent = parallelsDomainIsPersistent,        /* 0.10.0 */
     .domainGetAutostart = parallelsDomainGetAutostart,        /* 0.10.0 */
+    .domainGetVcpus = parallelsDomainGetVcpus, /* 1.2.6 */
     .domainSuspend = parallelsDomainSuspend,    /* 0.10.0 */
     .domainResume = parallelsDomainResume,    /* 0.10.0 */
     .domainDestroy = parallelsDomainDestroy,  /* 0.10.0 */
diff --git a/src/parallels/parallels_utils.h b/src/parallels/parallels_utils.h
index 6215553..e88af1c 100644
--- a/src/parallels/parallels_utils.h
+++ b/src/parallels/parallels_utils.h
@@ -54,6 +54,7 @@ struct parallelsDomObj {
     int id;
     char *uuid;
     char *home;
+    char *cpumask;
 };
 
 typedef struct parallelsDomObj *parallelsDomObjPtr;
-- 
1.7.1

--
libvir-list mailing list
libvir-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/libvir-list



