Re: [PATCH v4 3/9] qemu: Implement the qemu driver fetch for IOThreads

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Thu, Mar 05, 2015 at 09:03:00PM -0500, John Ferlan wrote:
> Depending on the flags passed, either attempt to return the active/live
> IOThread data for the domain or the config data.
> 
> The active/live path will call into the Monitor in order to get the
> IOThread data and then correlate the thread_id's returned from the
> monitor to the currently running system/threads in order to ascertain
> the affinity for each iothread_id.
> 
> The config path will map each of the configured IOThreads and return
> any configured iothreadspin data
> 
> Signed-off-by: John Ferlan <jferlan@xxxxxxxxxx>
> ---
>  src/qemu/qemu_driver.c | 224 +++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 224 insertions(+)
> 
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index ffa4e19..d8a761d 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -5557,6 +5557,229 @@ qemuDomainGetMaxVcpus(virDomainPtr dom)
>                                           VIR_DOMAIN_VCPU_MAXIMUM));
>  }
>  
> +static int
> +qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver,
> +                           virDomainObjPtr vm,
> +                           virDomainIOThreadInfoPtr **info)
> +{
> +    qemuDomainObjPrivatePtr priv;
> +    qemuMonitorIOThreadsInfoPtr *iothreads = NULL;
> +    virDomainIOThreadInfoPtr *info_ret = NULL;
> +    int niothreads = 0;
> +    int maxcpu, hostcpus, maplen;
> +    size_t i;
> +    int ret = -1;
> +
> +    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
> +        goto cleanup;
> +
> +    if (!virDomainObjIsActive(vm)) {
> +        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
> +                       _("cannot list IOThreads for an inactive domain"));
> +        goto endjob;
> +    }
> +
> +    priv = vm->privateData;
> +    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) {
> +        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
> +                       _("IOThreads not supported with this binary"));
> +        goto endjob;
> +    }
> +
> +    qemuDomainObjEnterMonitor(driver, vm);
> +    niothreads = qemuMonitorGetIOThreads(priv->mon, &iothreads);
> +    if (qemuDomainObjExitMonitor(driver, vm) < 0)
> +        goto endjob;
> +    if (niothreads < 0)
> +        goto endjob;
> +
> +    /* Nothing to do */
> +    if (niothreads == 0) {
> +        ret = 0;
> +        goto endjob;
> +    }
> +
> +    if ((hostcpus = nodeGetCPUCount()) < 0)
> +        goto endjob;
> +
> +    maplen = VIR_CPU_MAPLEN(hostcpus);

The maplen is not needed. Just pass 'hostcpus' to
'virProcessGetAffinity' and it will generate a virBitmap of the right
size, then virBitmapToData computes the correct maplen.

> +    maxcpu = maplen * 8;
> +    if (maxcpu > hostcpus)

These two lines are redundant.
If maplen * 8 < hostcpus, then VIR_CPU_MAPLEN is flawed, because the map
does not hold at least hostcpus bits.
If maplen * 8 >= hostcpus, the value of hostcpus is used.

> +        maxcpu = hostcpus;

> +
> +    if (VIR_ALLOC_N(info_ret, niothreads) < 0)
> +        goto endjob;
> +
> +    for (i = 0; i < niothreads; i++) {
> +        virBitmapPtr map = NULL;
> +        unsigned char *tmpmap = NULL;
> +        int tmpmaplen = 0;
> +
> +        if (VIR_ALLOC(info_ret[i]) < 0)
> +            goto endjob;
> +
> +        if (virStrToLong_ui(iothreads[i]->name + strlen("iothread"), NULL, 10,
> +                            &info_ret[i]->iothread_id) < 0)
> +            goto endjob;
> +
> +        if (VIR_ALLOC_N(info_ret[i]->cpumap, maplen) < 0)
> +            goto endjob;
> +
> +        if (virProcessGetAffinity(iothreads[i]->thread_id, &map, maxcpu) < 0)
> +            goto endjob;
> +
> +        virBitmapToData(map, &tmpmap, &tmpmaplen);

> +        if (tmpmaplen > maplen)
> +            tmpmaplen = maplen;

This is equivalent to:
if (VIR_CPU_MAPLEN(hostcpus) > VIR_CPU_MAPLEN(hostcpus))
which can never be true, so the clamp is dead code.


> +        memcpy(info_ret[i]->cpumap, tmpmap, tmpmaplen);
> +        info_ret[i]->cpumaplen = tmpmaplen;
> +
> +        VIR_FREE(tmpmap);
> +        virBitmapFree(map);
> +    }
> +
> +    *info = info_ret;
> +    info_ret = NULL;
> +    ret = niothreads;
> +
> + endjob:
> +    qemuDomainObjEndJob(driver, vm);
> +
> + cleanup:
> +    if (info_ret) {
> +        for (i = 0; i < niothreads; i++)
> +            virDomainIOThreadsInfoFree(info_ret[i]);
> +        VIR_FREE(info_ret);
> +    }
> +    if (iothreads) {
> +        for (i = 0; i < niothreads; i++)
> +            qemuMonitorIOThreadsInfoFree(iothreads[i]);
> +        VIR_FREE(iothreads);
> +    }
> +
> +    return ret;
> +}
> +
> +static int
> +qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
> +                             virDomainIOThreadInfoPtr **info)
> +{
> +    virDomainIOThreadInfoPtr *info_ret = NULL;
> +    virDomainVcpuPinDefPtr *iothreadspin_list;
> +    virBitmapPtr cpumask = NULL;
> +    unsigned char *cpumap;
> +    int maxcpu, hostcpus, maplen;
> +    size_t i, pcpu;
> +    bool pinned;
> +    int ret = -1;
> +
> +    if (targetDef->iothreads == 0)
> +        return 0;
> +
> +    if ((hostcpus = nodeGetCPUCount()) < 0)
> +        goto cleanup;
> +
> +    maplen = VIR_CPU_MAPLEN(hostcpus);
> +    maxcpu = maplen * 8;
> +    if (maxcpu > hostcpus)
> +        maxcpu = hostcpus;

Same redundancy here.

> +
> +    if (VIR_ALLOC_N(info_ret, targetDef->iothreads) < 0)
> +        goto cleanup;
> +
> +    for (i = 0; i < targetDef->iothreads; i++) {
> +        if (VIR_ALLOC(info_ret[i]) < 0)
> +            goto cleanup;
> +
> +        /* IOThreads being counting at 1 */
> +        info_ret[i]->iothread_id = i + 1;
> +

As I mentioned in my reply to v3:
https://www.redhat.com/archives/libvir-list/2015-March/msg00249.html
this really would look readable with virBitmapToData.
I meant something like this:

pininfo = virDomainVcpuPinFindByVcpu(targetDef->cputune.iothreadspin,);
if (!pininfo) {
   bitmap = virBitmapNew(hostcpus);
   virBitmapSetAllBits(bitmap);
   pininfo = bitmap;
}
virBitmapToData(pininfo, info[i]->cpumap, info[i]->cpumaplen)

> +        if (VIR_ALLOC_N(info_ret[i]->cpumap, maplen) < 0)
> +            goto cleanup;
> +
> +        /* Initialize the cpumap */
> +        info_ret[i]->cpumaplen = maplen;
> +        memset(info_ret[i]->cpumap, 0xff, maplen);
> +        if (maxcpu % 8)
> +            info_ret[i]->cpumap[maplen - 1] &= (1 << maxcpu % 8) - 1;

> +    }
> +
> +    /* If iothreadspin setting exists, there are unused physical cpus */
> +    iothreadspin_list = targetDef->cputune.iothreadspin;
> +    for (i = 0; i < targetDef->cputune.niothreadspin; i++) {
> +        /* vcpuid is the iothread_id...
> +         * iothread_id is the index into info_ret + 1, so we can
> +         * assume that the info_ret index we want is vcpuid - 1
> +         */
> +        cpumap = info_ret[iothreadspin_list[i]->vcpuid - 1]->cpumap;
> +        cpumask = iothreadspin_list[i]->cpumask;
> +
> +        for (pcpu = 0; pcpu < maxcpu; pcpu++) {
> +            if (virBitmapGetBit(cpumask, pcpu, &pinned) < 0)
> +                goto cleanup;
> +            if (!pinned)
> +                VIR_UNUSE_CPU(cpumap, pcpu);
> +        }
> +    }

This is essentially an open-coded version of virBitmapToData.

Jan

Attachment: signature.asc
Description: Digital signature

--
libvir-list mailing list
libvir-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/libvir-list

[Index of Archives]     [Virt Tools]     [Libvirt Users]     [Lib OS Info]     [Fedora Users]     [Fedora Desktop]     [Fedora SELinux]     [Big List of Linux Books]     [Yosemite News]     [KDE Users]     [Fedora Tools]