Re: [PATCH 03/12] create a new cgroup and move all hypervisor threads to the new cgroup

On Wed, Jul 25, 2012 at 01:22:29PM +0800, tangchen wrote:
> From: Wen Congyang <wency@xxxxxxxxxxxxxx>
> 
> Create a new cgroup and move all hypervisor threads to the new cgroup.
> This allows us to do two things:
> 1. limit only vcpu usage rather than the whole qemu process
> 2. limit hypervisor threads (including vhost-net threads)
> 
> Signed-off-by: Wen Congyang <wency@xxxxxxxxxxxxxx>
> Signed-off-by: Tang Chen <tangchen@xxxxxxxxxxxxxx>
> Signed-off-by: Hu Tao <hutao@xxxxxxxxxxxxxx>
> ---
>  src/qemu/qemu_cgroup.c  |   71 ++++++++++++++++++++++++++++++++++++++++++++---
>  src/qemu/qemu_cgroup.h  |    2 ++
>  src/qemu/qemu_process.c |    6 +++-
>  3 files changed, 74 insertions(+), 5 deletions(-)
> 
> diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
> index 32184e7..46ae1db 100644
> --- a/src/qemu/qemu_cgroup.c
> +++ b/src/qemu/qemu_cgroup.c
> @@ -521,11 +521,12 @@ int qemuSetupCgroupForVcpu(struct qemud_driver *driver, virDomainObjPtr vm)
>      }
>  
>      if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
> -        /* If we does not know VCPU<->PID mapping or all vcpu runs in the same
> +        /* If we do not know the VCPU<->PID mapping or all vcpus run in the same
>           * thread, we cannot control each vcpu.
>           */
> -        virCgroupFree(&cgroup);
> -        return 0;
> +        virReportError(VIR_ERR_INTERNAL_ERROR,
> +                       _("Unable to get vcpus' pids"));
> +        goto cleanup;
>      }
>  
>      for (i = 0; i < priv->nvcpupids; i++) {
> @@ -562,7 +563,11 @@ int qemuSetupCgroupForVcpu(struct qemud_driver *driver, virDomainObjPtr vm)
>      return 0;
>  
>  cleanup:
> -    virCgroupFree(&cgroup_vcpu);
> +    if (cgroup_vcpu) {
> +        virCgroupRemove(cgroup_vcpu);
> +        virCgroupFree(&cgroup_vcpu);
> +    }
> +
>      if (cgroup) {
>          virCgroupRemove(cgroup);
>          virCgroupFree(&cgroup);
> @@ -571,6 +576,64 @@ cleanup:
>      return -1;
>  }
>  
> +int qemuSetupCgroupForHypervisor(struct qemud_driver *driver,
> +                                 virDomainObjPtr vm)
> +{
> +    virCgroupPtr cgroup = NULL;
> +    virCgroupPtr cgroup_hypervisor = NULL;
> +    int rc, i;
> +
> +    if (driver->cgroup == NULL)
> +        return 0; /* Not supported, so claim success */
> +
> +    rc = virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 0);
> +    if (rc != 0) {
> +        virReportSystemError(-rc,
> +                             _("Unable to find cgroup for %s"),
> +                             vm->def->name);
> +        goto cleanup;
> +    }
> +
> +    rc = virCgroupForHypervisor(cgroup, &cgroup_hypervisor, 1);
> +    if (rc < 0) {
> +        virReportSystemError(-rc,
> +                             _("Unable to create hypervisor cgroup for %s"),
> +                             vm->def->name);
> +        goto cleanup;
> +    }
> +
> +    for (i = 0; i < VIR_CGROUP_CONTROLLER_LAST; i++) {
> +        if (!qemuCgroupControllerActive(driver, i)) {
> +            VIR_WARN("cgroup %d is not active", i);
> +            continue;
> +        }
> +        rc = virCgroupMoveTask(cgroup, cgroup_hypervisor, i);
> +        if (rc < 0) {
> +            virReportSystemError(-rc,
> +                                 _("Unable to move tasks from domain cgroup to "
> +                                   "hypervisor cgroup in controller %d for %s"),
> +                                 i, vm->def->name);
> +            goto cleanup;
> +        }
> +    }
> +
> +    virCgroupFree(&cgroup_hypervisor);
> +    virCgroupFree(&cgroup);
> +    return 0;
> +
> +cleanup:
> +    if (cgroup_hypervisor) {
> +        virCgroupRemove(cgroup_hypervisor);
> +        virCgroupFree(&cgroup_hypervisor);
> +    }
> +
> +    if (cgroup) {
> +        virCgroupRemove(cgroup);
> +        virCgroupFree(&cgroup);
> +    }
> +
> +    return rc;
> +}
>  
>  int qemuRemoveCgroup(struct qemud_driver *driver,
>                       virDomainObjPtr vm,
> diff --git a/src/qemu/qemu_cgroup.h b/src/qemu/qemu_cgroup.h
> index 5973430..3380ee2 100644
> --- a/src/qemu/qemu_cgroup.h
> +++ b/src/qemu/qemu_cgroup.h
> @@ -54,6 +54,8 @@ int qemuSetupCgroupVcpuBW(virCgroupPtr cgroup,
>                            unsigned long long period,
>                            long long quota);
>  int qemuSetupCgroupForVcpu(struct qemud_driver *driver, virDomainObjPtr vm);
> +int qemuSetupCgroupForHypervisor(struct qemud_driver *driver,
> +                                 virDomainObjPtr vm);
>  int qemuRemoveCgroup(struct qemud_driver *driver,
>                       virDomainObjPtr vm,
>                       int quiet);
> diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
> index 685ea7c..d89b4d5 100644
> --- a/src/qemu/qemu_process.c
> +++ b/src/qemu/qemu_process.c
> @@ -3753,10 +3753,14 @@ int qemuProcessStart(virConnectPtr conn,
>      if (qemuProcessDetectVcpuPIDs(driver, vm) < 0)
>          goto cleanup;
>  
> -    VIR_DEBUG("Setting cgroup for each VCPU(if required)");
> +    VIR_DEBUG("Setting cgroup for each VCPU (if required)");
>      if (qemuSetupCgroupForVcpu(driver, vm) < 0)
>          goto cleanup;
>  
> +    VIR_DEBUG("Setting cgroup for hypervisor (if required)");
> +    if (qemuSetupCgroupForHypervisor(driver, vm) < 0)
> +        goto cleanup;
> +
>      VIR_DEBUG("Setting VCPU affinities");
>      if (qemuProcessSetVcpuAffinites(conn, vm) < 0)
>          goto cleanup;
> -- 
> 1.7.10.2
> 
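
For readers of the archive: after this patch the per-domain hierarchy
ends up looking roughly like the following (a sketch only; the
"hypervisor" directory name comes from virCgroupForHypervisor(),
presumably added earlier in this series, and the mount point is just an
example):

  /sys/fs/cgroup/cpu/libvirt/qemu/<domain>/    domain cgroup
      hypervisor/   emulator and vhost-net threads are moved here
      vcpu0/        existing per-vcpu groups
      vcpu1/

A cgroup's tasks file lists only the threads attached directly to that
group, so the move presumably leaves the vcpu threads that
qemuSetupCgroupForVcpu() already placed in vcpuN/ untouched and picks
up only the remaining hypervisor threads.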

ACK.
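
For the archives, here is a minimal standalone sketch of what the
per-controller move amounts to at the cgroup v1 filesystem level (this
is not libvirt's implementation; virCgroupMoveTask() does the real
work, and the paths and error handling here are simplified):

#include <stdio.h>

/* Sketch: migrate every thread listed in src_dir's "tasks" file into
 * dst_dir's "tasks" file.  In cgroup v1, writing a TID into a group's
 * "tasks" file moves that single thread into the group. */
int
move_all_tasks(const char *src_dir, const char *dst_dir)
{
    char path[4096], tid[64];
    FILE *src = NULL, *dst = NULL;
    int ret = -1;

    snprintf(path, sizeof(path), "%s/tasks", src_dir);
    if (!(src = fopen(path, "r")))
        goto out;

    snprintf(path, sizeof(path), "%s/tasks", dst_dir);
    if (!(dst = fopen(path, "w")))
        goto out;

    /* One TID per line; the kernel expects one TID per write(),
     * which flushing after each line provides. */
    while (fgets(tid, sizeof(tid), src)) {
        if (fputs(tid, dst) == EOF || fflush(dst) != 0)
            goto out;
    }
    ret = 0;

out:
    if (src)
        fclose(src);
    if (dst)
        fclose(dst);
    return ret;
}

The real helper has to repeat this once per controller mount, which is
why the new function loops over VIR_CGROUP_CONTROLLER_LAST and skips
controllers that are not active.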

-- 
Thanks,
Hu Tao
