Re: [PATCH 2/5] Support CPU placement in LXC driver

On Thu, Nov 24, 2011 at 11:38:13AM +0000, Daniel P. Berrange wrote:
> From: "Daniel P. Berrange" <berrange@xxxxxxxxxx>
> 
> While LXC does not have the concept of VCPUS, so we cann't do

 s/nn/n/

> per-VCPU pCPU placement, we can support the VM level CPU
> placement. To do this, simply set the CPU affinity of the LXC
> controller at startup. All child processes will inherit this
> affinity.
> 
> * src/lxc/lxc_controller.c: Set process affinity
> ---
>  src/lxc/lxc_controller.c |   63 ++++++++++++++++++++++++++++++++++++++++++++++
>  1 files changed, 63 insertions(+), 0 deletions(-)
> 
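
For anyone following the thread, the mechanism the commit message relies on
is just the standard Linux behaviour that a process's CPU affinity mask is
inherited across fork()/exec(), so pinning the controller before it spawns
anything pins the whole container. A minimal standalone sketch of that
behaviour, using plain sched_setaffinity(2) rather than the libvirt
wrappers (illustrative only, not libvirt code):

  #define _GNU_SOURCE
  #include <sched.h>        /* sched_setaffinity, cpu_set_t */
  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/wait.h>
  #include <unistd.h>

  int main(void)
  {
      cpu_set_t mask;

      CPU_ZERO(&mask);
      CPU_SET(0, &mask);                      /* pin to pCPU 0 */

      if (sched_setaffinity(0 /* self */, sizeof(mask), &mask) < 0) {
          perror("sched_setaffinity");
          return EXIT_FAILURE;
      }

      if (fork() == 0) {                      /* child inherits the mask */
          cpu_set_t child;
          CPU_ZERO(&child);
          sched_getaffinity(0, sizeof(child), &child);
          printf("child pinned to CPU0 only: %s\n",
                 CPU_ISSET(0, &child) && CPU_COUNT(&child) == 1 ? "yes" : "no");
          _exit(EXIT_SUCCESS);
      }
      wait(NULL);
      return EXIT_SUCCESS;
  }

The controller does the equivalent before starting the container init, so
every process inside the container ends up constrained to the configured
pCPUs.
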
> diff --git a/src/lxc/lxc_controller.c b/src/lxc/lxc_controller.c
> index 4718fa8..1db25fb 100644
> --- a/src/lxc/lxc_controller.c
> +++ b/src/lxc/lxc_controller.c
> @@ -66,6 +66,8 @@
>  #include "virfile.h"
>  #include "virpidfile.h"
>  #include "command.h"
> +#include "processinfo.h"
> +#include "nodeinfo.h"
>  
>  #define VIR_FROM_THIS VIR_FROM_LXC
>  
> @@ -322,6 +324,64 @@ static int lxcSetContainerNUMAPolicy(virDomainDefPtr def)
>  }
>  #endif
>  
> +
> +/*
> + * To be run while still single threaded
> + */
> +static int lxcSetContainerCpuAffinity(virDomainDefPtr def)
> +{
> +    int i, hostcpus, maxcpu = CPU_SETSIZE;
> +    virNodeInfo nodeinfo;
> +    unsigned char *cpumap;
> +    int cpumaplen;
> +
> +    VIR_DEBUG("Setting CPU affinity");
> +
> +    if (nodeGetInfo(NULL, &nodeinfo) < 0)
> +        return -1;
> +
> +    /* setaffinity fails if you set bits for CPUs which
> +     * aren't present, so we have to limit ourselves */
> +    hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
> +    if (maxcpu > hostcpus)
> +        maxcpu = hostcpus;
> +
> +    cpumaplen = VIR_CPU_MAPLEN(maxcpu);
> +    if (VIR_ALLOC_N(cpumap, cpumaplen) < 0) {
> +        virReportOOMError();
> +        return -1;
> +    }
> +
> +    if (def->cpumask) {
> +        /* XXX why don't we keep 'cpumask' in the libvirt cpumap
> +         * format to start with ?!?! */
> +        for (i = 0 ; i < maxcpu && i < def->cpumasklen ; i++)
> +            if (def->cpumask[i])
> +                VIR_USE_CPU(cpumap, i);
> +    } else {
> +        /* You may think this is redundant, but we can't assume libvirtd
> +         * itself is running on all pCPUs, so we need to explicitly set
> +         * the spawned QEMU instance to all pCPUs if no map is given in

  Hmm, you mean the LXC container instead of QEMU, right?

> +         * its config file */
> +        for (i = 0 ; i < maxcpu ; i++)
> +            VIR_USE_CPU(cpumap, i);
> +    }
> +
> +    /* We are presuming we are running between fork/exec of QEMU

  Same comment here: LXC, not QEMU.

> +     * so use '0' to indicate our own process ID. No threads are
> +     * running at this point
> +     */
> +    if (virProcessInfoSetAffinity(0, /* Self */
> +                                  cpumap, cpumaplen, maxcpu) < 0) {
> +        VIR_FREE(cpumap);
> +        return -1;
> +    }
> +    VIR_FREE(cpumap);
> +
> +    return 0;
> +}
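
A side note on the two libvirt-specific pieces above, for anyone less
familiar with them: the loop repacks the parser's one-char-per-CPU cpumask
into a dense byte bitmap (one bit per pCPU), and passing pid 0 applies the
mask to the calling process itself. My reading is that on Linux
virProcessInfoSetAffinity() is essentially a thin wrapper over
sched_setaffinity(2), roughly along these lines (simplified sketch, not the
actual implementation):

  #define _GNU_SOURCE
  #include <sched.h>

  /* Simplified sketch: unpack libvirt's byte bitmap (bit i set means
   * pCPU i is usable) into a cpu_set_t and apply it to the calling
   * process.  pid 0 means "myself", and since no threads are running
   * yet, the whole controller, and hence every child it later forks,
   * is pinned. */
  static int set_self_affinity(const unsigned char *cpumap, int maxcpu)
  {
      cpu_set_t mask;
      int i;

      CPU_ZERO(&mask);
      for (i = 0; i < maxcpu; i++)
          if (cpumap[i / 8] & (1 << (i % 8)))
              CPU_SET(i, &mask);

      return sched_setaffinity(0 /* self */, sizeof(mask), &mask);
  }
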
> +
> +
>  /**
>   * lxcSetContainerResources
>   * @def: pointer to virtual machine structure
> @@ -347,6 +407,9 @@ static int lxcSetContainerResources(virDomainDefPtr def)
>          {'c', LXC_DEV_MAJ_TTY, LXC_DEV_MIN_PTMX},
>          {0,   0, 0}};
>  
> +    if (lxcSetContainerCpuAffinity(def) < 0)
> +        return -1;
> +
>      if (lxcSetContainerNUMAPolicy(def) < 0)
>          return -1;

  Either I don't understand the comments or they need fixing :)

  ACK

Daniel

-- 
Daniel Veillard      | libxml Gnome XML XSLT toolkit  http://xmlsoft.org/
daniel@xxxxxxxxxxxx  | Rpmfind RPM search engine http://rpmfind.net/
http://veillard.com/ | virtualization library  http://libvirt.org/

--
libvir-list mailing list
libvir-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/libvir-list

