*libvirt_virDomainGetVcpus: add error handling, return -1 instead of None *libvirt_virDomainPinVcpu and libvirt_virDomainPinVcpuFlags: make use of libvirt_boolUnwrap. Set vcpus which are listed in tuple pycpumap, turn off the rest in cpumap. The original way ignored the error info from PyTuple_GetItem if pos is out of bounds. "IndexError: tuple index out of range" The error message will only be raised on the next command in interactive mode. --- python/libvirt-override.c | 173 +++++++++++++++++++++++++++++--------------- 1 files changed, 114 insertions(+), 59 deletions(-) diff --git a/python/libvirt-override.c b/python/libvirt-override.c index 56f96ba..2788546 100644 --- a/python/libvirt-override.c +++ b/python/libvirt-override.c @@ -1333,9 +1333,11 @@ cleanup: static PyObject * libvirt_virDomainGetVcpus(PyObject *self ATTRIBUTE_UNUSED, - PyObject *args) { + PyObject *args) +{ virDomainPtr domain; PyObject *pyobj_domain, *pyretval = NULL, *pycpuinfo = NULL, *pycpumap = NULL; + PyObject *error = NULL; virNodeInfo nodeinfo; virDomainInfo dominfo; virVcpuInfoPtr cpuinfo = NULL; @@ -1352,29 +1354,33 @@ libvirt_virDomainGetVcpus(PyObject *self ATTRIBUTE_UNUSED, i_retval = virNodeGetInfo(virDomainGetConnect(domain), &nodeinfo); LIBVIRT_END_ALLOW_THREADS; if (i_retval < 0) - return VIR_PY_NONE; + return VIR_PY_INT_FAIL; LIBVIRT_BEGIN_ALLOW_THREADS; i_retval = virDomainGetInfo(domain, &dominfo); LIBVIRT_END_ALLOW_THREADS; if (i_retval < 0) - return VIR_PY_NONE; + return VIR_PY_INT_FAIL; if (VIR_ALLOC_N(cpuinfo, dominfo.nrVirtCpu) < 0) - return VIR_PY_NONE; + return PyErr_NoMemory(); cpumaplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(nodeinfo)); if (xalloc_oversized(dominfo.nrVirtCpu, cpumaplen) || - VIR_ALLOC_N(cpumap, dominfo.nrVirtCpu * cpumaplen) < 0) + VIR_ALLOC_N(cpumap, dominfo.nrVirtCpu * cpumaplen) < 0) { + error = PyErr_NoMemory(); goto cleanup; + } LIBVIRT_BEGIN_ALLOW_THREADS; i_retval = virDomainGetVcpus(domain, cpuinfo, dominfo.nrVirtCpu, cpumap, cpumaplen); 
LIBVIRT_END_ALLOW_THREADS; - if (i_retval < 0) + if (i_retval < 0) { + error = VIR_PY_INT_FAIL; goto cleanup; + } /* convert to a Python tuple of long objects */ if ((pyretval = PyTuple_New(2)) == NULL) @@ -1388,54 +1394,80 @@ libvirt_virDomainGetVcpus(PyObject *self ATTRIBUTE_UNUSED, PyObject *info = PyTuple_New(4); if (info == NULL) goto cleanup; - PyTuple_SetItem(info, 0, PyInt_FromLong((long)cpuinfo[i].number)); - PyTuple_SetItem(info, 1, PyInt_FromLong((long)cpuinfo[i].state)); - PyTuple_SetItem(info, 2, PyLong_FromLongLong((long long)cpuinfo[i].cpuTime)); - PyTuple_SetItem(info, 3, PyInt_FromLong((long)cpuinfo[i].cpu)); - PyList_SetItem(pycpuinfo, i, info); + if (PyTuple_SetItem(info, 0, PyInt_FromLong((long)cpuinfo[i].number)) < 0) + { + Py_DECREF(info); + goto cleanup; + } + + if (PyTuple_SetItem(info, 1, PyInt_FromLong((long)cpuinfo[i].state)) < 0) + { + Py_DECREF(info); + goto cleanup; + } + + if (PyTuple_SetItem(info, 2, PyLong_FromLongLong((long long)cpuinfo[i].cpuTime)) < 0) + { + Py_DECREF(info); + goto cleanup; + } + + if (PyTuple_SetItem(info, 3, PyInt_FromLong((long)cpuinfo[i].cpu)) < 0) + { + Py_DECREF(info); + goto cleanup; + } + if (PyList_SetItem(pycpuinfo, i, info) < 0) + goto cleanup; } + for (i = 0 ; i < dominfo.nrVirtCpu ; i++) { PyObject *info = PyTuple_New(VIR_NODEINFO_MAXCPUS(nodeinfo)); - int j; if (info == NULL) goto cleanup; + + int j; for (j = 0 ; j < VIR_NODEINFO_MAXCPUS(nodeinfo) ; j++) { - PyTuple_SetItem(info, j, PyBool_FromLong(VIR_CPU_USABLE(cpumap, cpumaplen, i, j))); + if (PyTuple_SetItem(info, j, PyBool_FromLong(VIR_CPU_USABLE(cpumap, cpumaplen, i, j))) < 0) + { + Py_DECREF(info); + goto cleanup; + } } - PyList_SetItem(pycpumap, i, info); + if (PyList_SetItem(pycpumap, i, info) < 0) + goto cleanup; } - PyTuple_SetItem(pyretval, 0, pycpuinfo); - PyTuple_SetItem(pyretval, 1, pycpumap); + + if (PyTuple_SetItem(pyretval, 0, pycpuinfo) < 0) + goto cleanup; + + if (PyTuple_SetItem(pyretval, 1, pycpumap) < 0) + goto cleanup; 
VIR_FREE(cpuinfo); VIR_FREE(cpumap); return pyretval; - cleanup: +cleanup: VIR_FREE(cpuinfo); VIR_FREE(cpumap); - /* NB, Py_DECREF is a badly defined macro, so we require - * braces here to avoid 'ambiguous else' warnings from - * the compiler. - * NB. this comment is true at of time of writing wrt to - * at least python2.5. - */ - if (pyretval) { Py_DECREF(pyretval); } - if (pycpuinfo) { Py_DECREF(pycpuinfo); } - if (pycpumap) { Py_DECREF(pycpumap); } - return VIR_PY_NONE; + Py_XDECREF(pyretval); + Py_XDECREF(pycpuinfo); + Py_XDECREF(pycpumap); + return error; } - static PyObject * libvirt_virDomainPinVcpu(PyObject *self ATTRIBUTE_UNUSED, - PyObject *args) { + PyObject *args) +{ virDomainPtr domain; - PyObject *pyobj_domain, *pycpumap, *truth; + PyObject *pyobj_domain, *pycpumap; + PyObject *ret = NULL; virNodeInfo nodeinfo; unsigned char *cpumap; - int cpumaplen, i, vcpu; + int cpumaplen, i, j, vcpu, tuple_size; int i_retval; if (!PyArg_ParseTuple(args, (char *)"OiO:virDomainPinVcpu", @@ -1451,37 +1483,50 @@ libvirt_virDomainPinVcpu(PyObject *self ATTRIBUTE_UNUSED, cpumaplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(nodeinfo)); if (VIR_ALLOC_N(cpumap, cpumaplen) < 0) - return VIR_PY_INT_FAIL; + return PyErr_NoMemory(); - truth = PyBool_FromLong(1); - for (i = 0 ; i < VIR_NODEINFO_MAXCPUS(nodeinfo) ; i++) { + tuple_size = PyTuple_Size(pycpumap); + if (tuple_size == -1) + goto cleanup; + + for (i = 0; i < tuple_size; i++) { PyObject *flag = PyTuple_GetItem(pycpumap, i); - if (flag == truth) - VIR_USE_CPU(cpumap, i); - else - VIR_UNUSE_CPU(cpumap, i); + bool b; + + if ((!flag) || (libvirt_boolUnwrap(flag, &b) < 0)) + goto cleanup; + + (b) ? 
VIR_USE_CPU(cpumap, i) : VIR_UNUSE_CPU(cpumap, i); } + for (j = 0; j < VIR_NODEINFO_MAXCPUS(nodeinfo) - i; j++) + VIR_UNUSE_CPU(cpumap, i + j); + LIBVIRT_BEGIN_ALLOW_THREADS; i_retval = virDomainPinVcpu(domain, vcpu, cpumap, cpumaplen); LIBVIRT_END_ALLOW_THREADS; - Py_DECREF(truth); - VIR_FREE(cpumap); - if (i_retval < 0) - return VIR_PY_INT_FAIL; + if (i_retval < 0) { + ret = VIR_PY_INT_FAIL; + goto cleanup; + } + ret = VIR_PY_INT_SUCCESS; - return VIR_PY_INT_SUCCESS; +cleanup: + VIR_FREE(cpumap); + return ret; } static PyObject * libvirt_virDomainPinVcpuFlags(PyObject *self ATTRIBUTE_UNUSED, - PyObject *args) { + PyObject *args) +{ virDomainPtr domain; - PyObject *pyobj_domain, *pycpumap, *truth; + PyObject *pyobj_domain, *pycpumap; + PyObject *ret = NULL; virNodeInfo nodeinfo; unsigned char *cpumap; - int cpumaplen, i, vcpu; + int cpumaplen, i, j, vcpu, tuple_size; unsigned int flags; int i_retval; @@ -1498,27 +1543,37 @@ libvirt_virDomainPinVcpuFlags(PyObject *self ATTRIBUTE_UNUSED, cpumaplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(nodeinfo)); if (VIR_ALLOC_N(cpumap, cpumaplen) < 0) - return VIR_PY_INT_FAIL; + return PyErr_NoMemory(); + + tuple_size = PyTuple_Size(pycpumap); + if (tuple_size == -1) + goto cleanup; - truth = PyBool_FromLong(1); - for (i = 0 ; i < VIR_NODEINFO_MAXCPUS(nodeinfo) ; i++) { + for (i = 0; i < tuple_size; i++) { PyObject *flag = PyTuple_GetItem(pycpumap, i); - if (flag == truth) - VIR_USE_CPU(cpumap, i); - else - VIR_UNUSE_CPU(cpumap, i); + bool b; + + if ((!flag) || (libvirt_boolUnwrap(flag, &b) < 0)) + goto cleanup; + + (b) ? 
VIR_USE_CPU(cpumap, i) : VIR_UNUSE_CPU(cpumap, i); } + for (j = 0; j < VIR_NODEINFO_MAXCPUS(nodeinfo) - i; j++) + VIR_UNUSE_CPU(cpumap, i + j); + LIBVIRT_BEGIN_ALLOW_THREADS; i_retval = virDomainPinVcpuFlags(domain, vcpu, cpumap, cpumaplen, flags); LIBVIRT_END_ALLOW_THREADS; - Py_DECREF(truth); - VIR_FREE(cpumap); - - if (i_retval < 0) - return VIR_PY_INT_FAIL; + if (i_retval < 0) { + ret = VIR_PY_INT_FAIL; + goto cleanup; + } + ret = VIR_PY_INT_SUCCESS; - return VIR_PY_INT_SUCCESS; +cleanup: + VIR_FREE(cpumap); + return ret; } static PyObject * -- 1.7.7.5 -- libvir-list mailing list libvir-list@xxxxxxxxxx https://www.redhat.com/mailman/listinfo/libvir-list