[PATCH 03/08] - reorganize code to plug apparent bitmask leaks

Against: numactl-2.0.3-rc2

The remainder of the fixes for apparent leaks.  These required a bit
more reorg to address.

 libnuma.c |   75 ++++++++++++++++++++++++++++++++++++++------------------------
 1 files changed, 47 insertions(+), 28 deletions(-)

Index: numactl-2.0.3-rc2/libnuma.c
===================================================================
--- numactl-2.0.3-rc2.orig/libnuma.c	2009-04-27 09:44:52.000000000 -0400
+++ numactl-2.0.3-rc2/libnuma.c	2009-04-27 16:49:39.000000000 -0400
@@ -1031,17 +1031,19 @@ numa_get_membind_v1(void)
 {
 	int oldpolicy;
 	struct bitmask *bmp;
-	nodemask_t *nmp;
+	nodemask_t nmp;
 
 	bmp = allocate_nodemask_v1();
 	getpol(&oldpolicy, bmp);
 	if (oldpolicy == MPOL_BIND) {
-		nmp = (nodemask_t *)bmp->maskp;
-		return *nmp;
+		copy_bitmask_to_nodemask(bmp, &nmp);
+	} else {
+		/* copy the body of the map to numa_all_nodes */
+		copy_bitmask_to_nodemask(bmp, &numa_all_nodes);
+		nmp = numa_all_nodes;
 	}
-	/* copy the body of the map to numa_all_nodes */
-	copy_bitmask_to_nodemask(bmp, &numa_all_nodes);
-	return numa_all_nodes;
+	numa_bitmask_free(bmp);
+	return nmp;
 }
 __asm__(".symver numa_get_membind_v1,numa_get_membind@xxxxxxxxxxx");
 
@@ -1411,14 +1413,16 @@ numa_get_run_node_mask_v1(void)
 	int i, k;
 	int max = numa_max_node_int();
 	struct bitmask *bmp, *cpus, *nodecpus;
-	nodemask_t *nmp;
+	nodemask_t nmp;
 
-	bmp = allocate_nodemask_v1(); /* the size of a nodemask_t */
 	cpus = numa_allocate_cpumask();
-	nodecpus = numa_allocate_cpumask();
-	if (numa_sched_getaffinity_v2_int(0, cpus) < 0)
-		return numa_no_nodes;
+	if (numa_sched_getaffinity_v2_int(0, cpus) < 0){
+		nmp = numa_no_nodes;
+		goto free_cpus;
+	}
 
+	nodecpus = numa_allocate_cpumask();
+	bmp = allocate_nodemask_v1(); /* the size of a nodemask_t */
 	for (i = 0; i <= max; i++) {
 		if (numa_node_to_cpus_v2_int(i, nodecpus) < 0) {
 			/* It's possible for the node to not exist */
@@ -1429,8 +1433,12 @@ numa_get_run_node_mask_v1(void)
 			numa_bitmask_setbit(bmp, i);
 		}
 	}
-	nmp = (nodemask_t *)bmp->maskp;
-	return *nmp;
+	copy_bitmask_to_nodemask(bmp, &nmp);
+	numa_bitmask_free(bmp);
+	numa_bitmask_free(nodecpus);
+free_cpus:
+	numa_bitmask_free(cpus);
+	return nmp;
 }
 __asm__(".symver numa_get_run_node_mask_v1,numa_get_run_node_mask@xxxxxxxxxxx");
 
@@ -1442,13 +1450,15 @@ numa_get_run_node_mask_v2(void)
 	int max = numa_max_node_int();
 	struct bitmask *bmp, *cpus, *nodecpus;
 
+	bmp = numa_allocate_cpumask();
 	cpus = numa_allocate_cpumask();
-	nodecpus = numa_allocate_cpumask();
-
-	if (numa_sched_getaffinity_v2_int(0, cpus) < 0)
-		return numa_no_nodes_ptr;
+	if (numa_sched_getaffinity_v2_int(0, cpus) < 0){
+		copy_bitmask_to_bitmask(numa_no_nodes_ptr, bmp);
+		goto free_cpus;
+	}
 
+	nodecpus = numa_allocate_cpumask();
 	for (i = 0; i <= max; i++) {
 		if (numa_node_to_cpus_v2_int(i, nodecpus) < 0) {
 			/* It's possible for the node to not exist */
@@ -1459,6 +1469,9 @@ numa_get_run_node_mask_v2(void)
 			numa_bitmask_setbit(bmp, i);
 		}
 	}
+	numa_bitmask_free(nodecpus);
+free_cpus:
+	numa_bitmask_free(cpus);
 	return bmp;
 }
 __asm__(".symver numa_get_run_node_mask_v2,numa_get_run_node_mask@@libnuma_1.2");
 
@@ -1481,22 +1494,28 @@ int numa_move_pages(int pid, unsigned lo
 int numa_run_on_node(int node)
 {
 	int numa_num_nodes = numa_num_possible_nodes();
+	int ret = -1;
 	struct bitmask *cpus;
 
+	if (node >= numa_num_nodes){
+		errno = EINVAL;
+		goto out;
+	}
+
 	cpus = numa_allocate_cpumask();
-	if (node == -1) {
+
+	if (node == -1)
 		numa_bitmask_setall(cpus);
-	} else if (node < numa_num_nodes) {
-		if (numa_node_to_cpus_v2_int(node, cpus) < 0) {
-			numa_warn(W_noderunmask,
-				"Cannot read node cpumask from sysfs");
-			return -1;
-		}
-	} else {
-		errno = EINVAL;
-		return -1;
+	else if (numa_node_to_cpus_v2_int(node, cpus) < 0){
+		numa_warn(W_noderunmask, "Cannot read node cpumask from sysfs");
+		goto free;
 	}
-	return numa_sched_setaffinity_v2_int(0, cpus);
+
+	ret = numa_sched_setaffinity_v2_int(0, cpus);
+free:
+	numa_bitmask_free(cpus);
+out:
+	return ret;
 }
 
 int numa_preferred(void)
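
For anyone reading along, the v1 compatibility wrappers above all reduce
to the same copy-then-free shape.  A minimal standalone sketch of that
pattern follows; it is illustrative only, using the public
numa_allocate_nodemask() and a setbit call where the patch uses the
library-internal allocate_nodemask_v1() and getpol():

#include <numa.h>

/*
 * Copy-then-free: build the result in a dynamically allocated
 * struct bitmask, copy it into an on-stack nodemask_t, and free
 * the bitmask before returning, so the allocation cannot escape.
 */
static nodemask_t mask_without_leak(void)
{
	struct bitmask *bmp = numa_allocate_nodemask();
	nodemask_t nmp;

	numa_bitmask_setbit(bmp, 0);		/* stand-in for getpol() */
	copy_bitmask_to_nodemask(bmp, &nmp);	/* result lives on the stack */
	numa_bitmask_free(bmp);			/* nothing left to leak */
	return nmp;
}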
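
Likewise, the numa_run_on_node() rework is the usual goto-unwind shape:
validate arguments before allocating, then route every exit path through
a single free.  A sketch using the public v2 entry points rather than
the internal *_v2_int helpers (the function name here is made up for
illustration, not part of the patch):

#include <errno.h>
#include <numa.h>

/*
 * Validate before allocating, then funnel all exits through labels
 * so the cpumask is freed exactly once on every path.
 */
static int run_on_node_sketch(int node)
{
	int ret = -1;
	struct bitmask *cpus;

	if (node >= numa_num_possible_nodes()) {
		errno = EINVAL;
		goto out;		/* nothing allocated yet */
	}

	cpus = numa_allocate_cpumask();
	if (node == -1)
		numa_bitmask_setall(cpus);
	else if (numa_node_to_cpus(node, cpus) < 0)
		goto free;		/* the patch warns via numa_warn() here */

	ret = numa_sched_setaffinity(0, cpus);
free:
	numa_bitmask_free(cpus);
out:
	return ret;
}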