To preserve the useful caching feature and minimize the risk of getting an outdated node cpumask after cpu hotplugging, numactl refreshes the cache for every call delayed more than one second from the previous one. This is a compromise between an outdated static cache and node cpumask parsing during every call of numa_node_to_cpus, which would be very inefficient.