On Mon, Jun 09, 2003 at 08:26:40AM -0700, David S. Miller wrote: > > I've applied this, but we need to refine this for SMP on the > 2.5.x side. We need to use a percpu variable (see > DECLARE_PER_CPU) and also watch for cpu online/offline > events so that we init that cpus state. Alright, I've done it for the tasklets. If this looks OK I can do the rest too. -- Debian GNU/Linux 3.0 is out! ( http://www.debian.org/ ) Email: Herbert Xu ~{PmV>HI~} <herbert@gondor.apana.org.au> Home Page: http://gondor.apana.org.au/~herbert/ PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
Index: kernel-source-2.5/net/core/flow.c =================================================================== RCS file: /home/gondolin/herbert/src/CVS/debian/kernel-source-2.5/net/core/flow.c,v retrieving revision 1.4 diff -u -r1.4 flow.c --- kernel-source-2.5/net/core/flow.c 10 Jun 2003 09:19:17 -0000 1.4 +++ kernel-source-2.5/net/core/flow.c 10 Jun 2003 11:37:47 -0000 @@ -15,6 +15,9 @@ #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/completion.h> +#include <linux/percpu.h> +#include <linux/bitops.h> +#include <linux/notifier.h> #include <net/flow.h> #include <asm/atomic.h> #include <asm/semaphore.h> @@ -56,10 +59,12 @@ struct flow_flush_info { void *object; atomic_t cpuleft; + unsigned long cpumap; struct completion completion; }; -static struct tasklet_struct flow_flush_tasklets[NR_CPUS]; -static DECLARE_MUTEX(flow_flush_sem); +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklet) = { NULL }; + +static unsigned long flow_cache_cpu_map; static void flow_cache_new_hashrnd(unsigned long arg) { @@ -182,7 +187,7 @@ } } - if (!fle) { + if (!fle && test_bit(cpu, &flow_cache_cpu_map)) { if (flow_count(cpu) > flow_hwm) flow_cache_shrink(cpu); @@ -230,10 +235,10 @@ cpu = smp_processor_id(); for (i = 0; i < flow_hash_size; i++) { - struct flow_cache_entry *fle, **flp; + struct flow_cache_entry *fle; - flp = &flow_table[(cpu << flow_hash_shift) + i]; - for (; (fle = *flp) != NULL; flp = &fle->next) { + fle = flow_table[(cpu << flow_hash_shift) + i]; + for (; fle; fle = fle->next) { if (fle->object != object) continue; fle->object = NULL; @@ -252,23 +257,28 @@ struct tasklet_struct *tasklet; cpu = smp_processor_id(); - tasklet = &flow_flush_tasklets[cpu]; - tasklet_init(tasklet, flow_cache_flush_tasklet, (unsigned long)info); + if (!test_bit(cpu, &info->cpumap)) + return; + + tasklet = &per_cpu(flow_flush_tasklet, cpu); + tasklet->data = (unsigned long)info; tasklet_schedule(tasklet); } void flow_cache_flush(void *object) { struct 
flow_flush_info info; + static DECLARE_MUTEX(flow_flush_sem); info.object = object; - atomic_set(&info.cpuleft, num_online_cpus()); + info.cpumap = flow_cache_cpu_map; + atomic_set(&info.cpuleft, hweight_long(info.cpumap)); init_completion(&info.completion); down(&flow_flush_sem); - smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0); local_bh_disable(); + smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0); flow_cache_flush_per_cpu(&info); local_bh_enable(); @@ -277,6 +287,32 @@ up(&flow_flush_sem); } +static void __devinit flow_cache_cpu_online(int cpu) +{ + struct tasklet_struct *tasklet; + + tasklet = &per_cpu(flow_flush_tasklet, cpu); + tasklet_init(tasklet, flow_cache_flush_tasklet, 0); + + set_bit(cpu, &flow_cache_cpu_map); +} + +static int __devinit flow_cache_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned long cpu = (unsigned long)hcpu; + switch (action) { + case CPU_UP_PREPARE: + flow_cache_cpu_online(cpu); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __devinitdata flow_cache_cpu_nb = { + .notifier_call = flow_cache_cpu_notify, +}; + static int __init flow_cache_init(void) { unsigned long order; @@ -315,6 +351,9 @@ panic("Failed to allocate flow cache hash table\n"); memset(flow_table, 0, PAGE_SIZE << order); + + flow_cache_cpu_online(smp_processor_id()); + register_cpu_notifier(&flow_cache_cpu_nb); return 0; }