On Sun, 25 May 2008, Steven Rostedt wrote:

> 
> ---
>  arch/x86/lib/delay_32.c |   32 ++++++++++++++++++++++++++++----
>  arch/x86/lib/delay_64.c |   31 +++++++++++++++++++++++++++----
>  2 files changed, 55 insertions(+), 8 deletions(-)
> 
> Index: linux-tip.git/arch/x86/lib/delay_32.c
> ===================================================================
> --- linux-tip.git.orig/arch/x86/lib/delay_32.c	2008-05-22 14:51:02.000000000 -0400
> +++ linux-tip.git/arch/x86/lib/delay_32.c	2008-05-25 09:45:24.000000000 -0400
> @@ -44,13 +44,37 @@ static void delay_loop(unsigned long loo
>  static void delay_tsc(unsigned long loops)
>  {
>  	unsigned long bclock, now;
> +	int cpu;
> 
> -	preempt_disable();		/* TSC's are per-cpu */
> +	preempt_disable();
> +	cpu = smp_processor_id();
>  	rdtscl(bclock);
> -	do {
> -		rep_nop();
> +	for (;;) {
>  		rdtscl(now);
> -	} while ((now-bclock) < loops);
> +		if ((now - bclock) >= loops)
> +			break;
> +
> +		loops -= (now - bclock);

Bah, this is mathematically incorrect. Going for -v4!

A simple patch like this shouldn't take so much effort. I must need more
sleep.

-- Steve

> +
> +		/* Allow RT tasks to run */
> +		preempt_enable();
> +		rep_nop();
> +		preempt_disable();
> +
> +		/*
> +		 * It is possible that we moved to another CPU, and
> +		 * since TSC's are per-cpu we need to calculate
> +		 * that. The delay must guarantee that we wait "at
> +		 * least" the amount of time. Being moved to another
> +		 * CPU could make the wait longer but we just need to
> +		 * make sure we waited long enough. Rebalance the
> +		 * counter for this CPU.
> +		 */
> +		if (unlikely(cpu != smp_processor_id())) {
> +			cpu = smp_processor_id();
> +			rdtscl(bclock);
> +		}
> +	}
>  	preempt_enable();
>  }
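
For context, the arithmetic problem in the hunk above is that "loops -= (now - bclock)"
runs on every pass through the loop, while bclock is only re-read after a migration to
another CPU. The cycles that have already elapsed therefore get subtracted again and
again, and the loop exits long before the requested delay has passed. Below is a minimal
sketch of how the rebalancing could be made correct (it reuses the rdtscl(), rep_nop()
and smp_processor_id() helpers from the patch above and is only an illustration, not
the actual -v4): subtract the elapsed cycles only in the migration case, right before
re-reading the TSC on the new CPU.

static void delay_tsc(unsigned long loops)
{
	unsigned long bclock, now;
	int cpu;

	preempt_disable();		/* TSC's are per-cpu */
	cpu = smp_processor_id();
	rdtscl(bclock);
	for (;;) {
		rdtscl(now);
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * Only after a move to another CPU is the old bclock
		 * meaningless.  Credit the cycles already waited on the
		 * old CPU against loops, then restart the baseline from
		 * this CPU's TSC.  On the common no-migration path loops
		 * is untouched, so nothing is subtracted twice.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			rdtscl(bclock);
		}
	}
	preempt_enable();
}

With the subtraction moved inside the migration branch, a migration can still make the
wait come out longer than requested (cycles spent while preempted are never credited),
but it can no longer come out shorter, which is the "at least" guarantee the delay has
to provide.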