[PATCH 1/2] ia64: VIRT_CPU_ACCOUNTING (accurate cpu time accounting) take 2

Hi all,

Here is an updated patch set implementing VIRT_CPU_ACCOUNTING for ia64,
which enables more accurate CPU time accounting.

For details, please refer to:
 [PATCH 0/9] ia64: VIRT_CPU_ACCOUNTING (accurate cpu time accounting)
 http://www.gelato.unsw.edu.au/archives/linux-ia64/0710/21262.html

Patch 1 of 2 bundles the previous 9 patches together, plus some updates.

The following changes address review comments:
 - cputime.h: Better description in the header comment
 - cputime.h: Parenthesize (NSEC_PER_SEC / HZ) so conversions do not overflow early
 - cputime.h: Restore "const" on timespec_to_cputime's argument
 - minstate.h, ivt.S: Read the ITC earlier
 - time.c: Follow the recent change to update_process_times

An additional notable change:
 - head.S: Define cycle_to_cputime as a proper function, since the previous
           macro version overflows once the cycle count covers more than
           roughly 18 seconds.
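
For reference, a back-of-the-envelope check of that overflow (a standalone
sketch, not part of the patch; the 1.5GHz ITC frequency is only an
assumption for illustration):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC		1000000000ULL
#define IA64_NSEC_PER_CYC_SHIFT	30

int main(void)
{
	uint64_t itc_freq = 1500000000ULL;	/* assumed 1.5GHz ITC */
	uint64_t nsec_per_cyc =
		(NSEC_PER_SEC << IA64_NSEC_PER_CYC_SHIFT) / itc_freq;
	/* the old macro's 64-bit product cyc * nsec_per_cyc wraps at: */
	uint64_t max_cyc = UINT64_MAX / nsec_per_cyc;

	printf("overflow after %llu cycles (~%.1f sec)\n",
	       (unsigned long long)max_cyc,
	       (double)max_cyc / itc_freq);	/* ~17 sec at 1.5GHz */
	return 0;
}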

Signed-off-by: Hidetoshi Seto <seto.hidetoshi@xxxxxxxxxxxxxx>

---
 arch/ia64/Kconfig              |   11 ++++
 arch/ia64/ia32/elfcore32.h     |   14 +++++
 arch/ia64/kernel/asm-offsets.c |    5 +
 arch/ia64/kernel/entry.S       |   22 ++++++++
 arch/ia64/kernel/fsys.S        |   20 +++++++
 arch/ia64/kernel/head.S        |   20 +++++++
 arch/ia64/kernel/ivt.S         |   57 ++++++++++++++++++++++
 arch/ia64/kernel/minstate.h    |   14 +++++
 arch/ia64/kernel/time.c        |   78 ++++++++++++++++++++++++++++++
 include/asm-ia64/cputime.h     |  104 +++++++++++++++++++++++++++++++++++++++++
 include/asm-ia64/system.h      |   12 ++++
 include/asm-ia64/thread_info.h |   13 +++++
 12 files changed, 369 insertions(+), 1 deletion(-)

Index: linux-2.6.24-rc5/arch/ia64/Kconfig
===================================================================
--- linux-2.6.24-rc5.orig/arch/ia64/Kconfig
+++ linux-2.6.24-rc5/arch/ia64/Kconfig
@@ -262,6 +262,17 @@
 	default "17" if HUGETLB_PAGE
 	default "11"

+config VIRT_CPU_ACCOUNTING
+	bool "Deterministic task and CPU time accounting"
+	default y
+	help
+	  Select this option to enable more accurate task and CPU time
+	  accounting.  This is done by reading a CPU counter on each
+	  kernel entry and exit and on transitions within the kernel
+	  between system, softirq and hardirq state, so there is a
+	  small performance impact.
+	  If in doubt, say Y here.
+
 config SMP
 	bool "Symmetric multi-processing support"
 	help
Index: linux-2.6.24-rc5/arch/ia64/kernel/time.c
===================================================================
--- linux-2.6.24-rc5.orig/arch/ia64/kernel/time.c
+++ linux-2.6.24-rc5/arch/ia64/kernel/time.c
@@ -59,6 +59,84 @@
 };
 static struct clocksource *itc_clocksource;

+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+
+#include <linux/kernel_stat.h>
+
+extern cputime_t cycle_to_cputime(u64 cyc);
+
+/*
+ * Called from the context switch path with interrupts disabled, to charge
+ * all accumulated times to the outgoing (prev) process, and to prepare
+ * accounting for the incoming (next) process.
+ */
+void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
+{
+	struct thread_info *pi = task_thread_info(prev);
+	struct thread_info *ni = task_thread_info(next);
+	cputime_t delta_stime, delta_utime;
+	__u64 now;
+
+	now = ia64_get_itc();
+
+	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
+	account_system_time(prev, 0, delta_stime);
+	account_system_time_scaled(prev, delta_stime);
+
+	if (pi->ac_utime) {
+		delta_utime = cycle_to_cputime(pi->ac_utime);
+		account_user_time(prev, delta_utime);
+		account_user_time_scaled(prev, delta_utime);
+	}
+
+	pi->ac_stamp = ni->ac_stamp = now;
+	ni->ac_stime = ni->ac_utime = 0;
+}
+
+/*
+ * Account time for a transition between system, hard irq or soft irq state.
+ * Note that this function is called with interrupts enabled.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+	struct thread_info *ti = task_thread_info(tsk);
+	unsigned long flags;
+	cputime_t delta_stime;
+	__u64 now;
+
+	local_irq_save(flags);
+
+	now = ia64_get_itc();
+
+	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
+	account_system_time(tsk, 0, delta_stime);
+	account_system_time_scaled(tsk, delta_stime);
+	ti->ac_stime = 0;
+
+	ti->ac_stamp = now;
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Called from the timer interrupt handler to charge accumulated user time
+ * to the current process.  Must be called with interrupts disabled.
+ */
+void account_process_tick(struct task_struct *p, int user_tick)
+{
+	struct thread_info *ti = task_thread_info(p);
+	cputime_t delta_utime;
+
+	if (ti->ac_utime) {
+		delta_utime = cycle_to_cputime(ti->ac_utime);
+		account_user_time(p, delta_utime);
+		account_user_time_scaled(p, delta_utime);
+		ti->ac_utime = 0;
+	}
+}
+
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
 {
Index: linux-2.6.24-rc5/include/asm-ia64/system.h
===================================================================
--- linux-2.6.24-rc5.orig/include/asm-ia64/system.h
+++ linux-2.6.24-rc5/include/asm-ia64/system.h
@@ -210,6 +210,13 @@
 extern void ia64_save_extra (struct task_struct *task);
 extern void ia64_load_extra (struct task_struct *task);

+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
+# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n)
+#else
+# define IA64_ACCOUNT_ON_SWITCH(p,n)
+#endif
+
 #ifdef CONFIG_PERFMON
   DECLARE_PER_CPU(unsigned long, pfm_syst_info);
 # define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
@@ -222,6 +229,7 @@
 	 || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())

 #define __switch_to(prev,next,last) do {							 \
+	IA64_ACCOUNT_ON_SWITCH(prev, next);							 \
 	if (IA64_HAS_EXTRA_STATE(prev))								 \
 		ia64_save_extra(prev);								 \
 	if (IA64_HAS_EXTRA_STATE(next))								 \
@@ -266,6 +274,10 @@

 void default_idle(void);

+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
 #endif /* __KERNEL__ */

 #endif /* __ASSEMBLY__ */
Index: linux-2.6.24-rc5/include/asm-ia64/cputime.h
===================================================================
--- linux-2.6.24-rc5.orig/include/asm-ia64/cputime.h
+++ linux-2.6.24-rc5/include/asm-ia64/cputime.h
@@ -1,6 +1,110 @@
+/*
+ * include/asm-ia64/cputime.h:
+ *		Definitions for measuring cputime on ia64 machines.
+ *
+ * Based on <asm-powerpc/cputime.h>.
+ *
+ * Copyright (C) 2007 FUJITSU LIMITED
+ * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@xxxxxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
+ * Otherwise we measure cpu time in jiffies using the generic definitions.
+ */
+
 #ifndef __IA64_CPUTIME_H
 #define __IA64_CPUTIME_H

+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
 #include <asm-generic/cputime.h>
+#else
+
+#include <linux/time.h>
+#include <linux/jiffies.h>
+#include <asm/processor.h>
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero			((cputime_t)0)
+#define cputime_max			((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b)		((__a) +  (__b))
+#define cputime_sub(__a, __b)		((__a) -  (__b))
+#define cputime_div(__a, __n)		((__a) /  (__n))
+#define cputime_halve(__a)		((__a) >> 1)
+#define cputime_eq(__a, __b)		((__a) == (__b))
+#define cputime_gt(__a, __b)		((__a) >  (__b))
+#define cputime_ge(__a, __b)		((__a) >= (__b))
+#define cputime_lt(__a, __b)		((__a) <  (__b))
+#define cputime_le(__a, __b)		((__a) <= (__b))
+
+#define cputime64_zero			((cputime64_t)0)
+#define cputime64_add(__a, __b)		((__a) + (__b))
+#define cputime64_sub(__a, __b)		((__a) - (__b))
+#define cputime_to_cputime64(__ct)	(__ct)
+
+/*
+ * Convert cputime <-> jiffies (HZ)
+ */
+#define cputime_to_jiffies(__ct)	((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies_to_cputime(__jif)	((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct)	((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif)	((__jif) * (NSEC_PER_SEC / HZ))
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+#define cputime_to_msecs(__ct)		((__ct) / NSEC_PER_MSEC)
+#define msecs_to_cputime(__msecs)	((__msecs) * NSEC_PER_MSEC)
+
+/*
+ * Convert cputime <-> seconds
+ */
+#define cputime_to_secs(__ct)		((__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs)		((__secs) * NSEC_PER_SEC)
+
+/*
+ * Convert cputime <-> timespec (nsec)
+ */
+static inline cputime_t timespec_to_cputime(const struct timespec *val)
+{
+	cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+	return (ret + val->tv_nsec);
+}
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+{
+	val->tv_sec  = ct / NSEC_PER_SEC;
+	val->tv_nsec = ct % NSEC_PER_SEC;
+}
+
+/*
+ * Convert cputime <-> timeval (usec)
+ */
+static inline cputime_t timeval_to_cputime(struct timeval *val)
+{
+	cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+	return (ret + val->tv_usec * NSEC_PER_USEC);
+}
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
+{
+	val->tv_sec = ct / NSEC_PER_SEC;
+	val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+}
+
+/*
+ * Convert cputime <-> clock (USER_HZ)
+ */
+#define cputime_to_clock_t(__ct)	((__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x)		((__x) * (NSEC_PER_SEC / USER_HZ))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct)      cputime_to_clock_t((cputime_t)__ct)

+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 #endif /* __IA64_CPUTIME_H */
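
With CONFIG_VIRT_CPU_ACCOUNTING, cputime_t is plain nanoseconds, so the
conversions above are simple arithmetic.  A standalone sanity check (not
part of the patch; HZ=250 and USER_HZ=100 are assumptions for illustration):

#include <assert.h>

#define NSEC_PER_SEC	1000000000ULL
#define NSEC_PER_MSEC	1000000ULL
#define HZ		250
#define USER_HZ		100

#define jiffies_to_cputime(__jif)	((__jif) * (NSEC_PER_SEC / HZ))
#define cputime_to_msecs(__ct)		((__ct) / NSEC_PER_MSEC)
#define cputime_to_clock_t(__ct)	((__ct) / (NSEC_PER_SEC / USER_HZ))

int main(void)
{
	unsigned long long ct = jiffies_to_cputime(250);	/* one second */

	assert(ct == NSEC_PER_SEC);
	assert(cputime_to_msecs(ct) == 1000);
	assert(cputime_to_clock_t(ct) == USER_HZ);
	return 0;
}
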
Index: linux-2.6.24-rc5/arch/ia64/ia32/elfcore32.h
===================================================================
--- linux-2.6.24-rc5.orig/arch/ia64/ia32/elfcore32.h
+++ linux-2.6.24-rc5/arch/ia64/ia32/elfcore32.h
@@ -30,7 +30,19 @@
 	int	si_errno;			/* errno */
 };

-#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * This is a hack: compat_timeval (a pair of s32) and the ia64-native
+ * timeval (a pair of s64) are not type compatible, so fs/binfmt_elf.c
+ * (built via arch/ia64/ia32) would get compiler warnings on use of
+ * cputime_to_timeval(), which is usually an alias of jiffies_to_timeval().
+ */
+#define cputime_to_timeval(a,b) \
+	do { (b)->tv_usec = 0; (b)->tv_sec = (a)/NSEC_PER_SEC; } while(0)
+#else
+#define jiffies_to_timeval(a,b) \
+	do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; } while(0)
+#endif

 struct elf_prstatus
 {
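
To illustrate the mismatch the comment above describes (declarations only,
not part of the patch; the layouts roughly follow the kernel's definitions,
and the native type is renamed here to avoid clashing with the real
struct timeval):

struct compat_timeval {		/* ia32 view: pair of s32 */
	int	tv_sec;
	int	tv_usec;
};

struct native_timeval {		/* ia64 view: pair of s64 */
	long	tv_sec;		/* long is 64-bit on ia64 */
	long	tv_usec;
};

binfmt_elf.c fills a compat_timeval, so the inline cputime_to_timeval()
from <asm-ia64/cputime.h>, which takes a native struct timeval *, would
trigger pointer-type warnings there.  The macro above expands textually
and so avoids the warnings, at the cost of dropping the sub-second part
(tv_usec = 0).
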
Index: linux-2.6.24-rc5/arch/ia64/kernel/asm-offsets.c
===================================================================
--- linux-2.6.24-rc5.orig/arch/ia64/kernel/asm-offsets.c
+++ linux-2.6.24-rc5/arch/ia64/kernel/asm-offsets.c
@@ -39,6 +39,11 @@
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
+	DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
+	DEFINE(TI_AC_UTIME, offsetof(struct thread_info, ac_utime));
+#endif

 	BLANK();

Index: linux-2.6.24-rc5/include/asm-ia64/thread_info.h
===================================================================
--- linux-2.6.24-rc5.orig/include/asm-ia64/thread_info.h
+++ linux-2.6.24-rc5/include/asm-ia64/thread_info.h
@@ -31,6 +31,11 @@
 	mm_segment_t addr_limit;	/* user-level address space limit */
 	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
 	struct restart_block restart_block;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	__u64 ac_stamp;
+	__u64 ac_stime;
+	__u64 ac_utime;
+#endif
 };

 #define THREAD_SIZE			KERNEL_STACK_SIZE
@@ -62,9 +67,17 @@
 #define task_stack_page(tsk)	((void *)(tsk))

 #define __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#define setup_thread_stack(p, org)			\
+	*task_thread_info(p) = *task_thread_info(org);	\
+	task_thread_info(p)->ac_stime = 0;		\
+	task_thread_info(p)->ac_utime = 0;		\
+	task_thread_info(p)->task = (p);
+#else
 #define setup_thread_stack(p, org) \
 	*task_thread_info(p) = *task_thread_info(org); \
 	task_thread_info(p)->task = (p);
+#endif
 #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)

 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
Index: linux-2.6.24-rc5/arch/ia64/kernel/minstate.h
===================================================================
--- linux-2.6.24-rc5.orig/arch/ia64/kernel/minstate.h
+++ linux-2.6.24-rc5/arch/ia64/kernel/minstate.h
@@ -3,6 +3,18 @@

 #include "entry.h"

+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/* read ar.itc in advance, and use it before leaving bank 0 */
+#define ACCOUNT_GET_STAMP				\
+(pUStk) mov.m r20=ar.itc;
+#define ACCOUNT_SYS_ENTER				\
+(pUStk) br.call.spnt rp=account_sys_enter		\
+	;;
+#else
+#define ACCOUNT_GET_STAMP
+#define ACCOUNT_SYS_ENTER
+#endif
+
 /*
  * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  * the minimum state necessary that allows us to turn psr.ic back
@@ -122,11 +134,13 @@
 	;;											\
 .mem.offset 0,0; st8.spill [r16]=r2,16;								\
 .mem.offset 8,0; st8.spill [r17]=r3,16;								\
+	ACCOUNT_GET_STAMP									\
 	adds r2=IA64_PT_REGS_R16_OFFSET,r1;							\
 	;;											\
 	EXTRA;											\
 	movl r1=__gp;		/* establish kernel global pointer */				\
 	;;											\
+	ACCOUNT_SYS_ENTER									\
 	bsw.1;			/* switch back to bank 1 (must be last in insn group) */	\
 	;;

Index: linux-2.6.24-rc5/arch/ia64/kernel/ivt.S
===================================================================
--- linux-2.6.24-rc5.orig/arch/ia64/kernel/ivt.S
+++ linux-2.6.24-rc5/arch/ia64/kernel/ivt.S
@@ -805,8 +805,13 @@

 (p8)	adds r28=16,r28				// A    switch cr.iip to next bundle
 (p9)	adds r8=1,r8				// A    increment ei to next slot
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	;;
+	mov b6=r30				// I0   setup syscall handler branch reg early
+#else
 	nop.i 0
 	;;
+#endif

 	mov.m r25=ar.unat			// M2 (5 cyc)
 	dep r29=r8,r29,41,2			// I0   insert new ei into cr.ipsr
@@ -817,7 +822,11 @@
 	//
 ///////////////////////////////////////////////////////////////////////
 	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mov.m r30=ar.itc			// M    get cycle for accounting
+#else
 	mov b6=r30				// I0   setup syscall handler branch reg early
+#endif
 	cmp.eq pKStk,pUStk=r0,r17		// A    were we on kernel stacks already?

 	and r9=_TIF_SYSCALL_TRACEAUDIT,r9	// A    mask trace or audit
@@ -829,6 +838,24 @@
 	cmp.eq p14,p0=r9,r0			// A    are syscalls being traced/audited?
 	br.call.sptk.many b7=ia64_syscall_setup	// B
 1:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	// mov.m r30=ar.itc was issued in advance, and r13 is current
+	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13	// A
+	add r17=TI_AC_UTIME+IA64_TASK_SIZE,r13	// A
+(pKStk)	br.cond.spnt .skip_accounting		// B	unlikely skip
+	;;
+	ld8 r18=[r16]			// M  get last stamp
+	ld8 r19=[r17]			// M  cumulated utime
+	;;
+	sub r18=r30,r18			// A  elapsed time
+	;;
+	add r19=r19,r18			// A  sum
+	;;
+	st8 [r16]=r30			// M  update stamp
+	st8 [r17]=r19			// M  update utime
+	;;
+.skip_accounting:
+#endif
 	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
 	nop 0
 	bsw.1					// B (6 cyc) regs are saved, switch to bank 1
@@ -928,6 +955,7 @@
 	 *	- r27: saved ar.rsc
 	 *	- r28: saved cr.iip
 	 *	- r29: saved cr.ipsr
+	 *	- r30: ar.itc for accounting (don't touch)
 	 *	- r31: saved pr
 	 *	-  b0: original contents (to be saved)
 	 * On exit:
@@ -1090,6 +1118,35 @@
 	DBG_FAULT(16)
 	FAULT(16)

+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	/*
+	 * There is no particular reason for this code to be here, other than
+	 * that there happens to be space here that would go unused otherwise.
+	 * If this fault ever gets "unreserved", simply move the following
+	 * code to a more suitable spot...
+	 *
+	 * account_sys_enter is called from SAVE_MIN* macros if accounting is
+	 * enabled and if the macro is entered from user mode.
+	 */
+ENTRY(account_sys_enter)
+	// mov.m r20=ar.itc was issued in advance, and r13 is current
+	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
+	add r17=TI_AC_UTIME+IA64_TASK_SIZE,r13
+	;;
+	ld8 r18=[r16]			// get last stamp
+	ld8 r19=[r17]			// cumulated utime
+	;;
+	sub r18=r20,r18			// elapsed time
+	;;
+	add r19=r19,r18			// sum
+	;;
+	st8 [r16]=r20			// update stamp
+	st8 [r17]=r19			// update utime
+	;;
+	br.ret.sptk.many rp
+END(account_sys_enter)
+#endif
+
 	.org ia64_ivt+0x4400
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x4400 Entry 17 (size 64 bundles) Reserved
Index: linux-2.6.24-rc5/arch/ia64/kernel/fsys.S
===================================================================
--- linux-2.6.24-rc5.orig/arch/ia64/kernel/fsys.S
+++ linux-2.6.24-rc5/arch/ia64/kernel/fsys.S
@@ -660,7 +660,11 @@
 	nop.i 0
 	;;
 	mov ar.rsc=0				// M2   set enforced lazy mode, pl 0, LE, loadrs=0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mov.m r30=ar.itc			// M    get cycle for accounting
+#else
 	nop.m 0
+#endif
 	nop.i 0
 	;;
 	mov r23=ar.bspstore			// M2 (12 cyc) save ar.bspstore
@@ -682,6 +686,22 @@
 	cmp.ne pKStk,pUStk=r0,r0		// A    set pKStk <- 0, pUStk <- 1
 	br.call.sptk.many b7=ia64_syscall_setup	// B
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	// mov.m r30=ar.itc was issued in advance
+	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
+	add r17=TI_AC_UTIME+IA64_TASK_SIZE,r2
+	;;
+	ld8 r18=[r16]			// get last stamp
+	ld8 r19=[r17]			// cumulated utime
+	;;
+	sub r18=r30,r18			// elapsed time
+	;;
+	add r19=r19,r18			// sum
+	;;
+	st8 [r16]=r30			// update stamp
+	st8 [r17]=r19			// update utime
+	;;
+#endif
 	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
 	mov rp=r14				// I0   set the real return addr
 	and r3=_TIF_SYSCALL_TRACEAUDIT,r3	// A
Index: linux-2.6.24-rc5/arch/ia64/kernel/entry.S
===================================================================
--- linux-2.6.24-rc5.orig/arch/ia64/kernel/entry.S
+++ linux-2.6.24-rc5/arch/ia64/kernel/entry.S
@@ -995,6 +995,28 @@
 	shladd in0=loc1,3,r17
 	mov in1=0
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+account_sys_leave:
+	// The current frame size
+	//	(in * 2 + loc * N (N = 8 or 12) + out * 2)
+	// is sufficient, so just take care to preserve in0,in1
+	mov loc0=ar.itc
+	mov loc1=IA64_KR(CURRENT)	// M2 (12 cycle read latency)
+	;;
+	add loc2=TI_AC_STAMP+IA64_TASK_SIZE,loc1
+	add loc3=TI_AC_STIME+IA64_TASK_SIZE,loc1
+	;;
+	ld8 loc4=[loc2]			// get last stamp
+	ld8 loc5=[loc3]			// cumulated stime
+	;;
+	sub loc4=loc0,loc4		// elapsed time
+	;;
+	add loc5=loc5,loc4		// sum
+	;;
+	st8 [loc2]=loc0			// update stamp
+	st8 [loc3]=loc5			// update stime
+	;;
+#endif
 	TEXT_ALIGN(32)
 rse_clear_invalid:
 #ifdef CONFIG_ITANIUM
Index: linux-2.6.24-rc5/arch/ia64/kernel/head.S
===================================================================
--- linux-2.6.24-rc5.orig/arch/ia64/kernel/head.S
+++ linux-2.6.24-rc5/arch/ia64/kernel/head.S
@@ -1002,6 +1002,26 @@
 	br.ret.sptk.many rp
 END(sched_clock)

+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+GLOBAL_ENTRY(cycle_to_cputime)
+	alloc r16=ar.pfs,1,0,0,0
+	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	;;
+	ldf8 f8=[r8]
+	;;
+	setf.sig f9=r32
+	;;
+	xmpy.lu f10=f9,f8	// calculate low 64 bits of 128-bit product	(4 cyc)
+	xmpy.hu f11=f9,f8	// calculate high 64 bits of 128-bit product
+	;;
+	getf.sig r8=f10		//						(5 cyc)
+	getf.sig r9=f11
+	;;
+	shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
+	br.ret.sptk.many rp
+END(cycle_to_cputime)
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
 GLOBAL_ENTRY(start_kernel_thread)
 	.prologue
 	.save rp, r0				// this is the end of the call-chain
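
For reference, a C equivalent (not part of the patch) of the routine above,
using GCC's unsigned __int128 in place of the xmpy.lu/xmpy.hu + shrp
sequence; the shift value 30 is assumed to match IA64_NSEC_PER_CYC_SHIFT:

typedef unsigned long long u64;

#define IA64_NSEC_PER_CYC_SHIFT	30	/* assumed kernel constant */

static u64 cycle_to_cputime_c(u64 cyc, u64 nsec_per_cyc)
{
	/* full 128-bit product of the cycle count and the scaled factor */
	unsigned __int128 prod = (unsigned __int128)cyc * nsec_per_cyc;

	/* discard the IA64_NSEC_PER_CYC_SHIFT fraction bits */
	return (u64)(prod >> IA64_NSEC_PER_CYC_SHIFT);
}

The real routine loads nsec_per_cyc from the per-cpu cpu_info area (the
ldf8 above); because the intermediate product is kept in 128 bits, the
~18 second limit of the old macro version goes away.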

