[RFC PATCH --take 2] Add TRACE_IRQFLAGS_SUPPORT, LOCKDEP_SUPPORT then enable ftrace for ia64

Hi there,

I finally got some free time to update the patch series
against Linux-2.6.35-rc3+.

The following RFC patch series adds lockdep support and IRQ-flags
state tracing support for the ia64 architecture, following the
approach described in Documentation/irqflags-tracing.txt.
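
The contract described there is small: every code path that really
enables or disables interrupts must keep lockdep informed through
trace_hardirqs_on()/trace_hardirqs_off().  Roughly, the generic
wrappers boil down to the pattern below (an illustrative C sketch of
the rule only, not the ia64 assembly from this series):

/* sketch of the TRACE_IRQFLAGS_SUPPORT contract */
#define local_irq_disable() \
	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)

#define local_irq_enable() \
	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

#define local_irq_save(flags) \
	do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)

The ia64-specific work is making sure the low-level psr.i
manipulation, including the assembly entry paths, goes through
equivalent hooks.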

I've tested the patches against the Linux-2.6.35-rc3+ git tree on
the ia64 systems I currently have access to.
The kernel boots fine on all of them, both with and without the
IRQ tracing and lockdep code enabled.

I also tried the function tracer and the irqsoff tracer; both seem to work.

The next step is to fix the remaining issues and the strange
workarounds, and to clean up the code so it can be merged more easily.

Yes, I know it is not ready to be merged yet.
I still need to find reasonable ways to replace all the ugly
workarounds whose only purpose is to keep the lockdep code happy,
and most of them are not as easy to remove as they look.
Some of the suppressed lockdep warnings probably point at real
issues, but for now I silence them just to get ftrace working.
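
Most of the run-time suppression is nothing fancier than bracketing
the offending code with lockdep_off()/lockdep_on(), as the MCA
handlers in the diff below do.  Schematically (the handler name here
is only a placeholder):

static irqreturn_t some_sal_int_handler(int irq, void *arg)
{
	lockdep_off();	/* keep lockdep quiet on this path for now */

	/* ... handler body that currently trips lockdep ... */

	lockdep_on();	/* resume normal lock checking */
	return IRQ_HANDLED;
}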

I also don't want to block anyone who needs ftrace on ia64.
Please let me know if you run into any problems with
the patch series.

The patch series contains:

[1/8]  add TRACE_IRQFLAGS_SUPPORT for ia64
[2/8]  enable LOCKDEP_SUPPORT for ia64
[3/8]  enlarge PERCPU_PAGE_SHIFT from 16 to 20
[4/8]  workarounds for hard interrupts
[5/8]  workarounds for MCA and NMI
[6/8]  workarounds for system calls
[7/8]  various ugly workarounds to suppress lockdep warnings at run time
[8/8]  various ugly workarounds to suppress lockdep warnings at boot time


# cat /proc/lockdep_stats
 lock-classes:                         1185 [max: 4095]
 direct dependencies:                  4434 [max: 16384]
 indirect dependencies:               12799
 all direct dependencies:             86955
 dependency chains:                    5580 [max: 32768]
 dependency chain hlocks:             14263 [max: 163840]
 in-hardirq chains:                      19
 in-softirq chains:                     316
 in-process chains:                    4136
 stack-trace entries:                     0 [max: 262144]
 combined max dependencies:        26228580
 hardirq-safe locks:                     30
 hardirq-unsafe locks:                  519
 softirq-safe locks:                    121
 softirq-unsafe locks:                  413
 irq-safe locks:                        129
 irq-unsafe locks:                      519
 hardirq-read-safe locks:                 0
 hardirq-read-unsafe locks:              99
 softirq-read-safe locks:                11
 softirq-read-unsafe locks:              89
 irq-read-safe locks:                    11
 irq-read-unsafe locks:                  99
 uncategorized locks:                   112
 unused locks:                            0
 max locking depth:                       9
 max bfs queue depth:                   122
 chain lookup misses:                  5579
 chain lookup hits:                39288389
 cyclic checks:                        4193
 find-mask forwards checks:            1487
 find-mask backwards checks:          28354
 hardirq on events:                57175409
 hardirq off events:               57175383
 redundant hardirq ons:            34600716
 redundant hardirq offs:          102820066
 softirq on events:                 9367917
 softirq off events:                9367945
 redundant softirq ons:                   0
 redundant softirq offs:                  0
 debug_locks:                             1


P.S. The patch is attached; the inlined version below is a
copy-and-paste of it for easier reading.

Thanks,
Luming

Signed-off-by: Bob Picco <bob.picco@xxxxxx>
Signed-off-by: Yu Luming <luming.yu@xxxxxxxxx>

 arch/ia64/Kconfig              |    6 ++
 arch/ia64/Kconfig.debug        |    3 +
 arch/ia64/include/asm/page.h   |    2
 arch/ia64/include/asm/rwsem.h  |   27 ++++++++----
 arch/ia64/include/asm/system.h |   84 ---------------------------------------
 arch/ia64/kernel/Makefile      |    1
 arch/ia64/kernel/entry.S       |   88 +++++++++++++++++++++++++++++++++++++++++
 arch/ia64/kernel/irq_ia64.c    |    3 +
 arch/ia64/kernel/ivt.S         |    4 +
 arch/ia64/kernel/mca.c         |   15 ++++--
 arch/ia64/kernel/process.c     |    6 +-
 arch/ia64/kernel/ptrace.c      |   34 +++++++++++++++
 arch/ia64/kernel/setup.c       |    4 +
 arch/ia64/kernel/smpboot.c     |    2
 arch/ia64/kernel/time.c        |    4 -
 arch/ia64/mm/fault.c           |    1
 fs/ioctl.c                     |    1
 fs/open.c                      |    1
 fs/read_write.c                |    1
 include/linux/irqflags.h       |    2
 include/linux/lockdep.h        |    2
 kernel/fork.c                  |    4 +
 kernel/lockdep.c               |    8 +++
 kernel/sys.c                   |    1
 kernel/timer.c                 |    1
 kernel/trace/trace_irqsoff.c   |    6 ++
 26 files changed, 206 insertions(+), 105 deletions(-)

diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 9561082..bbb3e91 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -148,6 +148,12 @@ source "arch/ia64/xen/Kconfig"

 endif

+config LOCKDEP_SUPPORT
+	def_bool y
+
+config STACKTRACE_SUPPORT
+	def_bool y
+
 choice
 	prompt "System type"
 	default IA64_GENERIC
diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug
index de9d507..b57bab5 100644
--- a/arch/ia64/Kconfig.debug
+++ b/arch/ia64/Kconfig.debug
@@ -2,6 +2,9 @@ menu "Kernel hacking"

 source "lib/Kconfig.debug"

+config TRACE_IRQFLAGS_SUPPORT
+	def_bool y
+
 choice
 	prompt "Physical memory granularity"
 	default IA64_GRANULE_64MB
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 5f271bc..4109f09 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -41,7 +41,7 @@
 #define PAGE_SIZE		(__IA64_UL_CONST(1) << PAGE_SHIFT)
 #define PAGE_MASK		(~(PAGE_SIZE - 1))

-#define PERCPU_PAGE_SHIFT	16	/* log2() of max. size of per-CPU area */
+#define PERCPU_PAGE_SHIFT	20	/* log2() of max. size of per-CPU area */
 #define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)


diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index e876268..a9ea581 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -37,6 +37,9 @@ struct rw_semaphore {
 	signed long		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 };

 #define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
@@ -46,9 +49,15 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name) \
 	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-	  LIST_HEAD_INIT((name).wait_list) }
+	  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name)}

 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -58,13 +67,15 @@ extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

-static inline void
-init_rwsem (struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+			 struct lock_class_key *key);
+
+#define init_rwsem(sem)						\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__init_rwsem((sem), #sem, &__key);			\
+} while (0)

 /*
  * lock for reading
diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h
index 9f342a5..0c21f6d 100644
--- a/arch/ia64/include/asm/system.h
+++ b/arch/ia64/include/asm/system.h
@@ -107,88 +107,6 @@ extern struct ia64_boot_param {
  */
 #define set_mb(var, value)	do { (var) = (value); mb(); } while (0)

-#define safe_halt()         ia64_pal_halt_light()    /* PAL_HALT_LIGHT */
-
-/*
- * The group barrier in front of the rsm & ssm are necessary to ensure
- * that none of the previous instructions in the same group are
- * affected by the rsm/ssm.
- */
-/* For spinlocks etc */
-
-/*
- * - clearing psr.i is implicitly serialized (visible by next insn)
- * - setting psr.i requires data serialization
- * - we need a stop-bit before reading PSR because we sometimes
- *   write a floating-point register right before reading the PSR
- *   and that writes to PSR.mfl
- */
-#ifdef CONFIG_PARAVIRT
-#define __local_save_flags()	ia64_get_psr_i()
-#else
-#define __local_save_flags()	ia64_getreg(_IA64_REG_PSR)
-#endif
-
-#define __local_irq_save(x)			\
-do {						\
-	ia64_stop();				\
-	(x) = __local_save_flags();		\
-	ia64_stop();				\
-	ia64_rsm(IA64_PSR_I);			\
-} while (0)
-
-#define __local_irq_disable()			\
-do {						\
-	ia64_stop();				\
-	ia64_rsm(IA64_PSR_I);			\
-} while (0)
-
-#define __local_irq_restore(x)	ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
-
-#ifdef CONFIG_IA64_DEBUG_IRQ
-
-  extern unsigned long last_cli_ip;
-
-# define __save_ip()		last_cli_ip = ia64_getreg(_IA64_REG_IP)
-
-# define local_irq_save(x)					\
-do {								\
-	unsigned long __psr;					\
-								\
-	__local_irq_save(__psr);				\
-	if (__psr & IA64_PSR_I)					\
-		__save_ip();					\
-	(x) = __psr;						\
-} while (0)
-
-# define local_irq_disable()	do { unsigned long __x; local_irq_save(__x); } while (0)
-
-# define local_irq_restore(x)					\
-do {								\
-	unsigned long __old_psr, __psr = (x);			\
-								\
-	local_save_flags(__old_psr);				\
-	__local_irq_restore(__psr);				\
-	if ((__old_psr & IA64_PSR_I) && !(__psr & IA64_PSR_I))	\
-		__save_ip();					\
-} while (0)
-
-#else /* !CONFIG_IA64_DEBUG_IRQ */
-# define local_irq_save(x)	__local_irq_save(x)
-# define local_irq_disable()	__local_irq_disable()
-# define local_irq_restore(x)	__local_irq_restore(x)
-#endif /* !CONFIG_IA64_DEBUG_IRQ */
-
-#define local_irq_enable()	({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
-#define local_save_flags(flags)	({ ia64_stop(); (flags) = __local_save_flags(); })
-
-#define irqs_disabled()				\
-({						\
-	unsigned long __ia64_id_flags;		\
-	local_save_flags(__ia64_id_flags);	\
-	(__ia64_id_flags & IA64_PSR_I) == 0;	\
-})
-
 #ifdef __KERNEL__

 /*
@@ -265,7 +183,7 @@ extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct
 #define __ARCH_WANT_UNLOCKED_CTXSW
 #define ARCH_HAS_PREFETCH_SWITCH_STACK
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
-
+#include <linux/irqflags.h>
 void cpu_idle_wait(void);

 #define arch_align_stack(x) (x)
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index db10b1e..395c2f2 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_AUDIT)		+= audit.o
 obj-$(CONFIG_PCI_MSI)		+= msi_ia64.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o

 obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o \
 				   paravirt_patch.o
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 9a260b3..393e68d 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -48,6 +48,7 @@
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
+#include <asm/irqflags.h>

 #include "minstate.h"

@@ -583,6 +584,88 @@ strace_error:
 	br.cond.sptk .strace_save_retval
 END(ia64_trace_syscall)

+/*
+ * Derived from ia64_trace_syscall
+ */
+GLOBAL_ENTRY(ia64_irqtrace_syscall)
+	PT_REGS_UNWIND_INFO(0)
+	/*
+	 * We need to preserve the scratch registers f6-f11 in case the system
+	 * call is sigreturn.
+	 */
+	adds r16=PT(F6)+16,sp
+	adds r17=PT(F7)+16,sp
+	;;
+ 	stf.spill [r16]=f6,32
+ 	stf.spill [r17]=f7,32
+	;;
+ 	stf.spill [r16]=f8,32
+ 	stf.spill [r17]=f9,32
+	;;
+ 	stf.spill [r16]=f10
+ 	stf.spill [r17]=f11
+	br.call.sptk.many rp=syscall_irqtrace_enter
+	cmp.lt p6,p0=r8,r0			// check tracehook
+	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
+	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
+	mov r10=0
+(p6)	br.cond.sptk irqstrace_error		// syscall failed ->
+	adds r16=PT(F6)+16,sp
+	adds r17=PT(F7)+16,sp
+	;;
+	ldf.fill f6=[r16],32
+	ldf.fill f7=[r17],32
+	;;
+	ldf.fill f8=[r16],32
+	ldf.fill f9=[r17],32
+	;;
+	ldf.fill f10=[r16]
+	ldf.fill f11=[r17]
+	// the syscall number may have changed, so re-load it and re-calculate the
+	// syscall entry-point:
+	adds r15=PT(R15)+16,sp			// r15 = &pt_regs.r15 (syscall #)
+	;;
+	ld8 r15=[r15]
+	mov r3=NR_syscalls - 1
+	;;
+	adds r15=-1024,r15
+	movl r16=sys_call_table
+	;;
+	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
+	cmp.leu p6,p7=r15,r3
+	;;
+(p6)	ld8 r20=[r20]				// load address of syscall entry point
+(p7)	movl r20=sys_ni_syscall
+	;;
+	mov b6=r20
+	br.call.sptk.many rp=b6			// do the syscall
+.irqstrace_check_retval:
+	cmp.lt p6,p0=r8,r0			// syscall failed?
+	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
+	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
+	mov r10=0
+(p6)	br.cond.sptk irqstrace_error		// syscall failed ->
+	;;					// avoid RAW on r10
+.irqstrace_save_retval:
+.mem.offset 0,0; st8.spill [r2]=r8		// store return value in slot for r8
+.mem.offset 8,0; st8.spill [r3]=r10		// clear error indication in slot for r10
+	br.call.sptk.many rp=syscall_irqtrace_leave
+.irqtraceret3:
+(pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
+(pUStk)	rsm psr.i				// disable interrupts
+	br.cond.sptk ia64_work_pending_syscall_end
+
+irqstrace_error:
+	ld8 r3=[r2]				// load pt_regs.r8
+	sub r9=0,r8				// negate return value to get errno value
+	;;
+	cmp.ne p6,p0=r3,r0			// is pt_regs.r8!=0?
+	adds r3=16,r2				// r3=&pt_regs.r10
+	;;
+(p6)	mov r10=-1
+(p6)	mov r8=r9
+	br.cond.sptk .irqstrace_save_retval
+END(ia64_irqtrace_syscall)
 	/*
 	 * When traced and returning from sigreturn, we invoke syscall_trace but then
 	 * go straight to ia64_leave_kernel rather than ia64_leave_syscall.
@@ -858,6 +941,11 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
 	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
 #else
 	RSM_PSR_I(pUStk, r17, r31)
+#if 0 //Todo: fix lock held when returning to user space
+	;;
+	LOCKDEP_SYS_EXIT
+	;;
+#endif
 	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index f14c35f..3732d59 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -453,6 +453,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned long saved_tpr;

+	trace_hardirqs_off();
 #if IRQ_DEBUG
 	{
 		unsigned long bsp, sp;
@@ -523,6 +524,8 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	 * come through until ia64_eoi() has been done.
 	 */
 	irq_exit();
+	if(!current->hardirq_context)
+		trace_hardirqs_on_delayed();
 	set_irq_regs(old_regs);
 }

diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index d93e396..f9731bd 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -855,7 +855,11 @@ ENTRY(break_fault)
 (p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT

 	SSM_PSR_I(p15, p15, r16)		// M2   restore psr.i
+#ifdef CONFIG_TRACE_IRQFLAGS
+(p14)	br.cond.spnt.many ia64_irqtrace_syscall	// B	do syscall-tracing thingamagic
+#else
 (p14)	br.call.sptk.many b6=b6			// B    invoke syscall-handker (ignore return addr)
+#endif
 	br.cond.spnt.many ia64_trace_syscall	// B	do syscall-tracing thingamagic
 	// NOT REACHED
 ///////////////////////////////////////////////////////////////////////
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index a0220dc..ec48cd6 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -536,6 +536,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
 	static int		index;
 	static DEFINE_SPINLOCK(cpe_history_lock);

+	lockdep_off();
 	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
 		       __func__, cpe_irq, smp_processor_id());

@@ -581,7 +582,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
 out:
 	/* Get the CPE error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
-
+	lockdep_on();
 	return IRQ_HANDLED;
 }

@@ -780,7 +781,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 		{ .sos = NULL, .monarch_cpu = &monarch_cpu };

 	/* Mask all interrupts */
-	local_irq_save(flags);
+	raw_local_irq_save(flags);

 	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);

@@ -800,7 +801,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)

 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	/* Enable all interrupts */
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 	return IRQ_HANDLED;
 }

@@ -1399,7 +1400,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
 		       __func__, cmc_irq, smp_processor_id());

 	/* SAL spec states this should run w/ interrupts enabled */
-	local_irq_enable();
+	raw_local_irq_enable();

 	spin_lock(&cmc_history_lock);
 	if (!cmc_polling_enabled) {
@@ -1536,6 +1537,7 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
 	static int poll_time = MIN_CPE_POLL_INTERVAL;
 	unsigned int cpuid;

+	lockdep_off();
 	cpuid = smp_processor_id();

 	/* If first cpu, update count */
@@ -1569,6 +1571,7 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
 			mod_timer(&cpe_poll_timer, jiffies + poll_time);
 		start_count = -1;
 	}
+	lockdep_on();

 	return IRQ_HANDLED;
 }
@@ -1894,10 +1897,10 @@ static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
 {
 	unsigned long flags;

-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	if (!cmc_polling_enabled)
 		ia64_mca_cmc_vector_enable(NULL);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 }

 static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 53f1648..afe0ddf 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -237,14 +237,14 @@ update_pal_halt_status(int status)
 void
 default_idle (void)
 {
-	local_irq_enable();
+	raw_local_irq_enable();
 	while (!need_resched()) {
 		if (can_do_pal_halt) {
-			local_irq_disable();
+			raw_local_irq_disable();
 			if (!need_resched()) {
 				safe_halt();
 			}
-			local_irq_enable();
+			raw_local_irq_enable();
 		} else
 			cpu_relax();
 	}
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 7c7909f..a23c683 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -1258,6 +1258,16 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
 	return 0;
 }

+asmlinkage long
+syscall_irqtrace_enter (long arg0, long arg1, long arg2, long arg3,
+		     long arg4, long arg5, long arg6, long arg7,
+		     struct pt_regs regs)
+{
+	raw_local_irq_disable();
+	trace_hardirqs_on();
+	raw_local_irq_enable();
+	return 0;
+}
 /* "asmlinkage" so the input arguments are preserved... */

 asmlinkage void
@@ -1285,6 +1295,30 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
 		ia64_sync_krbs();
 }

+asmlinkage void
+syscall_irqtrace_leave (long arg0, long arg1, long arg2, long arg3,
+		     long arg4, long arg5, long arg6, long arg7,
+		     struct pt_regs regs)
+{
+	int step;
+
+	if (unlikely(current->audit_context)) {
+		int success = AUDITSC_RESULT(regs.r10);
+		long result = regs.r8;
+
+		if (success != AUDITSC_SUCCESS)
+			result = -result;
+		audit_syscall_exit(success, result);
+	}
+
+	step = test_thread_flag(TIF_SINGLESTEP);
+	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall_exit(&regs, step);
+
+	/* copy user rbs to kernel rbs */
+	if (test_thread_flag(TIF_RESTORE_RSE))
+		ia64_sync_krbs();
+}
 /* Utrace implementation starts here */
 struct regset_get {
 	void *kbuf;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 41ae6a5..8191631 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1013,7 +1013,11 @@ cpu_init (void)
 	BUG_ON(current->mm);

 	ia64_mmu_init(ia64_imva(cpu_data));
+
+	/* Temporarily suppress lockdep WARN_ON irqs disabled*/
+	lockdep_off();
 	ia64_mca_cpu_init(ia64_imva(cpu_data));
+	lockdep_on();

 	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
 	ia64_set_itc(0);
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 6a1380e..bd4ac64 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -507,7 +507,7 @@ do_boot_cpu (int sapicid, int cpu)
 	struct create_idle c_idle = {
 		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 		.cpu	= cpu,
-		.done	= COMPLETION_INITIALIZER(c_idle.done),
+		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};

  	c_idle.idle = get_idle_for_cpu(cpu);
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 653b3c4..39157a7 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -210,8 +210,8 @@ timer_interrupt (int irq, void *dev_id)
 		/*
 		 * Allow IPIs to interrupt the timer loop.
 		 */
-		local_irq_enable();
-		local_irq_disable();
+		raw_local_irq_enable();
+		raw_local_irq_disable();
 	}

 skip_process_time_accounting:
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 0799fea..44f37dc 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -82,6 +82,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	unsigned long mask;
 	int fault;

+	trace_hardirqs_on_delayed();
 	/* mmap_sem is performance critical.... */
 	prefetchw(&mm->mmap_sem);

diff --git a/fs/ioctl.c b/fs/ioctl.c
index 2d140a7..1374920 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -616,6 +616,7 @@ SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
 	int error = -EBADF;
 	int fput_needed;

+	trace_hardirqs_on_delayed();
 	filp = fget_light(fd, &fput_needed);
 	if (!filp)
 		goto out;
diff --git a/fs/open.c b/fs/open.c
index 5463266..c8a7eca 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -973,6 +973,7 @@ SYSCALL_DEFINE1(close, unsigned int, fd)
 	struct fdtable *fdt;
 	int retval;

+	trace_hardirqs_on_delayed();
 	spin_lock(&files->file_lock);
 	fdt = files_fdtable(files);
 	if (fd >= fdt->max_fds)
diff --git a/fs/read_write.c b/fs/read_write.c
index 9c04852..eda37e2 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -412,6 +412,7 @@ SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
 	ssize_t ret = -EBADF;
 	int fput_needed;

+	trace_hardirqs_on_delayed();
 	file = fget_light(fd, &fput_needed);
 	if (file) {
 		loff_t pos = file_pos_read(file);
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 006bf45..51d0233 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -17,6 +17,7 @@
   extern void trace_softirqs_on(unsigned long ip);
   extern void trace_softirqs_off(unsigned long ip);
   extern void trace_hardirqs_on(void);
+  extern void trace_hardirqs_on_delayed(void);
   extern void trace_hardirqs_off(void);
 # define trace_hardirq_context(p)	((p)->hardirq_context)
 # define trace_softirq_context(p)	((p)->softirq_context)
@@ -29,6 +30,7 @@
 # define INIT_TRACE_IRQFLAGS	.softirqs_enabled = 1,
 #else
 # define trace_hardirqs_on()		do { } while (0)
+# define trace_hardirqs_on_delayed()		do { } while (0)
 # define trace_hardirqs_off()		do { } while (0)
 # define trace_softirqs_on(ip)		do { } while (0)
 # define trace_softirqs_off(ip)		do { } while (0)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 06aed83..baa573b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -174,7 +174,7 @@ struct lock_chain {
 	u64				chain_key;
 };

-#define MAX_LOCKDEP_KEYS_BITS		13
+#define MAX_LOCKDEP_KEYS_BITS		12
 /*
  * Subtract one because we offset hlock->class_idx by 1 in order
  * to make 0 mean no class. This avoids overflowing the class_idx
diff --git a/kernel/fork.c b/kernel/fork.c
index b6cce14..0c4ae6d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1012,6 +1012,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	rt_mutex_init_task(p);

 #ifdef CONFIG_PROVE_LOCKING
+	//workaround
+	p->hardirqs_enabled = 1;
+	p->softirqs_enabled = 1;
+
 	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
 #endif
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 5428679..f501c10 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2353,6 +2353,14 @@ void trace_hardirqs_on(void)
 }
 EXPORT_SYMBOL(trace_hardirqs_on);

+void trace_hardirqs_on_delayed(void)
+{
+	raw_local_irq_disable();
+	trace_hardirqs_on();
+	raw_local_irq_enable();
+}
+EXPORT_SYMBOL(trace_hardirqs_on_delayed);
+
 /*
  * Hardirqs were disabled:
  */
diff --git a/kernel/sys.c b/kernel/sys.c
index e83ddbb..e0c67db 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -993,6 +993,7 @@ SYSCALL_DEFINE1(getpgid, pid_t, pid)
 	struct pid *grp;
 	int retval;

+	trace_hardirqs_on_delayed();
 	rcu_read_lock();
 	if (!pid)
 		grp = task_pgrp(current);
diff --git a/kernel/timer.c b/kernel/timer.c
index ee305c8..7793991 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1336,6 +1336,7 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds)
  */
 SYSCALL_DEFINE0(getpid)
 {
+	trace_hardirqs_on_delayed();
 	return task_tgid_vnr(current);
 }

diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 6fd486e..3c2a931 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -525,6 +525,12 @@ void trace_hardirqs_on(void)
 }
 EXPORT_SYMBOL(trace_hardirqs_on);

+void trace_hardirqs_on_delayed(void)
+{
+	trace_hardirqs_on();
+}
+EXPORT_SYMBOL(trace_hardirqs_on_delayed);
+
 void trace_hardirqs_off(void)
 {
 	if (!preempt_trace() && irq_trace())

Attachment: ia64-lockdep-0625.patch
Description: Binary data

