[PATCH v6 5/6] task_isolation: add debug boot flag

The new "task_isolation_debug" boot flag simplifies debugging of
TASK_ISOLATION kernels when processes are running in
PR_TASK_ISOLATION_ENABLE mode.  Such processes should receive no
interrupts from the kernel; if one arrives anyway and this boot flag
is specified, the kernel generates a stack dump on the console.
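
For example, a task might enter this mode with something like the
following sketch.  This assumes the PR_SET_TASK_ISOLATION prctl and
the PR_TASK_ISOLATION_ENABLE flag added earlier in this series, with
the constants visible via the series' updated uapi headers; the core
number is illustrative only:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <err.h>
	#include <sys/prctl.h>

	int main(void)
	{
		cpu_set_t set;

		/* Pin to a core that was booted with nohz_full=. */
		CPU_ZERO(&set);
		CPU_SET(1, &set);	/* core 1 is illustrative */
		if (sched_setaffinity(0, sizeof(set), &set) != 0)
			err(1, "sched_setaffinity");

		/*
		 * Request isolation.  PR_SET_TASK_ISOLATION and
		 * PR_TASK_ISOLATION_ENABLE are assumed from the
		 * earlier patches in this series.
		 */
		if (prctl(PR_SET_TASK_ISOLATION,
			  PR_TASK_ISOLATION_ENABLE, 0, 0, 0) != 0)
			err(1, "prctl");

		for (;;)
			;	/* spin in userspace; no interrupts expected */
	}

Booting with "task_isolation_debug" (alongside nohz_full=) then makes
any kernel interrupt of this task produce a console backtrace.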

It's possible to use ftrace simply to detect whether a task_isolation
core has unexpectedly entered the kernel.  This boot flag, however,
lets the kernel provide better diagnostics, e.g. by reporting in the
IPI-generating code which remote core and context is preparing to
deliver an interrupt to a task_isolation core.
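
With the flag enabled, the check added to kernel/sched/core.c below
reports the target core and then dumps the stack of the interrupting
context, so the console output looks roughly like this (the cpu
number and backtrace will of course vary):

	Interrupt detected for task_isolation cpu 1
	<backtrace of the interrupting context, via dump_stack()>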

It may be worth considering other ways to generate useful debugging
output rather than console spew, but for now that is simple and direct.

Signed-off-by: Chris Metcalf <cmetcalf@xxxxxxxxxx>
---
 Documentation/kernel-parameters.txt |  7 +++++++
 arch/tile/mm/homecache.c            |  5 ++++-
 include/linux/isolation.h           |  2 ++
 kernel/irq_work.c                   |  5 ++++-
 kernel/sched/core.c                 | 21 +++++++++++++++++++++
 kernel/signal.c                     |  5 +++++
 kernel/smp.c                        |  4 ++++
 kernel/softirq.c                    |  7 +++++++
 8 files changed, 54 insertions(+), 2 deletions(-)

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1d6f0459cd7b..934f172eb140 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3595,6 +3595,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			neutralize any effect of /proc/sys/kernel/sysrq.
 			Useful for debugging.
 
+	task_isolation_debug	[KNL]
+			In kernels built with CONFIG_TASK_ISOLATION and booted
+			in nohz_full= mode, this setting will generate console
+			backtraces when the kernel is about to interrupt a
+			task that has requested PR_TASK_ISOLATION_ENABLE
+			and is running on a nohz_full core.
+
 	tcpmhash_entries= [KNL,NET]
 			Set the number of tcp_metrics_hash slots.
 			Default value is 8192 or 16384 depending on total
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 40ca30a9fee3..a79325113105 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -31,6 +31,7 @@
 #include <linux/smp.h>
 #include <linux/module.h>
 #include <linux/hugetlb.h>
+#include <linux/isolation.h>
 
 #include <asm/page.h>
 #include <asm/sections.h>
@@ -83,8 +84,10 @@ static void hv_flush_update(const struct cpumask *cache_cpumask,
 	 * Don't bother to update atomically; losing a count
 	 * here is not that critical.
 	 */
-	for_each_cpu(cpu, &mask)
+	for_each_cpu(cpu, &mask) {
 		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
+		task_isolation_debug(cpu);
+	}
 }
 
 /*
diff --git a/include/linux/isolation.h b/include/linux/isolation.h
index 27a4469831c1..9f1747331a36 100644
--- a/include/linux/isolation.h
+++ b/include/linux/isolation.h
@@ -18,11 +18,13 @@ extern void task_isolation_enter(void);
 extern void task_isolation_syscall(int nr);
 extern void task_isolation_exception(void);
 extern void task_isolation_wait(void);
+extern void task_isolation_debug(int cpu);
 #else
 static inline bool task_isolation_enabled(void) { return false; }
 static inline void task_isolation_enter(void) { }
 static inline void task_isolation_syscall(int nr) { }
 static inline void task_isolation_exception(void) { }
+static inline void task_isolation_debug(int cpu) { }
 #endif
 
 static inline bool task_isolation_strict(void)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index cbf9fb899d92..745c2ea6a4e4 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -17,6 +17,7 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
+#include <linux/isolation.h>
 #include <asm/processor.h>
 
 
@@ -75,8 +76,10 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (!irq_work_claim(work))
 		return false;
 
-	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) {
+		task_isolation_debug(cpu);
 		arch_send_call_function_single_ipi(cpu);
+	}
 
 	return true;
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 78b4bad10081..0c4e4eba69b1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -74,6 +74,7 @@
 #include <linux/binfmts.h>
 #include <linux/context_tracking.h>
 #include <linux/compiler.h>
+#include <linux/isolation.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -745,6 +746,26 @@ bool sched_can_stop_tick(void)
 }
 #endif /* CONFIG_NO_HZ_FULL */
 
+#ifdef CONFIG_TASK_ISOLATION
+/* Enable debugging of any interrupts of task_isolation cores. */
+static bool task_isolation_debug_flag;
+static int __init task_isolation_debug_func(char *str)
+{
+	task_isolation_debug_flag = true;
+	return 1;
+}
+__setup("task_isolation_debug", task_isolation_debug_func);
+
+void task_isolation_debug(int cpu)
+{
+	if (task_isolation_debug_flag && tick_nohz_full_cpu(cpu) &&
+	    (cpu_curr(cpu)->task_isolation_flags & PR_TASK_ISOLATION_ENABLE)) {
+		pr_err("Interrupt detected for task_isolation cpu %d\n", cpu);
+		dump_stack();
+	}
+}
+#endif
+
 void sched_avg_update(struct rq *rq)
 {
 	s64 period = sched_avg_period();
diff --git a/kernel/signal.c b/kernel/signal.c
index 836df8dac6cc..60e15e835b9e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -684,6 +684,11 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
  */
 void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
+#ifdef CONFIG_TASK_ISOLATION
+	/* If the task is being killed, don't complain about task_isolation. */
+	if (state & TASK_WAKEKILL)
+		t->task_isolation_flags = 0;
+#endif
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
 	/*
 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
diff --git a/kernel/smp.c b/kernel/smp.c
index 07854477c164..b0bddff2693d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -14,6 +14,7 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
+#include <linux/isolation.h>
 
 #include "smpboot.h"
 
@@ -178,6 +179,7 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
 	 * locking and barrier primitives. Generic code isn't really
 	 * equipped to do the right thing...
 	 */
+	task_isolation_debug(cpu);
 	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
 		arch_send_call_function_single_ipi(cpu);
 
@@ -457,6 +459,8 @@ void smp_call_function_many(const struct cpumask *mask,
 	}
 
 	/* Send a message to all CPUs in the map */
+	for_each_cpu(cpu, cfd->cpumask)
+		task_isolation_debug(cpu);
 	arch_send_call_function_ipi_mask(cfd->cpumask);
 
 	if (wait) {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 479e4436f787..ed762fec7265 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -24,8 +24,10 @@
 #include <linux/ftrace.h>
 #include <linux/smp.h>
 #include <linux/smpboot.h>
+#include <linux/context_tracking.h>
 #include <linux/tick.h>
 #include <linux/irq.h>
+#include <linux/isolation.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
@@ -335,6 +337,11 @@ void irq_enter(void)
 		_local_bh_enable();
 	}
 
+	if (context_tracking_cpu_is_enabled() &&
+	    context_tracking_in_user() &&
+	    !in_interrupt())
+		task_isolation_debug(smp_processor_id());
+
 	__irq_enter();
 }
 
-- 
2.1.2
