On Mon, Mar 26, 2018 at 11:58 AM, Maninder Singh <maninder1.s@xxxxxxxxxxx> wrote:
> Check whether the allocation happens in an IRQ handler.
> This lets us strip everything below the IRQ entry point to reduce the
> number of unique stack traces needed to be stored.
>
> So move the KASAN code to a generic file so that page_owner can also
> do the same filtering.
>
> Initial KASAN commit:
> be7635e7287e0e8013af3c89a6354a9e0182594c
>
> Original:
> __alloc_pages_nodemask+0xfc/0x220
> page_frag_alloc+0x84/0x140
> __napi_alloc_skb+0x83/0xe0
> rtl8169_poll+0x1e5/0x670
> net_rx_action+0x132/0x3a0
> __do_softirq+0xce/0x298
> irq_exit+0xa3/0xb0
> do_IRQ+0x72/0xc0
> ret_from_intr+0x0/0x18
> cpuidle_enter_state+0x96/0x290
> do_idle+0x163/0x1a0
>
> After patch:
> __alloc_pages_nodemask+0xfc/0x220
> page_frag_alloc+0x84/0x140
> __napi_alloc_skb+0x83/0xe0
> rtl8169_poll+0x1e5/0x670
> net_rx_action+0x132/0x3a0
> __do_softirq+0xce/0x298
>
> Signed-off-by: Vaneet Narang <v.narang@xxxxxxxxxxx>
> Signed-off-by: Maninder Singh <maninder1.s@xxxxxxxxxxx>
> ---
> v1->v2: fix build break for tile and blackfin
> (https://lkml.org/lkml/2017/12/3/287, verified for blackfin)
>
>  include/linux/stacktrace.h | 26 ++++++++++++++++++++++++++
>  mm/kasan/kasan.c           | 22 ----------------------
>  mm/page_owner.c            |  1 +
>  3 files changed, 27 insertions(+), 22 deletions(-)
>
> diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
> index ba29a06..3d3e49d 100644
> --- a/include/linux/stacktrace.h
> +++ b/include/linux/stacktrace.h
> @@ -4,6 +4,8 @@
>
>  #include <linux/types.h>
>
> +extern char __irqentry_text_start[], __irqentry_text_end[];
> +extern char __softirqentry_text_start[], __softirqentry_text_end[];
>  struct task_struct;
>  struct pt_regs;
>
> @@ -26,6 +28,28 @@ extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
>  extern int snprint_stack_trace(char *buf, size_t size,
>  				struct stack_trace *trace, int spaces);
>
> +static inline int in_irqentry_text(unsigned long ptr)
> +{
> +	return (ptr >= (unsigned long)&__irqentry_text_start &&
> +		ptr < (unsigned long)&__irqentry_text_end) ||
> +	       (ptr >= (unsigned long)&__softirqentry_text_start &&
> +		ptr < (unsigned long)&__softirqentry_text_end);
> +}
> +
> +static inline void filter_irq_stacks(struct stack_trace *trace)
> +{
> +	int i;
> +
> +	if (!trace->nr_entries)
> +		return;
> +	for (i = 0; i < trace->nr_entries; i++)
> +		if (in_irqentry_text(trace->entries[i])) {
> +			/* Include the irqentry function into the stack. */
> +			trace->nr_entries = i + 1;
> +			break;
> +		}
> +}
> +
>  #ifdef CONFIG_USER_STACKTRACE_SUPPORT
>  extern void save_stack_trace_user(struct stack_trace *trace);
>  #else
> @@ -38,6 +62,8 @@ extern int snprint_stack_trace(char *buf, size_t size,
>  # define save_stack_trace_user(trace)			do { } while (0)
>  # define print_stack_trace(trace, spaces)		do { } while (0)
>  # define snprint_stack_trace(buf, size, trace, spaces)	do { } while (0)
> +# define filter_irq_stacks(trace)			do { } while (0)
> +# define in_irqentry_text(ptr)				do { } while (0)

Hi,

Every user of stack_depot should filter out irq frames; without that,
stack_depot will run out of memory sooner or later. So this is a change
in the right direction.

Do we need to define an empty version of in_irqentry_text()? Shouldn't
only filter_irq_stacks() be called by kernel code?
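For reference, the pattern I'd expect every stack_depot user to follow is
roughly the sketch below. It is modeled on the page_owner save_stack()
hunk further down in this patch; PAGE_OWNER_STACK_DEPTH, the .skip value
and the surrounding function body are only illustrative, not something
this series adds:

	static noinline depot_stack_handle_t save_stack(gfp_t flags)
	{
		/* Illustrative depth; the real value lives in mm/page_owner.c. */
		unsigned long entries[PAGE_OWNER_STACK_DEPTH];
		struct stack_trace trace = {
			.nr_entries	= 0,
			.entries	= entries,
			.max_entries	= PAGE_OWNER_STACK_DEPTH,
			.skip		= 2
		};

		save_stack_trace(&trace);
		/* Cut everything below the irq entry point before deduplication. */
		filter_irq_stacks(&trace);
		if (trace.nr_entries != 0 &&
		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
			trace.nr_entries--;

		return depot_save_stack(&trace, flags);
	}

Only filter_irq_stacks() appears in that pattern, which is why I'm asking
whether callers ever need in_irqentry_text() directly.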
> # define save_stack_trace_tsk_reliable(tsk, trace)	({ -ENOSYS; })
> #endif	/* CONFIG_STACKTRACE */
>
> diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
> index 405bba4..129e7b8 100644
> --- a/mm/kasan/kasan.c
> +++ b/mm/kasan/kasan.c
> @@ -412,28 +412,6 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
>  			KASAN_KMALLOC_REDZONE);
>  }
>
> -static inline int in_irqentry_text(unsigned long ptr)
> -{
> -	return (ptr >= (unsigned long)&__irqentry_text_start &&
> -		ptr < (unsigned long)&__irqentry_text_end) ||
> -	       (ptr >= (unsigned long)&__softirqentry_text_start &&
> -		ptr < (unsigned long)&__softirqentry_text_end);
> -}
> -
> -static inline void filter_irq_stacks(struct stack_trace *trace)
> -{
> -	int i;
> -
> -	if (!trace->nr_entries)
> -		return;
> -	for (i = 0; i < trace->nr_entries; i++)
> -		if (in_irqentry_text(trace->entries[i])) {
> -			/* Include the irqentry function into the stack. */
> -			trace->nr_entries = i + 1;
> -			break;
> -		}
> -}
> -
>  static inline depot_stack_handle_t save_stack(gfp_t flags)
>  {
>  	unsigned long entries[KASAN_STACK_DEPTH];
> diff --git a/mm/page_owner.c b/mm/page_owner.c
> index 8602fb4..30e9cb2 100644
> --- a/mm/page_owner.c
> +++ b/mm/page_owner.c
> @@ -148,6 +148,7 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
>  	depot_stack_handle_t handle;
>
>  	save_stack_trace(&trace);
> +	filter_irq_stacks(&trace);
>  	if (trace.nr_entries != 0 &&
>  	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
>  		trace.nr_entries--;
> --
> 1.9.1
>