Re: [PATCH v1 2/2] cleanup.h: Introduce DEFINE_INACTIVE_GUARD and activate_guard

On 2024-09-02 14:46, Linus Torvalds wrote:
> [...]
> IOW, that code should just have been something like this:
>
>      #define __BPF_DECLARE_TRACE(call, proto, args, tp_flags)    \
>      static notrace void                                         \
>      __bpf_trace_##call(void *__data, proto)                     \
>      {                                                           \
>                                                                  \
>          if ((tp_flags) & TRACEPOINT_MAY_FAULT) {                \
>                  might_fault();                                  \
>                  guard(preempt_notrace)();                       \
>                  CONCATENATE(bpf_trace_run, ...                  \
>                  return;                                         \
>          }                                                       \
>          CONCATENATE(bpf_trace_run, ...                          \
>      }
>
> instead.
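
For context, the semantics of DEFINE_INACTIVE_GUARD()/activate_guard()
can be approximated in plain C with the compiler's cleanup attribute.
The following is an illustrative userspace sketch with simplified
argument shapes (the real macros take a guard class name as first
argument), not the cleanup.h implementation:

#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-ins for preempt_disable/enable_notrace(). */
static void fake_preempt_disable(void) { puts("preempt disabled"); }
static void fake_preempt_enable(void) { puts("preempt enabled"); }

/* Guard object: inert until activated, released on scope exit. */
struct inactive_guard { bool active; };

static void inactive_guard_exit(struct inactive_guard *g)
{
        if (g->active)
                fake_preempt_enable();
}

#define DEFINE_INACTIVE_GUARD(var)                              \
        struct inactive_guard var                               \
        __attribute__((cleanup(inactive_guard_exit))) =         \
                { .active = false }

#define activate_guard(var)                                     \
        do {                                                    \
                fake_preempt_disable();                         \
                (var).active = true;                            \
        } while (0)

int main(void)
{
        bool may_fault = true;

        DEFINE_INACTIVE_GUARD(guard);
        if (may_fault)
                activate_guard(guard);
        puts("tracepoint body");
        return 0;  /* cleanup runs here, re-enabling if activated */
}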

If we look at perf_trace_##call() with the conditional guard, it
looks like the _DECLARE_EVENT_CLASS() code at the end of this
message. It is not clear to me that duplicating a body of that size,
as you suggest for __bpf_trace_##call(), would be acceptable here.

I agree with you that the conditional guard is perhaps not something
we want at this stage, but in this specific case, should we go back
to goto and labels?
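
For instance, the goto-based variant could be shaped along these
lines (untested sketch; the declarations and body are the same as in
the _DECLARE_EVENT_CLASS() code at the end of this message, with
each early "return" becoming a "goto end"):

static notrace void                                                     \
perf_trace_##call(void *__data, proto)                                  \
{                                                                       \
        /* ... declarations as in the code below ... */                 \
                                                                        \
        if ((tp_flags) & TRACEPOINT_MAY_FAULT) {                        \
                might_fault();                                          \
                preempt_disable_notrace();                              \
        }                                                               \
                                                                        \
        /* ... body as below, with "return" becoming "goto end" ... */  \
                                                                        \
end:                                                                    \
        if ((tp_flags) & TRACEPOINT_MAY_FAULT)                          \
                preempt_enable_notrace();                               \
}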

One alternative is to add yet another level of macros to factor out
the duplicated body; see the sketch after the _DECLARE_EVENT_CLASS()
code below.

#define _DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print, tp_flags) \
static notrace void                                                     \
perf_trace_##call(void *__data, proto)                                  \
{                                                                       \
        struct trace_event_call *event_call = __data;                   \
        struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
        struct trace_event_raw_##call *entry;                           \
        struct pt_regs *__regs;                                         \
        u64 __count = 1;                                                \
        struct task_struct *__task = NULL;                              \
        struct hlist_head *head;                                        \
        int __entry_size;                                               \
        int __data_size;                                                \
        int rctx;                                                       \
                                                                        \
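        /* Guard is inert until explicitly activated below. */         \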
        DEFINE_INACTIVE_GUARD(preempt_notrace, trace_event_guard);      \
                                                                        \
        if ((tp_flags) & TRACEPOINT_MAY_FAULT) {                        \
                might_fault();                                          \
                activate_guard(preempt_notrace, trace_event_guard)();   \
        }                                                               \
                                                                        \
        __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
                                                                        \
        head = this_cpu_ptr(event_call->perf_events);                   \
        if (!bpf_prog_array_valid(event_call) &&                        \
            __builtin_constant_p(!__task) && !__task &&                 \
            hlist_empty(head))                                          \
                return;                                                 \
                                                                        \
        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
                             sizeof(u64));                              \
        __entry_size -= sizeof(u32);                                    \
                                                                        \
        entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx);     \
        if (!entry)                                                     \
                return;                                                 \
                                                                        \
        perf_fetch_caller_regs(__regs);                                 \
                                                                        \
        tstruct                                                         \
                                                                        \
        { assign; }                                                     \
                                                                        \
        perf_trace_run_bpf_submit(entry, __entry_size, rctx,            \
                                  event_call, __count, __regs,          \
                                  head, __task);                        \
}
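
With the extra macro level, the duplicated body could be shared
along these lines (untested sketch; _PERF_TRACE_BODY() is a
hypothetical helper that would expand to the declarations and body
shown above):

#define _DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print, tp_flags) \
static notrace void                                                     \
perf_trace_##call(void *__data, proto)                                  \
{                                                                       \
        if ((tp_flags) & TRACEPOINT_MAY_FAULT) {                        \
                might_fault();                                          \
                guard(preempt_notrace)();                               \
                _PERF_TRACE_BODY(call, proto, args, tstruct, assign);   \
                return;                                                 \
        }                                                               \
        _PERF_TRACE_BODY(call, proto, args, tstruct, assign);           \
}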

Thanks,

Mathieu


--
Mathieu Desnoyers
EfficiOS Inc.
https://www.efficios.com




