Patch "bpf: Mark bpf_spin_{lock,unlock}() helpers with notrace correctly" has been added to the 5.4-stable tree

This is a note to let you know that I've just added the patch titled

    bpf: Mark bpf_spin_{lock,unlock}() helpers with notrace correctly

to the 5.4-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     bpf-mark-bpf_spin_-lock-unlock-helpers-with-notrace-.patch
and it can be found in the queue-5.4 subdirectory.

If you, or anyone else, feel it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 218a47bca69c12193e01416ce7a3b5e6968acd4f
Author: Yonghong Song <yonghong.song@xxxxxxxxx>
Date:   Tue Feb 6 23:01:02 2024 -0800

    bpf: Mark bpf_spin_{lock,unlock}() helpers with notrace correctly
    
    [ Upstream commit 178c54666f9c4d2f49f2ea661d0c11b52f0ed190 ]
    
    Currently, tracing programs are not supposed to be able to call the
    bpf_spin_{lock,unlock}() helpers. This restriction prevents deadlocks
    in cases such as the following:
      - there is a prog (prog-A) calling bpf_spin_{lock,unlock}().
      - there is a tracing program (prog-B), e.g., fentry, attached
        to bpf_spin_lock() and/or bpf_spin_unlock().
      - prog-B calls bpf_spin_{lock,unlock}().
    In such a case, when prog-A calls bpf_spin_{lock,unlock}(),
    a deadlock occurs.
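    
    As an illustration (a minimal sketch with hypothetical names, not
    the actual reproducer from [2]), prog-B could look like the libbpf
    program below; if such a program also ends up taking a BPF spin
    lock, the nested acquisition deadlocks as described above:
    
      /* prog_b.bpf.c -- hypothetical fentry program attached to
       * bpf_spin_lock(); it runs on every entry to the helper.
       */
      #include "vmlinux.h"
      #include <bpf/bpf_helpers.h>
      #include <bpf/bpf_tracing.h>
    
      SEC("fentry/bpf_spin_lock")
      int BPF_PROG(on_bpf_spin_lock, struct bpf_spin_lock *lock)
      {
              /* Any work here that re-enters bpf_spin_lock() on the
               * same lock deadlocks, and also re-triggers this
               * program recursively.
               */
              bpf_printk("bpf_spin_lock entered");
              return 0;
      }
    
      char LICENSE[] SEC("license") = "GPL";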
    
    The relevant code in kernel/bpf/helpers.c is:
      notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
      notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
    The notrace attribute is supposed to prevent fentry programs from
    attaching to bpf_spin_{lock,unlock}().
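    
    For reference, notrace is typically defined (modulo config-dependent
    variants in include/linux/compiler_types.h) along the lines of:
    
      #define notrace __attribute__((__no_instrument_function__))
    
    which keeps the function from getting an ftrace patch site (it will
    not appear in available_filter_functions), leaving fentry programs
    nothing to attach to.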
    
    In practice, however, this is not the case: an fentry program can
    successfully attach to bpf_spin_lock(). Siddharth Chintamaneni
    reported the issue in [1]. The following is the macro definition
    behind the above BPF_CALL_1:
      #define BPF_CALL_x(x, name, ...)                                               \
            static __always_inline                                                 \
            u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));   \
            typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
            u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));         \
            u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))          \
            {                                                                      \
                    return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
            }                                                                      \
            static __always_inline                                                 \
            u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
    
      #define BPF_CALL_1(name, ...)   BPF_CALL_x(1, name, __VA_ARGS__)
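    
    Expanding notrace BPF_CALL_1(bpf_spin_lock, ...) by hand (argument
    plumbing simplified) shows where the attribute actually lands: the
    leading notrace binds to the first declaration the macro emits,
    i.e. the hidden inline function, not the exposed helper:
    
      notrace static __always_inline
      u64 ____bpf_spin_lock(struct bpf_spin_lock *lock); /* notrace lands here */
      typedef u64 (*btf_bpf_spin_lock)(struct bpf_spin_lock *lock);
      u64 bpf_spin_lock(u64 r1 /* , unused regs */);     /* ...but not here */
      u64 bpf_spin_lock(u64 r1 /* , unused regs */)
      {
              return ((btf_bpf_spin_lock)____bpf_spin_lock)((void *)(long)r1);
      }
      static __always_inline
      u64 ____bpf_spin_lock(struct bpf_spin_lock *lock)
      /* { helper body follows } */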
    
    The notrace attribute is thus actually applied to the static
    always_inline function ____bpf_spin_{lock,unlock}(). The exposed
    callback functions bpf_spin_{lock,unlock}() are not marked with
    notrace, which allows fentry programs to attach to the two helpers
    and can cause the above-mentioned deadlock. Siddharth Chintamaneni
    has a reproducer in [2].
    
    To fix the issue, a new macro, NOTRACE_BPF_CALL_1, is introduced;
    it applies the notrace attribute to the exposed function rather
    than the hidden always_inline one, which fixes the problem.
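    
    With the fix, notrace is passed through the macro's new attr
    parameter, so the simplified expansion above becomes:
    
      notrace u64 bpf_spin_lock(u64 r1 /* , unused regs */);
      notrace u64 bpf_spin_lock(u64 r1 /* , unused regs */)
      { ... }
    
    and the exposed helper no longer gets an ftrace patch site that an
    fentry program could attach to.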
    
      [1] https://lore.kernel.org/bpf/CAE5sdEigPnoGrzN8WU7Tx-h-iFuMZgW06qp0KHWtpvoXxf1OAQ@xxxxxxxxxxxxxx/
      [2] https://lore.kernel.org/bpf/CAE5sdEg6yUc_Jz50AnUXEEUh6O73yQ1Z6NV2srJnef0ZrQkZew@xxxxxxxxxxxxxx/
    
    Fixes: d83525ca62cf ("bpf: introduce bpf_spin_lock")
    Signed-off-by: Yonghong Song <yonghong.song@xxxxxxxxx>
    Signed-off-by: Andrii Nakryiko <andrii@xxxxxxxxxx>
    Acked-by: Jiri Olsa <jolsa@xxxxxxxxxx>
    Link: https://lore.kernel.org/bpf/20240207070102.335167-1-yonghong.song@xxxxxxxxx
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 11407e9e606db..0bec300b2e516 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -476,24 +476,27 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
 	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,       \
 		  u64, __ur_3, u64, __ur_4, u64, __ur_5)
 
-#define BPF_CALL_x(x, name, ...)					       \
+#define BPF_CALL_x(x, attr, name, ...)					       \
 	static __always_inline						       \
 	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));   \
 	typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
-	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));	       \
-	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))	       \
+	attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));    \
+	attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))     \
 	{								       \
 		return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
 	}								       \
 	static __always_inline						       \
 	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
 
-#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
-#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
-#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
-#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
-#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
-#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)
+#define __NOATTR
+#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
+
+#define NOTRACE_BPF_CALL_1(name, ...)	BPF_CALL_x(1, notrace, name, __VA_ARGS__)
 
 #define bpf_ctx_range(TYPE, MEMBER)						\
 	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 4ba3dc621a345..9bfb4685d0684 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -271,7 +271,7 @@ static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
 	__this_cpu_write(irqsave_flags, flags);
 }
 
-notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
 {
 	__bpf_spin_lock_irqsave(lock);
 	return 0;
@@ -293,7 +293,7 @@ static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
 	local_irq_restore(flags);
 }
 
-notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
 {
 	__bpf_spin_unlock_irqrestore(lock);
 	return 0;



