From: Aleksandr Nogikh <nogikh@xxxxxxxxxx>

Remote KCOV coverage collection enables coverage-guided fuzzing of
code that is not reachable during normal system call execution. It is
especially helpful for fuzzing networking subsystems, where it is
common to perform packet handling in separate work queues even for
packets that originated directly from user space.

Enable coverage-guided frame injection by adding a kcov remote handle
to skb extensions. Default initialization in __alloc_skb and
__build_skb_around ensures that no socket buffer generated during a
system call will be missed. Packet-processing code of interest should
be annotated with kcov_remote_start()/kcov_remote_stop().

An alternative approach would be to derive kcov_handle solely from the
device/interface that received the particular socket buffer. In that
case, however, it would be impossible to distinguish between packets
that originated from normal background network processes and packets
that were intentionally injected from user space.

Signed-off-by: Aleksandr Nogikh <nogikh@xxxxxxxxxx>
---
v2 -> v3:
* Reimplemented this change. Now the kcov handle is stored in skb
  extensions instead of sk_buff.
v1 -> v2:
* Updated the commit message.
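A note for reviewers: below is a minimal sketch (not part of this
patch) of how a deferred packet-processing site could consume the
stored handle. The handler function is made up for illustration;
kcov_remote_start_common()/kcov_remote_stop() are the existing kcov
remote annotation API, and a zero handle is silently ignored by kcov:

	#include <linux/kcov.h>
	#include <linux/skbuff.h>

	/* Hypothetical work queue handler for a queued skb. */
	static void demo_process_queued_skb(struct sk_buff *skb)
	{
		/* Resume remote coverage collection on behalf of the
		 * task that originally created this skb.
		 */
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* ... actual packet handling would go here ... */

		kcov_remote_stop();
	}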
---
 include/linux/skbuff.h | 31 +++++++++++++++++++++++++++++++
 net/core/skbuff.c      | 11 +++++++++++
 2 files changed, 42 insertions(+)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a828cf99c521..b63d90faa8e9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4150,6 +4150,9 @@ enum skb_ext_id {
 #endif
 #if IS_ENABLED(CONFIG_MPTCP)
 	SKB_EXT_MPTCP,
+#endif
+#if IS_ENABLED(CONFIG_KCOV)
+	SKB_EXT_KCOV_HANDLE,
 #endif
 	SKB_EXT_NUM, /* must be last */
 };
@@ -4605,5 +4608,33 @@ static inline void skb_reset_redirect(struct sk_buff *skb)
 #endif
 }
 
+#ifdef CONFIG_KCOV
+
+static inline void skb_set_kcov_handle(struct sk_buff *skb, const u64 kcov_handle)
+{
+	/* No reason to allocate skb extensions to set kcov_handle if kcov_handle is 0. */
+	if (skb_has_extensions(skb) || kcov_handle) {
+		u64 *kcov_handle_ptr = skb_ext_add(skb, SKB_EXT_KCOV_HANDLE);
+
+		if (kcov_handle_ptr)
+			*kcov_handle_ptr = kcov_handle;
+	}
+}
+
+static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
+{
+	u64 *kcov_handle = skb_ext_find(skb, SKB_EXT_KCOV_HANDLE);
+
+	return kcov_handle ? *kcov_handle : 0;
+}
+
+#else
+
+static inline void skb_set_kcov_handle(struct sk_buff *skb, const u64 kcov_handle) { }
+
+static inline u64 skb_get_kcov_handle(struct sk_buff *skb) { return 0; }
+
+#endif /* CONFIG_KCOV */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1ba8f0163744..c5e6c0b83a92 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -249,6 +249,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
 	}
+
+	skb_set_kcov_handle(skb, kcov_common_handle());
+
 out:
 	return skb;
 nodata:
@@ -282,6 +285,8 @@ static struct sk_buff *__build_skb_around(struct sk_buff *skb,
 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 	atomic_set(&shinfo->dataref, 1);
 
+	skb_set_kcov_handle(skb, kcov_common_handle());
+
 	return skb;
 }
 
@@ -4203,6 +4208,9 @@ static const u8 skb_ext_type_len[] = {
 #if IS_ENABLED(CONFIG_MPTCP)
 	[SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
 #endif
+#if IS_ENABLED(CONFIG_KCOV)
+	[SKB_EXT_KCOV_HANDLE] = SKB_EXT_CHUNKSIZEOF(u64),
+#endif
 };
 
 static __always_inline unsigned int skb_ext_total_length(void)
@@ -4219,6 +4227,9 @@ static __always_inline unsigned int skb_ext_total_length(void)
 #endif
 #if IS_ENABLED(CONFIG_MPTCP)
 		skb_ext_type_len[SKB_EXT_MPTCP] +
+#endif
+#if IS_ENABLED(CONFIG_KCOV)
+		skb_ext_type_len[SKB_EXT_KCOV_HANDLE] +
 #endif
 		0;
 }
-- 
2.29.0.rc1.297.gfa9743e501-goog
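For context (also not part of the patch): kcov_common_handle() in
__alloc_skb/__build_skb_around returns the per-task common handle
that a fuzzer sets via the KCOV_REMOTE_ENABLE ioctl. A condensed
userspace sketch along the lines of Documentation/dev-tools/kcov.rst,
with error handling omitted and an arbitrary instance id (0x42):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/types.h>

	/* Definitions as in Documentation/dev-tools/kcov.rst. */
	struct kcov_remote_arg {
		__u32		trace_mode;
		__u32		area_size;
		__u32		num_handles;
		__aligned_u64	common_handle;
		__aligned_u64	handles[0];
	};

	#define KCOV_INIT_TRACE		_IOR('c', 1, unsigned long)
	#define KCOV_REMOTE_ENABLE	_IOW('c', 102, struct kcov_remote_arg)
	#define KCOV_TRACE_PC		0
	#define KCOV_SUBSYSTEM_COMMON	(0x00ull << 56)
	#define COVER_SIZE		(64 << 10)

	int main(void)
	{
		struct kcov_remote_arg arg = {};
		unsigned long *cover;
		int fd = open("/sys/kernel/debug/kcov", O_RDWR);

		ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
		cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
			     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

		/* Every skb allocated during this task's syscalls will
		 * now carry the common handle set below.
		 */
		arg.trace_mode = KCOV_TRACE_PC;
		arg.area_size = COVER_SIZE;
		arg.common_handle = KCOV_SUBSYSTEM_COMMON | 0x42;
		ioctl(fd, KCOV_REMOTE_ENABLE, &arg);

		/* ... inject packets, then read PCs from cover[] ... */
		return 0;
	}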