On 2/29/20 10:24 PM, Andrii Nakryiko wrote:
Switch BPF UAPI constants, previously defined as #define macros, to anonymous
enum values. This preserves the constants' values and their behavior in
expressions, but has the added advantage of being captured as part of DWARF
and, subsequently, BTF type info. This, in turn, greatly improves the
usefulness of the generated vmlinux.h for BPF applications, as BPF users will
no longer need to copy/paste the various flags and constants that are
frequently used with BPF helpers.
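To illustrate the intended benefit, here is a rough sketch of a BPF program
(hypothetical example, assuming a bpftool-generated vmlinux.h built from a
kernel with this change, plus libbpf's bpf_helpers.h) that picks up a helper
flag such as BPF_F_CURRENT_CPU straight from vmlinux.h instead of redefining
it locally:

  /* vmlinux.h is generated from kernel BTF and, with this change, also
   * carries UAPI constants (e.g. helper flags) as enum values.
   */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>

  struct {
  	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
  	__uint(key_size, sizeof(int));
  	__uint(value_size, sizeof(int));
  } events SEC(".maps");

  SEC("tracepoint/syscalls/sys_enter_execve")
  int handle_execve(void *ctx)
  {
  	int val = 1;

  	/* BPF_F_CURRENT_CPU comes from vmlinux.h, not a copy/pasted #define */
  	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &val, sizeof(val));
  	return 0;
  }

  char LICENSE[] SEC("license") = "GPL";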
Signed-off-by: Andrii Nakryiko <andriin@xxxxxx>
---
include/uapi/linux/bpf.h | 272 +++++++++++++++----------
include/uapi/linux/bpf_common.h | 86 ++++----
include/uapi/linux/btf.h | 60 +++---
tools/include/uapi/linux/bpf.h | 274 ++++++++++++++++----------
tools/include/uapi/linux/bpf_common.h | 86 ++++----
tools/include/uapi/linux/btf.h | 60 +++---
6 files changed, 497 insertions(+), 341 deletions(-)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 8e98ced0963b..03e08f256bd1 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -14,34 +14,36 @@
/* Extended instruction set based on top of classic BPF */
/* instruction classes */
-#define BPF_JMP32 0x06 /* jmp mode in word width */
-#define BPF_ALU64 0x07 /* alu mode in double word width */
+enum {
+ BPF_JMP32 = 0x06, /* jmp mode in word width */
+ BPF_ALU64 = 0x07, /* alu mode in double word width */
Not sure whether we need to preserve uapi backward compatibility here or not.
One possibility is to add

  #define BPF_ALU64 BPF_ALU64

so that code which tests for these macros (e.g. with #ifdef) will continue
to work. If this is an acceptable solution, we have a lot of constants in
net-related headers that would benefit from the same conversion, e.g. for
kprobe/tracepoint programs tracing networking-related functions.
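In other words, something like the following pattern (just a sketch of the
suggestion, not taken from the patch): the value lives in the enum and so
lands in BTF, while the self-referencing macro keeps preprocessor feature
checks working.

  enum {
  	BPF_ALU64 = 0x07,	/* alu mode in double word width */
  };
  #define BPF_ALU64 BPF_ALU64	/* keeps #ifdef BPF_ALU64 checks working */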
/* ld/ldx fields */
-#define BPF_DW 0x18 /* double word (64-bit) */
-#define BPF_XADD 0xc0 /* exclusive add */
+ BPF_DW = 0x18, /* double word (64-bit) */
+ BPF_XADD = 0xc0, /* exclusive add */
/* alu/jmp fields */
-#define BPF_MOV 0xb0 /* mov reg to reg */
-#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
+ BPF_MOV = 0xb0, /* mov reg to reg */
+ BPF_ARSH = 0xc0, /* sign extending arithmetic shift right */
/* change endianness of a register */
-#define BPF_END 0xd0 /* flags for endianness conversion: */
-#define BPF_TO_LE 0x00 /* convert to little-endian */
-#define BPF_TO_BE 0x08 /* convert to big-endian */
-#define BPF_FROM_LE BPF_TO_LE
-#define BPF_FROM_BE BPF_TO_BE
+ BPF_END = 0xd0, /* flags for endianness conversion: */
+ BPF_TO_LE = 0x00, /* convert to little-endian */
+ BPF_TO_BE = 0x08, /* convert to big-endian */
+ BPF_FROM_LE = BPF_TO_LE,
+ BPF_FROM_BE = BPF_TO_BE,
/* jmp encodings */
-#define BPF_JNE 0x50 /* jump != */
-#define BPF_JLT 0xa0 /* LT is unsigned, '<' */
-#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
-#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
-#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
-#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
-#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
-#define BPF_CALL 0x80 /* function call */
-#define BPF_EXIT 0x90 /* function return */
+ BPF_JNE = 0x50, /* jump != */
+ BPF_JLT = 0xa0, /* LT is unsigned, '<' */
+ BPF_JLE = 0xb0, /* LE is unsigned, '<=' */
+ BPF_JSGT = 0x60, /* SGT is signed '>', GT in x86 */
+ BPF_JSGE = 0x70, /* SGE is signed '>=', GE in x86 */
+ BPF_JSLT = 0xc0, /* SLT is signed, '<' */
+ BPF_JSLE = 0xd0, /* SLE is signed, '<=' */
+ BPF_CALL = 0x80, /* function call */
+ BPF_EXIT = 0x90, /* function return */
+};
[...]