On Fri, Oct 09, 2020 at 12:14:29PM -0500, YiFei Zhu wrote:
> From: YiFei Zhu <yifeifz2@xxxxxxxxxxxx>
> 
> The overhead of running Seccomp filters has been part of some past
> discussions [1][2][3]. Oftentimes, the filters have a large number
> of instructions that check syscall numbers one by one and jump based
> on that. Some users chain BPF filters which further enlarge the
> overhead. A recent work [6] comprehensively measures the Seccomp
> overhead and shows that the overhead is non-negligible and has a
> non-trivial impact on application performance.
> 
> We observed some common filters, such as docker's [4] or
> systemd's [5], will make most decisions based only on the syscall
> numbers, and as past discussions considered, a bitmap where each bit
> represents a syscall makes most sense for these filters.
> 
> The fast (common) path for seccomp should be that the filter permits
> the syscall to pass through, and failing seccomp is expected to be
> an exceptional case; it is not expected for userspace to call a
> denylisted syscall over and over.
> 
> When it can be concluded that an allow must occur for the given
> architecture and syscall pair (this determination is introduced in
> the next commit), seccomp will immediately allow the syscall,
> bypassing further BPF execution.
> 
> Each architecture number has its own bitmap. The architecture
> number in seccomp_data is checked against the defined architecture
> number constant before proceeding to test the bit against the
> bitmap with the syscall number as the index of the bit in the
> bitmap, and if the bit is set, seccomp returns allow. The bitmaps
> are all clear in this patch and will be initialized in the next
> commit.
> 
> [1] https://lore.kernel.org/linux-security-module/c22a6c3cefc2412cad00ae14c1371711@xxxxxxxxxx/T/
> [2] https://lore.kernel.org/lkml/202005181120.971232B7B@keescook/T/
> [3] https://github.com/seccomp/libseccomp/issues/116
> [4] https://github.com/moby/moby/blob/ae0ef82b90356ac613f329a8ef5ee42ca923417d/profiles/seccomp/default.json
> [5] https://github.com/systemd/systemd/blob/6743a1caf4037f03dc51a1277855018e4ab61957/src/shared/seccomp-util.c#L270
> [6] Draco: Architectural and Operating System Support for System Call Security
>     https://tianyin.github.io/pub/draco.pdf, MICRO-53, Oct. 2020
> 
> Co-developed-by: Dimitrios Skarlatos <dskarlat@xxxxxxxxxx>
> Signed-off-by: Dimitrios Skarlatos <dskarlat@xxxxxxxxxx>
> Signed-off-by: YiFei Zhu <yifeifz2@xxxxxxxxxxxx>
> ---
>  kernel/seccomp.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 72 insertions(+)
> 
> diff --git a/kernel/seccomp.c b/kernel/seccomp.c
> index ae6b40cc39f4..73f6b6e9a3b0 100644
> --- a/kernel/seccomp.c
> +++ b/kernel/seccomp.c
> @@ -143,6 +143,34 @@ struct notification {
>  	struct list_head notifications;
>  };
>  
> +#ifdef SECCOMP_ARCH_NATIVE
> +/**
> + * struct action_cache - per-filter cache of seccomp actions per
> + *			 arch/syscall pair
> + *
> + * @allow_native: A bitmap where each bit represents whether the
> + *		  filter will always allow the syscall, for the
> + *		  native architecture.
> + * @allow_compat: A bitmap where each bit represents whether the
> + *		  filter will always allow the syscall, for the
> + *		  compat architecture.
> + */
> +struct action_cache {
> +	DECLARE_BITMAP(allow_native, SECCOMP_ARCH_NATIVE_NR);
> +#ifdef SECCOMP_ARCH_COMPAT
> +	DECLARE_BITMAP(allow_compat, SECCOMP_ARCH_COMPAT_NR);
> +#endif
> +};
> +#else
> +struct action_cache { };
> +
> +static inline bool seccomp_cache_check_allow(const struct seccomp_filter *sfilter,
> +					     const struct seccomp_data *sd)
> +{
> +	return false;
> +}
> +#endif /* SECCOMP_ARCH_NATIVE */
> +
>  /**
>   * struct seccomp_filter - container for seccomp BPF programs
>   *
> @@ -298,6 +326,47 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
>  	return 0;
>  }
>  
> +#ifdef SECCOMP_ARCH_NATIVE
> +static inline bool seccomp_cache_check_allow_bitmap(const void *bitmap,
> +						    size_t bitmap_size,
> +						    int syscall_nr)
> +{
> +	if (unlikely(syscall_nr < 0 || syscall_nr >= bitmap_size))
> +		return false;
> +	syscall_nr = array_index_nospec(syscall_nr, bitmap_size);
> +
> +	return test_bit(syscall_nr, bitmap);
> +}
> +
> +/**
> + * seccomp_cache_check_allow - lookup seccomp cache
> + * @sfilter: The seccomp filter
> + * @sd: The seccomp data to lookup the cache with
> + *
> + * Returns true if the seccomp_data is cached and allowed.
> + */
> +static inline bool seccomp_cache_check_allow(const struct seccomp_filter *sfilter,
> +					     const struct seccomp_data *sd)
> +{
> +	int syscall_nr = sd->nr;
> +	const struct action_cache *cache = &sfilter->cache;
> +
> +	if (likely(sd->arch == SECCOMP_ARCH_NATIVE))
> +		return seccomp_cache_check_allow_bitmap(cache->allow_native,
> +							SECCOMP_ARCH_NATIVE_NR,
> +							syscall_nr);
> +#ifdef SECCOMP_ARCH_COMPAT
> +	if (likely(sd->arch == SECCOMP_ARCH_COMPAT))
> +		return seccomp_cache_check_allow_bitmap(cache->allow_compat,
> +							SECCOMP_ARCH_COMPAT_NR,
> +							syscall_nr);
> +#endif /* SECCOMP_ARCH_COMPAT */
> +
> +	WARN_ON_ONCE(true);
> +	return false;
> +}
> +#endif /* SECCOMP_ARCH_NATIVE */

A small optimization for the non-compat case might be to do this to avoid
the sd->arch test (sd->arch should have no way to ever change in such
builds):

static inline bool seccomp_cache_check_allow(const struct seccomp_filter *sfilter,
					     const struct seccomp_data *sd)
{
	const struct action_cache *cache = &sfilter->cache;

#ifndef SECCOMP_ARCH_COMPAT
	/* A native-only architecture doesn't need to check sd->arch. */
	return seccomp_cache_check_allow_bitmap(cache->allow_native,
						SECCOMP_ARCH_NATIVE_NR,
						sd->nr);
#else /* SECCOMP_ARCH_COMPAT */
	if (likely(sd->arch == SECCOMP_ARCH_NATIVE))
		return seccomp_cache_check_allow_bitmap(cache->allow_native,
							SECCOMP_ARCH_NATIVE_NR,
							sd->nr);

	if (likely(sd->arch == SECCOMP_ARCH_COMPAT))
		return seccomp_cache_check_allow_bitmap(cache->allow_compat,
							SECCOMP_ARCH_COMPAT_NR,
							sd->nr);
#endif

	WARN_ON_ONCE(true);
	return false;
}

> +
>  /**
>   * seccomp_run_filters - evaluates all seccomp filters against @sd
>   * @sd: optional seccomp data to be passed to filters
> @@ -320,6 +389,9 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
>  	if (WARN_ON(f == NULL))
>  		return SECCOMP_RET_KILL_PROCESS;
>  
> +	if (seccomp_cache_check_allow(f, sd))
> +		return SECCOMP_RET_ALLOW;
> +
>  	/*
>  	 * All filters in the list are evaluated and the lowest BPF return
>  	 * value always takes priority (ignoring the DATA).
> -- 
> 2.28.0
> 

This is all looking good; thank you! I'm doing some test builds/runs now. :)

-- 
Kees Cook
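
To make the commit message's observation concrete, here is a minimal
sketch (not part of this patch) of the kind of docker/systemd-style
allowlist the cache targets: classic seccomp BPF installed via prctl(),
where every verdict after the arch check depends only on
seccomp_data->nr. The choice of x86-64, the four allowed syscalls, and
the EPERM fallback are illustrative assumptions, not taken from either
project's real profile.

/*
 * Illustrative only: an allowlist that decides purely on the syscall
 * number, so every verdict it produces could be answered by the
 * per-arch bitmap without running the program. x86-64 assumed.
 */
#include <stddef.h>
#include <errno.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int install_allowlist(void)
{
	struct sock_filter insns[] = {
		/* Kill outright if the reported arch is not the one we built for. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, arch)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),

		/* From here on, only the syscall number is examined. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_read,       4, 0),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_write,      3, 0),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_exit_group, 2, 0),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_brk,        1, 0),
		BPF_STMT(BPF_RET | BPF_K,
			 SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	/* Required so an unprivileged task may install the filter. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}

Real profiles have this same shape but with hundreds of allowed entries,
which is where the one-comparison-per-syscall cost described in the
commit message comes from.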
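
For anyone wanting to eyeball the overhead locally before and after the
series, a rough timing sketch follows; it is only illustrative (the
iteration count, the choice of getppid(), and the lack of CPU pinning are
arbitrary simplifications). The syscall being timed should be on the
installed allowlist so that the allowed fast path, the one the bitmap is
meant to accelerate, is what actually gets measured.

/* Rough per-syscall timing sketch; illustrative only. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	const long iters = 1000000;
	struct timespec t0, t1;

	/* Optionally install a filter here, e.g. install_allowlist() above,
	 * extended to allow the syscall timed below. */

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (long i = 0; i < iters; i++)
		syscall(SYS_getppid);	/* forced into the kernel; no vDSO path */
	clock_gettime(CLOCK_MONOTONIC, &t1);

	double ns = (t1.tv_sec - t0.tv_sec) * 1e9 + (t1.tv_nsec - t0.tv_nsec);
	printf("%.1f ns per getppid() call\n", ns / iters);
	return 0;
}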