From: Andrei Vagin <avagin@xxxxxxxxx>

As discussed in the timens RFC, adding a new conditional branch
`if (inside_time_ns)` to the VDSO for all processes is undesirable.

To address this, there are two versions of the VDSO .so: one for host
tasks (without any penalty) and one for processes inside a time
namespace, where clk_to_ns() subtracts the namespace offsets from the
host's time.

Introduce vdso_static_branch_unlikely(), which is similar to
static_branch_unlikely(), and alias it to
timens_static_branch_unlikely() under CONFIG_TIME_NS. The timens code
in the VDSO will look like this:

	if (timens_static_branch_unlikely()) {
		clk_to_ns(clk, ts);
	}

The version of the VDSO that is compiled from sources never executes
clk_to_ns(): the straight-line code path contains a no-op. Patching
that no-op with a jump instruction to the out-of-line true branch
produces the timens version of the VDSO library.

Signed-off-by: Andrei Vagin <avagin@xxxxxxxxx>
Co-developed-by: Dmitry Safonov <dima@xxxxxxxxxx>
Signed-off-by: Dmitry Safonov <dima@xxxxxxxxxx>
---
 arch/x86/include/asm/jump_label.h | 14 ++++++++++++++
 lib/vdso/gettimeofday.c           | 10 ++++++++--
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 06c3cc22a058..376efb53183b 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -53,6 +53,20 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
 	return true;
 }
 
+static __always_inline bool vdso_static_branch_unlikely(void)
+{
+	asm_volatile_goto("1:\n\t"
+		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+		".pushsection __jump_table, \"aw\"\n\t"
+		"2: .word 1b - 2b, %l[l_yes] - 2b\n\t"
+		".popsection\n\t"
+		: : : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
 #else /* __ASSEMBLY__ */
 
 .macro STATIC_JUMP_IF_TRUE target, key, def
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 7525433f8ba4..605bdb92055d 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/hrtimer_defs.h>
 #include <linux/timens_offsets.h>
+#include <linux/jump_label.h>
 
 #include <vdso/datapage.h>
 #include <vdso/helpers.h>
@@ -43,6 +44,8 @@ u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
 
 extern u8 timens_page __attribute__((visibility("hidden")));
 
+#define timens_static_branch_unlikely vdso_static_branch_unlikely
+
 notrace static __always_inline void clk_to_ns(clockid_t clk, struct __kernel_timespec *ts)
 {
 	struct timens_offsets *timens = (struct timens_offsets *) &timens_page;
@@ -79,6 +82,7 @@ notrace static __always_inline void clk_to_ns(clockid_t clk, struct __kernel_tim
 }
 #else
 notrace static __always_inline void clk_to_ns(clockid_t clk, struct __kernel_timespec *ts) {}
+notrace static __always_inline bool timens_static_branch_unlikely(void) { return false; }
 #endif
 
 static int do_hres(const struct vdso_data *vd, clockid_t clk,
@@ -108,7 +112,8 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
 	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
 	ts->tv_nsec = ns;
 
-	clk_to_ns(clk, ts);
+	if (timens_static_branch_unlikely())
+		clk_to_ns(clk, ts);
 
 	return 0;
 }
@@ -125,7 +130,8 @@ static void do_coarse(const struct vdso_data *vd, clockid_t clk,
 		ts->tv_nsec = vdso_ts->nsec;
 	} while (unlikely(vdso_read_retry(vd, seq)));
 
-	clk_to_ns(clk, ts);
+	if (timens_static_branch_unlikely())
+		clk_to_ns(clk, ts);
 }
 
 static __maybe_unused int
-- 
2.22.0
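
For illustration, the build-time nop-to-jmp conversion could look
roughly like the sketch below. Everything here is hypothetical and not
part of this patch: patch_vdso_timens(), the way the __jump_table
bounds are obtained from the ELF headers, and the assumption that the
section's file offsets match its in-image offsets. Only the entry
layout (two 16-bit offsets, both relative to the entry itself) follows
the inline asm in vdso_static_branch_unlikely() above.

	#include <stdint.h>
	#include <string.h>

	/*
	 * Layout of one __jump_table entry, per the inline asm:
	 * "2: .word 1b - 2b, %l[l_yes] - 2b"
	 */
	struct vdso_jump_entry {
		int16_t code;	/* 1b - 2b: NOP site, relative to this entry */
		int16_t target;	/* l_yes - 2b: true branch, relative to this entry */
	};

	#define JMP32_INSN_SIZE		5	/* e9 + rel32 */
	#define JMP32_INSN_OPCODE	0xe9

	/*
	 * image:   in-memory copy of the host vdso .so
	 * jt_off:  offset of the __jump_table section (from ELF headers)
	 * jt_size: size of that section
	 *
	 * Rewrites every recorded 5-byte NOP as "jmp l_yes", so that
	 * clk_to_ns() becomes reachable in the timens copy.
	 */
	static void patch_vdso_timens(uint8_t *image, size_t jt_off, size_t jt_size)
	{
		struct vdso_jump_entry *e = (void *)(image + jt_off);
		struct vdso_jump_entry *end = (void *)(image + jt_off + jt_size);

		for (; e < end; e++) {
			uint8_t *site = (uint8_t *)e + e->code;	/* the NOP */
			uint8_t *dest = (uint8_t *)e + e->target;	/* l_yes */
			int32_t rel = dest - (site + JMP32_INSN_SIZE);

			site[0] = JMP32_INSN_OPCODE;
			memcpy(site + 1, &rel, sizeof(rel));
		}
	}

Since the NOP emitted by STATIC_KEY_INIT_NOP and "jmp rel32" are both
five bytes, the rewrite does not move any code, so no other
relocations in the image need to change.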