On Fri, 22 Feb 2019, Vincenzo Frascino wrote:
> +/*
> + * There is one vdso_clocksource object in vvar for each vDSO clocksource
> + * (mono, raw). This struct is designed to keep vdso_data "cache-line friendly"
> + * and optimal in terms of access pattern.
> + *
> + * Note that mask and shift are the same for mono and raw.
> + */
> +struct vdso_clocksource {
> +	u64	mask;			/* Clocksource mask */
> +	u32	mult;			/* Clocksource multiplier */
> +	u32	shift;			/* Clocksource shift */

Can you please get rid of the tail comments and use proper kerneldoc
format?

> +/*
> + * vdso_data will be accessed by 32 and 64 bit code at the same time
> + * so we should be careful before modifying this structure.
> + */
> +struct vdso_data {
> +	u32	seq;			/* Timebase sequence counter */
> +
> +	s32	clock_mode;
> +	u64	cycle_last;		/* Timebase at clocksource init */
> +
> +	struct vdso_clocksource	cs[CLOCKSOURCE_BASES];

Why would you need different clocksource parameters? That really bloats
the data structure and makes the cache access pattern worse. Also the
vdso update needs to copy the same data over and over for no value.

The only clock ID which needs a different mult/shift would be
MONOTONIC_RAW, but if we expose that through the VDSO then we really
can be smarter than this. See incomplete and uncompilable patch below
for reference. You get the idea.

> +	struct vdso_timestamp	basetime[VDSO_BASES];
> +
> +	s32	tz_minuteswest;		/* Timezone definitions */
> +	s32	tz_dsttime;

Also please keep the tabular alignment of the members

	u32		foo;
	struct bar	bar;

that's way better to read than

	u32 foo;
	struct bar bar;

Thanks,

	tglx

8<-----------------

--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -139,9 +139,10 @@ notrace static inline u64 vgetcyc(int mo
 	return U64_MAX;
 }
 
-notrace static int do_hres(clockid_t clk, struct timespec *ts)
+notrace static int do_hres(clockid_t clk, struct timespec *ts,
+			   struct vsyscall_gtod_data *vdata)
 {
-	struct vgtod_ts *base = &gtod->basetime[clk];
+	struct vgtod_ts *base = &vdata->basetime[clk];
 	u64 cycles, last, sec, ns;
 	unsigned int seq;
 
@@ -168,9 +169,10 @@ notrace static int do_hres(clockid_t clk
 	return 0;
 }
 
-notrace static void do_coarse(clockid_t clk, struct timespec *ts)
+notrace static void do_coarse(clockid_t clk, struct timespec *ts,
+			      struct vsyscall_gtod_data *vdata)
 {
-	struct vgtod_ts *base = &gtod->basetime[clk];
+	struct vgtod_ts *base = &vdata->basetime[clk];
 	unsigned int seq;
 
 	do {
@@ -194,10 +196,12 @@ notrace int __vdso_clock_gettime(clockid
 	 */
 	msk = 1U << clock;
 	if (likely(msk & VGTOD_HRES)) {
-		return do_hres(clock, ts);
+		return do_hres(clock, ts, &gtod[0]);
 	} else if (msk & VGTOD_COARSE) {
-		do_coarse(clock, ts);
+		do_coarse(clock, ts, &gtod[0]);
 		return 0;
+	} else if (msk & VGTOD_RAW) {
+		return do_hres(clock, ts, &gtod[1]);
 	}
 	return vdso_fallback_gettime(clock, ts);
 }
@@ -210,7 +214,7 @@ notrace int __vdso_gettimeofday(struct t
 	if (likely(tv != NULL)) {
 		struct timespec *ts = (struct timespec *) tv;
 
-		do_hres(CLOCK_REALTIME, ts);
+		do_hres(CLOCK_REALTIME, ts, &gtod[0]);
 		tv->tv_usec /= 1000;
 	}
 	if (unlikely(tz != NULL)) {
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -30,6 +30,7 @@ struct vgtod_ts {
 #define VGTOD_BASES	(CLOCK_TAI + 1)
 #define VGTOD_HRES	(BIT(CLOCK_REALTIME) | BIT(CLOCK_MONOTONIC) | BIT(CLOCK_TAI))
 #define VGTOD_COARSE	(BIT(CLOCK_REALTIME_COARSE) | BIT(CLOCK_MONOTONIC_COARSE))
+#define VGTOD_RAW	BIT(CLOCK_MONOTONIC_RAW)
 
 /*
  * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time
@@ -49,7 +50,8 @@ struct vsyscall_gtod_data {
 	int		tz_minuteswest;
 	int		tz_dsttime;
 };
-extern struct vsyscall_gtod_data vsyscall_gtod_data;
+
+extern struct vsyscall_gtod_data vsyscall_gtod_data[2];
 
 extern int vclocks_used;
 static inline bool vclock_was_used(int vclock)
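
To make the update side of this concrete, the second array element
would be filled from the raw timekeeper. The sketch below is only an
illustration, not part of the patch above: the function name is made
up, and the seqcount write begin/end as well as the vclock_mode
handling are left out.

static void update_vsyscall_raw(struct timekeeper *tk)
{
	struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data[1];
	struct vgtod_ts *base = &vdata->basetime[CLOCK_MONOTONIC_RAW];

	/* Raw clocksource parameters, not subject to NTP adjustment */
	vdata->cycle_last	= tk->tkr_raw.cycle_last;
	vdata->mask		= tk->tkr_raw.mask;
	vdata->mult		= tk->tkr_raw.mult;
	vdata->shift		= tk->tkr_raw.shift;

	/* Base time in the shifted nanoseconds format do_hres() expects */
	base->sec		= tk->raw_sec;
	base->nsec		= tk->tkr_raw.xtime_nsec;
}

That way the reader side stays exactly the do_hres() path above and the
update side only copies the raw parameters once, into the one element
which actually needs them.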
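
And since kerneldoc came up above, a minimal sketch of what the first
quoted struct could look like with the tail comments folded into a
kerneldoc comment (descriptions taken from the quoted patch, the layout
is just an example):

/**
 * struct vdso_clocksource - vDSO clocksource parameters
 * @mask:	clocksource mask
 * @mult:	clocksource multiplier
 * @shift:	clocksource shift
 */
struct vdso_clocksource {
	u64	mask;
	u32	mult;
	u32	shift;
};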