On 28/09/2015 07:38, Haozhong Zhang wrote:
>
> -static u64 __scale_tsc(u64 ratio, u64 tsc)
> -{
> -    u64 mult, frac, _tsc;
> -
> -    mult = ratio >> 32;
> -    frac = ratio & ((1ULL << 32) - 1);
> -
> -    _tsc = tsc;
> -    _tsc *= mult;
> -    _tsc += (tsc >> 32) * frac;
> -    _tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;
> -
> -    return _tsc;
> -}

This is basically

    return mul_u64_u64_shr(ratio, tsc, kvm_tsc_scaling_ratio_frac_bits);

except that Linux has no mul_u64_u64_shr function, only mul_u64_u32_shr.
We should implement that function in include/linux/math64.h instead.

For the x86_64 case (or any other CONFIG_ARCH_SUPPORTS_INT128
architecture) we can just write it directly, as is done already for
mul_u64_u32_shr.  For the 32-bit case, here is an implementation of both
the multiplication and the shift, lifted from QEMU:

static inline void mul64(uint64_t *lo, uint64_t *hi, uint64_t a, uint64_t b)
{
    typedef union {
        uint64_t ll;
        struct {
#ifdef __BIG_ENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } LL;
    LL rl, rm, rn, rh, a0, b0;
    uint64_t c;

    a0.ll = a;
    b0.ll = b;

    rl.ll = (uint64_t)a0.l.low * b0.l.low;
    rm.ll = (uint64_t)a0.l.low * b0.l.high;
    rn.ll = (uint64_t)a0.l.high * b0.l.low;
    rh.ll = (uint64_t)a0.l.high * b0.l.high;

    c = (uint64_t)rl.l.high + rm.l.low + rn.l.low;
    rl.l.high = c;
    c >>= 32;
    c = c + rm.l.high + rn.l.high + rh.l.low;
    rh.l.low = c;
    rh.l.high += (uint32_t)(c >> 32);

    *lo = rl.ll;
    *hi = rh.ll;
}

static inline void rshift128(uint64_t *lo, uint64_t *hi, int n)
{
    uint64_t h;

    if (!n) {
        return;
    }
    h = *hi >> (n & 63);
    if (n >= 64) {
        *hi = 0;
        *lo = h;
    } else {
        *lo = (*lo >> n) | (*hi << (64 - n));
        *hi = h;
    }
}

and you can easily reuse this code in Linux with just the uintNN_t types
changed to uNN, plus some extra cleanups, when it's placed in a single
function.
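Concretely, the 64-bit case could be a one-liner mirroring the style of
the existing mul_u64_u32_shr; this is just a sketch, and the exact guard
and placement in math64.h are assumptions:

#ifdef CONFIG_ARCH_SUPPORTS_INT128
/*
 * Sketch only: do the full 64x64->128 multiply via __int128, then
 * shift the 128-bit product right by 'shift' bits.
 */
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
    return (u64)(((unsigned __int128)a * b) >> shift);
}
#endif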
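And for the 32-bit fallback, a sketch of what the two QEMU helpers
might look like once folded into a single function with uintNN_t
renamed to uNN (variable names are illustrative, and the caller has to
guarantee the shifted result fits in 64 bits):

static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
    union {
        u64 ll;
        struct {
#ifdef __BIG_ENDIAN
            u32 high, low;
#else
            u32 low, high;
#endif
        } l;
    } rl, rm, rn, rh, a0, b0;
    u64 c, lo, hi, h;

    a0.ll = a;
    b0.ll = b;

    /* Four 32x32->64 partial products, as in mul64 above... */
    rl.ll = (u64)a0.l.low * b0.l.low;
    rm.ll = (u64)a0.l.low * b0.l.high;
    rn.ll = (u64)a0.l.high * b0.l.low;
    rh.ll = (u64)a0.l.high * b0.l.high;

    /* ...summed with carries into the 128-bit product hi:lo. */
    c = (u64)rl.l.high + rm.l.low + rn.l.low;
    rl.l.high = c;
    c >>= 32;
    c = c + rm.l.high + rn.l.high + rh.l.low;
    rh.l.low = c;
    rh.l.high += (u32)(c >> 32);

    lo = rl.ll;
    hi = rh.ll;

    /* rshift128, inlined: shift hi:lo right by 0..127 bits. */
    if (!shift)
        return lo;
    h = hi >> (shift & 63);
    if (shift >= 64)
        return h;
    return (lo >> shift) | (hi << (64 - shift));
}

Merging the two helpers also means the 128-bit intermediate is not
passed around through pointers, so the compiler can keep it in
registers.

Paolo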