The following changes since commit 9a496382133e8003bd56ab6f3d260c5afadae555:

  init: unify 't' time period (2018-07-24 15:23:28 -0600)

are available in the git repository at:

  git://git.kernel.dk/fio.git master

for you to fetch changes up to 40e8d8314f10a578765e20a4eb574b2603d292df:

  Merge branch 'fio-histo-log-pctiles' of https://github.com/parallel-fs-utils/fio (2018-07-25 14:42:57 -0600)

----------------------------------------------------------------
Ben England (4):
      get latency percentiles over time from fio histo logs
      use interpolation for more accurate percentile calculation
      switch to argparse module for CLI parsing
      design document for tools/hist/fio-histo-log-pctiles.py

Jens Axboe (2):
      Merge branch 'fio-c++-engine' of https://github.com/tchaikov/fio
      Merge branch 'fio-histo-log-pctiles' of https://github.com/parallel-fs-utils/fio

Kefu Chai (1):
      replace typeof with __typeof__

 compiler/compiler.h                 |   4 +-
 doc/fio-histo-log-pctiles.pdf       | Bin 0 -> 182996 bytes
 flist.h                             |   4 +-
 minmax.h                            |  12 +-
 oslib/libmtd_common.h               |  10 +-
 tools/hist/fio-histo-log-pctiles.py | 660 ++++++++++++++++++++++++++++++++++++
 verify.c                            |   2 +-
 7 files changed, 676 insertions(+), 16 deletions(-)
 create mode 100644 doc/fio-histo-log-pctiles.pdf
 create mode 100755 tools/hist/fio-histo-log-pctiles.py

---

Diff of recent changes:

diff --git a/compiler/compiler.h b/compiler/compiler.h
index dacb737..ddfbcc1 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -28,7 +28,7 @@
  */
 #define typecheck(type,x) \
 ({	type __dummy; \
-	typeof(x) __dummy2; \
+	__typeof__(x) __dummy2; \
 	(void)(&__dummy == &__dummy2); \
 	1; \
 })
@@ -70,7 +70,7 @@
 #ifdef FIO_INTERNAL
 #define ARRAY_SIZE(x)    (sizeof((x)) / (sizeof((x)[0])))
-#define FIELD_SIZE(s, f) (sizeof(((typeof(s))0)->f))
+#define FIELD_SIZE(s, f) (sizeof(((__typeof__(s))0)->f))
 
 #endif
 
 #endif
diff --git a/doc/fio-histo-log-pctiles.pdf b/doc/fio-histo-log-pctiles.pdf
new file mode 100644
index 0000000..069ab99
Binary files /dev/null and b/doc/fio-histo-log-pctiles.pdf differ
diff --git a/flist.h b/flist.h
index 2ca3d77..5437cd8 100644
--- a/flist.h
+++ b/flist.h
@@ -4,8 +4,8 @@
 #include <stdlib.h>
 #include <stddef.h>
 
-#define container_of(ptr, type, member) ({			\
-	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
+#define container_of(ptr, type, member) ({				\
+	const __typeof__( ((type *)0)->member ) *__mptr = (ptr);	\
 	(type *)( (char *)__mptr - offsetof(type,member) );})
 
 /*
diff --git a/minmax.h b/minmax.h
index afc78f0..ec0848c 100644
--- a/minmax.h
+++ b/minmax.h
@@ -3,23 +3,23 @@
 
 #ifndef min
 #define min(x,y) ({ \
-	typeof(x) _x = (x); \
-	typeof(y) _y = (y); \
+	__typeof__(x) _x = (x); \
+	__typeof__(y) _y = (y); \
 	(void) (&_x == &_y); \
 	_x < _y ? _x : _y; })
 #endif
 
 #ifndef max
 #define max(x,y) ({ \
-	typeof(x) _x = (x); \
-	typeof(y) _y = (y); \
+	__typeof__(x) _x = (x); \
+	__typeof__(y) _y = (y); \
 	(void) (&_x == &_y); \
 	_x > _y ? _x : _y; })
 #endif
 
 #define min_not_zero(x, y) ({		\
-	typeof(x) __x = (x); \
-	typeof(y) __y = (y); \
+	__typeof__(x) __x = (x); \
+	__typeof__(y) __y = (y); \
 	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
 
 #endif
diff --git a/oslib/libmtd_common.h b/oslib/libmtd_common.h
index 87f93b6..4ed9f0b 100644
--- a/oslib/libmtd_common.h
+++ b/oslib/libmtd_common.h
@@ -49,18 +49,18 @@ extern "C" {
 #define min(a, b) MIN(a, b) /* glue for linux kernel source */
 #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
 
-#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
+#define ALIGN(x,a) __ALIGN_MASK(x,(__typeof__(x))(a)-1)
 #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
 
 #define min_t(t,x,y) ({ \
-	typeof((x)) _x = (x); \
-	typeof((y)) _y = (y); \
+	__typeof__((x)) _x = (x); \
+	__typeof__((y)) _y = (y); \
 	(_x < _y) ? _x : _y; \
 })
 
 #define max_t(t,x,y) ({ \
-	typeof((x)) _x = (x); \
-	typeof((y)) _y = (y); \
+	__typeof__((x)) _x = (x); \
+	__typeof__((y)) _y = (y); \
 	(_x > _y) ? _x : _y; \
 })
 
diff --git a/tools/hist/fio-histo-log-pctiles.py b/tools/hist/fio-histo-log-pctiles.py
new file mode 100755
index 0000000..bad6887
--- /dev/null
+++ b/tools/hist/fio-histo-log-pctiles.py
@@ -0,0 +1,660 @@
+#!/usr/bin/env python
+
+# module to parse fio histogram log files, not using pandas
+# runs in python v2 or v3
+# to get help with the CLI: $ python fio-histo-log-pctiles.py -h
+# this can be run standalone as a script but is callable
+# assumes all threads run for same time duration
+# assumes all threads are doing the same thing for the entire run
+
+# percentiles:
+#   0 - min latency
+#  50 - median
+# 100 - max latency
+
+# TO-DO:
+#   separate read and write stats for randrw mixed workload
+#   report average latency if needed
+#   prove that it works (partially done with unit tests)
+
+# to run unit tests, set UNITTEST environment variable to anything
+# if you do this, don't pass normal CLI parameters to it
+# otherwise it runs the CLI
+
+import sys, os, math, copy
+from copy import deepcopy
+import argparse
+import unittest2
+
+msec_per_sec = 1000
+nsec_per_usec = 1000
+
+class FioHistoLogExc(Exception):
+    pass
+
+# if there is an error, print message and exit with error status
+
+def myabort(msg):
+    print('ERROR: ' + msg)
+    sys.exit(1)
+
+# convert histogram log file into a list of
+# (time_ms, direction, bsz, buckets) tuples where
+# - time_ms is the time in msec at which the log record was written
+# - direction is 0 (read) or 1 (write)
+# - bsz is block size (not used)
+# - buckets is a CSV list of counters that make up the histogram
+# caller decides if the expected number of counters are present
+
+
+def exception_suffix( record_num, pathname ):
+    return 'in histogram record %d file %s' % (record_num+1, pathname)
+
+# log file parser raises FioHistoLogExc exceptions
+# it returns histogram buckets in whatever unit fio uses
+
+def parse_hist_file(logfn, buckets_per_interval):
+    max_timestamp_ms = 0.0
+
+    with open(logfn, 'r') as f:
+        records = [ l.strip() for l in f.readlines() ]
+    intervals = []
+    for k, r in enumerate(records):
+        if r == '':
+            continue
+        tokens = r.split(',')
+        try:
+            int_tokens = [ int(t) for t in tokens ]
+        except ValueError as e:
+            raise FioHistoLogExc('non-integer value %s' % exception_suffix(k, logfn))
+
+        neg_ints = list(filter( lambda tk : tk < 0, int_tokens ))
+        if len(neg_ints) > 0:
+            raise FioHistoLogExc('negative integer value %s' % exception_suffix(k, logfn))
+
+        if len(int_tokens) < 3:
+            raise FioHistoLogExc('too few numbers %s' % exception_suffix(k, logfn))
+
+        time_ms = int_tokens[0]
+        if time_ms > max_timestamp_ms:
+            max_timestamp_ms = time_ms
+
+        direction = int_tokens[1]
+        if direction != 0 and direction != 1:
+            raise FioHistoLogExc('invalid I/O direction %s' % exception_suffix(k, logfn))
+
+        bsz = int_tokens[2]
+        if bsz > (1 << 24):
+            raise FioHistoLogExc('block size too large %s' % exception_suffix(k, logfn))
+
+        buckets = int_tokens[3:]
+        if len(buckets) != buckets_per_interval:
+            raise FioHistoLogExc('%d buckets per interval but %d expected in %s' %
+                    (len(buckets), buckets_per_interval, exception_suffix(k, logfn)))
+        intervals.append((time_ms, direction, bsz, buckets))
+    if len(intervals) == 0:
+        raise FioHistoLogExc('no records in %s' % logfn)
+    return (intervals, max_timestamp_ms)
+
+
+# compute time range for each bucket index in histogram record
+# see comments in https://github.com/axboe/fio/blob/master/stat.h
+# for a description of bucket groups and buckets
+# fio v3 bucket ranges are in nanosec (since response times are measured in nanosec)
+# but we convert fio v3 nanosecs to floating-point microseconds
+
+def time_ranges(groups, counters_per_group, fio_version=3):
+    bucket_width = 1
+    bucket_base = 0
+    bucket_intervals = []
+    for g in range(0, groups):
+        for b in range(0, counters_per_group):
+            rmin = float(bucket_base)
+            rmax = rmin + bucket_width
+            if fio_version == 3:
+                rmin /= nsec_per_usec
+                rmax /= nsec_per_usec
+            bucket_intervals.append( [rmin, rmax] )
+            bucket_base += bucket_width
+        if g != 0:
+            bucket_width *= 2
+    return bucket_intervals
+
+
+# compute number of time quantum intervals in the test
+
+def get_time_intervals(time_quantum, max_timestamp_ms):
+    # round down to nearest second
+    max_timestamp = max_timestamp_ms // msec_per_sec
+    # round up to nearest whole multiple of time_quantum
+    time_interval_count = (max_timestamp + time_quantum) // time_quantum
+    end_time = time_interval_count * time_quantum
+    return (end_time, time_interval_count)
+
+# align raw histogram log data to time quantum so
+# we can then combine histograms from different threads with addition
+# for randrw workload we count both reads and writes in same output bucket
+# but we separate reads and writes for purposes of calculating
+# end time for histogram record.
+# this requires us to weight a raw histogram bucket by the
+# fraction of time quantum that the bucket overlaps the current
+# time quantum interval
+# for example, if we have a bucket with 515 samples for time interval
+# [ 1010, 2014 ] msec since start of test, and time quantum is 1 sec, then
+# for time quantum interval [ 1000, 2000 ] msec, the overlap is
+# (2000 - 1010) / (2000 - 1000) = 0.99
+# so the contribution of this bucket to this time quantum is
+# 515 x 0.99 = 509.85
+
+def align_histo_log(raw_histogram_log, time_quantum, bucket_count, max_timestamp_ms):
+
+    # slice up test time into intervals of time_quantum seconds
+
+    (end_time, time_interval_count) = get_time_intervals(time_quantum, max_timestamp_ms)
+    time_qtm_ms = time_quantum * msec_per_sec
+    end_time_ms = end_time * msec_per_sec
+    aligned_intervals = []
+    for j in range(0, time_interval_count):
+        aligned_intervals.append((
+            j * time_qtm_ms,
+            [ 0.0 for j in range(0, bucket_count) ] ))
+
+    log_record_count = len(raw_histogram_log)
+    for k, record in enumerate(raw_histogram_log):
+
+        # find next record with same direction to get end-time
+        # have to avoid going past end of array
+        # for fio randrw workload,
+        # we have read and write records on same time interval
+        # sometimes read and write records are in opposite order
+        # assertion checks that next read/write record
+        # can be separated by at most 2 other records
+
+        (time_msec, direction, sz, interval_buckets) = record
+        if k+1 < log_record_count:
+            (time_msec_end, direction2, _, _) = raw_histogram_log[k+1]
+            if direction2 != direction:
+                if k+2 < log_record_count:
+                    (time_msec_end, direction2, _, _) = raw_histogram_log[k+2]
+                    if direction2 != direction:
+                        if k+3 < log_record_count:
+                            (time_msec_end, direction2, _, _) = raw_histogram_log[k+3]
+                            assert direction2 == direction
+                        else:
+                            time_msec_end = end_time_ms
+                else:
+                    time_msec_end = end_time_ms
+        else:
+            time_msec_end = end_time_ms
+
+        # calculate first quantum that overlaps this histogram record
+
+        qtm_start_ms = (time_msec // time_qtm_ms) * time_qtm_ms
+        qtm_end_ms = ((time_msec + time_qtm_ms) // time_qtm_ms) * time_qtm_ms
+        qtm_index = qtm_start_ms // time_qtm_ms
+
+        # for each quantum that overlaps this histogram record's time interval
+
+        while qtm_start_ms < time_msec_end:  # while quantum overlaps record
+
+            # calculate fraction of time that this quantum
+            # overlaps histogram record's time interval
+
+            overlap_start = max(qtm_start_ms, time_msec)
+            overlap_end = min(qtm_end_ms, time_msec_end)
+            weight = float(overlap_end - overlap_start)
+            weight /= (time_msec_end - time_msec)
+            (_,aligned_histogram) = aligned_intervals[qtm_index]
+            for bx, b in enumerate(interval_buckets):
+                weighted_bucket = weight * b
+                aligned_histogram[bx] += weighted_bucket
+
+            # advance to the next time quantum
+
+            qtm_start_ms += time_qtm_ms
+            qtm_end_ms += time_qtm_ms
+            qtm_index += 1
+
+    return aligned_intervals
+
+# add histogram in "source" to histogram in "target"
+# it is assumed that the 2 histograms are precisely time-aligned
+
+def add_to_histo_from( target, source ):
+    for b in range(0, len(source)):
+        target[b] += source[b]
+
+# compute percentiles
+# inputs:
+#   buckets: histogram bucket array
+#   wanted: list of floating-pt percentiles to calculate
+#   time_ranges: [tmin,tmax) time interval for each bucket
+# returns None if no I/O reported.
+# otherwise we would be dividing by zero
+# think of buckets as probability distribution function
+# and this loop is integrating to get cumulative distribution function
+
+def get_pctiles(buckets, wanted, time_ranges):
+
+    # get total of IO requests done
+    total_ios = 0
+    for io_count in buckets:
+        total_ios += io_count
+
+    # don't return percentiles if no I/O was done during interval
+    if total_ios == 0.0:
+        return None
+
+    pctile_count = len(wanted)
+
+    # results returned as dictionary keyed by percentile
+    pctile_result = {}
+
+    # index of next percentile in list
+    pctile_index = 0
+
+    # next percentile
+    next_pctile = wanted[pctile_index]
+
+    # no one is interested in percentiles bigger than this but not 100.0
+    # this prevents floating-point error from preventing loop exit
+    almost_100 = 99.9999
+
+    # pct is the percentile corresponding to
+    # all I/O requests up through bucket b
+    pct = 0.0
+    total_so_far = 0
+    for b, io_count in enumerate(buckets):
+        if io_count == 0:
+            continue
+        total_so_far += io_count
+        # last_pct is the percentile corresponding to
+        # all I/O requests up to, but not including, bucket b
+        last_pct = pct
+        pct = 100.0 * float(total_so_far) / total_ios
+        # a single bucket could satisfy multiple pctiles
+        # so this must be a while loop
+        # for the 100-percentile (max latency) case, no bucket exceeds it
+        # so we must stop there
+        while ((next_pctile == 100.0 and pct >= almost_100) or
+               (next_pctile < 100.0  and pct > next_pctile)):
+            # interpolate between min and max time for bucket time interval
+            # we keep the time_ranges access inside this loop,
+            # even though it could be above the loop,
+            # because in many cases we will not even be entering
+            # the loop, so we optimize out these accesses
+            range_max_time = time_ranges[b][1]
+            range_min_time = time_ranges[b][0]
+            offset_frac = (next_pctile - last_pct)/(pct - last_pct)
+            interpolation = range_min_time + (offset_frac*(range_max_time - range_min_time))
+            pctile_result[next_pctile] = interpolation
+            pctile_index += 1
+            if pctile_index == pctile_count:
+                break
+            next_pctile = wanted[pctile_index]
+        if pctile_index == pctile_count:
+            break
+    assert pctile_index == pctile_count
+    return pctile_result
+
+
+# this is really the main program
+
+def compute_percentiles_from_logs():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--fio-version", dest="fio_version",
+        default="3", choices=[2,3], type=int,
+        help="fio version (default=3)")
+    parser.add_argument("--bucket-groups", dest="bucket_groups", default="29", type=int,
+        help="fio histogram bucket groups (default=29)")
+    parser.add_argument("--bucket-bits", dest="bucket_bits",
+        default="6", type=int,
+        help="fio histogram buckets-per-group bits (default=6 means 64 buckets/group)")
+    parser.add_argument("--percentiles", dest="pctiles_wanted",
+        default=[0., 50., 95., 99., 100.], type=float, nargs='+',
+        help="percentiles to calculate (default=0 50 95 99 100)")
+    parser.add_argument("--time-quantum", dest="time_quantum",
+        default="1", type=int,
+        help="time quantum in seconds (default=1)")
+    parser.add_argument("--output-unit", dest="output_unit",
+        default="usec", type=str,
+        help="Latency percentile output unit: msec|usec|nsec (default usec)")
+    parser.add_argument("file_list", nargs='+')
+    args = parser.parse_args()
+    print(args)
+
+    if not args.bucket_groups:
+        # default depends on fio version
+        if args.fio_version == 2:
+            args.bucket_groups = 19
+        else:
+            # default in fio 3.x
+            args.bucket_groups = 29
+
+    # print parameters
+
+    print('bucket groups = %d' % args.bucket_groups)
+    print('bucket bits = %d' % args.bucket_bits)
+    print('time quantum = %d sec' % args.time_quantum)
+    print('percentiles = %s' % ','.join([ str(p) for p in args.pctiles_wanted ]))
+    buckets_per_group = 1 << args.bucket_bits
+    print('buckets per group = %d' % buckets_per_group)
+    buckets_per_interval = buckets_per_group * args.bucket_groups
+    print('buckets per interval = %d' % buckets_per_interval)
+    bucket_index_range = range(0, buckets_per_interval)
+    if args.time_quantum == 0:
+        myabort('time-quantum must be a positive number of seconds')
+    print('output unit = ' + args.output_unit)
+    unit_divisors = { 'msec': 1000.0, 'usec': 1.0, 'nsec': 0.001 }
+    if args.output_unit not in unit_divisors:
+        myabort('output unit must be one of msec|usec|nsec')
+    time_divisor = unit_divisors[args.output_unit]
+
+    # calculate response time interval associated with each histogram bucket
+
+    bucket_times = time_ranges(args.bucket_groups, buckets_per_group, fio_version=args.fio_version)
+
+    # construct template for each histogram bucket array with buckets all zeroes
+    # we just copy this for each new histogram
+
+    zeroed_buckets = [ 0.0 for r in bucket_index_range ]
+
+    # print CSV header just like fiologparser_hist does
+
+    header = 'msec, '
+    for p in args.pctiles_wanted:
+        header += '%3.1f, ' % p
+    print('time (millisec), percentiles in increasing order with values in ' + args.output_unit)
+    print(header)
+
+    # parse the histogram logs
+    # assumption: each bucket has a monotonically increasing time
+    # assumption: time ranges do not overlap for a single thread's records
+    # (exception: if randrw workload, then there is a read and a write
+    # record for the same time interval)
+
+    max_timestamp_all_logs = 0
+    hist_files = {}
+    for fn in args.file_list:
+        try:
+            (hist_files[fn], max_timestamp_ms) = parse_hist_file(fn, buckets_per_interval)
+        except FioHistoLogExc as e:
+            myabort(str(e))
+        max_timestamp_all_logs = max(max_timestamp_all_logs, max_timestamp_ms)
+
+    (end_time, time_interval_count) = get_time_intervals(args.time_quantum, max_timestamp_all_logs)
+    all_threads_histograms = [ ((j*args.time_quantum*msec_per_sec), deepcopy(zeroed_buckets))
+                               for j in range(0, time_interval_count) ]
+
+    for logfn in hist_files.keys():
+        aligned_per_thread = align_histo_log(hist_files[logfn],
+                                             args.time_quantum,
+                                             buckets_per_interval,
+                                             max_timestamp_all_logs)
+        for t in range(0, time_interval_count):
+            (_, all_threads_histo_t) = all_threads_histograms[t]
+            (_, log_histo_t) = aligned_per_thread[t]
+            add_to_histo_from( all_threads_histo_t, log_histo_t )
+
+    # calculate percentiles across aggregate histogram for all threads
+
+    for (t_msec, all_threads_histo_t) in all_threads_histograms:
+        record = '%d, ' % t_msec
+        pct = get_pctiles(all_threads_histo_t, args.pctiles_wanted, bucket_times)
+        if not pct:
+            for w in args.pctiles_wanted:
+                record += ', '
+        else:
+            pct_keys = [ k for k in pct.keys() ]
+            pct_values = [ str(pct[wanted]/time_divisor) for wanted in sorted(pct_keys) ]
+            record += ', '.join(pct_values)
+        print(record)
+
+
+# end of MAIN PROGRAM
+
+
+##### below are unit tests ##############
+
+import tempfile, shutil
+from os.path import join
+should_not_get_here = False
+
+class Test(unittest2.TestCase):
+    tempdir = None
+
+    # a little less typing please
+    def A(self, boolean_val):
+        self.assertTrue(boolean_val)
+
+    # initialize unit test environment
+
+    @classmethod
+    def setUpClass(cls):
+        d = tempfile.mkdtemp()
+        Test.tempdir = d
+
+    # remove anything left by unit test environment
+    # unless user sets UNITTEST_LEAVE_FILES environment variable
+
+    @classmethod
+    def tearDownClass(cls):
+        if not os.getenv("UNITTEST_LEAVE_FILES"):
+            shutil.rmtree(cls.tempdir)
+
+    def setUp(self):
+        self.fn = join(Test.tempdir, self.id())
+
+    def test_a_add_histos(self):
+        a = [ 1.0, 2.0 ]
+        b = [ 1.5, 2.5 ]
+        add_to_histo_from( a, b )
+        self.A(a == [2.5, 4.5])
+        self.A(b == [1.5, 2.5])
+
+    def test_b1_parse_log(self):
+        with open(self.fn, 'w') as f:
+            f.write('1234, 0, 4096, 1, 2, 3, 4\n')
+            f.write('5678,1,16384,5,6,7,8 \n')
+        (raw_histo_log, max_timestamp) = parse_hist_file(self.fn, 4) # 4 buckets per interval
+        self.A(len(raw_histo_log) == 2 and max_timestamp == 5678)
+        (time_ms, direction, bsz, histo) = raw_histo_log[0]
+        self.A(time_ms == 1234 and direction == 0 and bsz == 4096 and histo == [ 1, 2, 3, 4 ])
+        (time_ms, direction, bsz, histo) = raw_histo_log[1]
+        self.A(time_ms == 5678 and direction == 1 and bsz == 16384 and histo == [ 5, 6, 7, 8 ])
+
+    def test_b2_parse_empty_log(self):
+        with open(self.fn, 'w') as f:
+            pass
+        try:
+            (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
+            self.A(should_not_get_here)
+        except FioHistoLogExc as e:
+            self.A(str(e).startswith('no records'))
+
+    def test_b3_parse_empty_records(self):
+        with open(self.fn, 'w') as f:
+            f.write('\n')
+            f.write('1234, 0, 4096, 1, 2, 3, 4\n')
+            f.write('5678,1,16384,5,6,7,8 \n')
+            f.write('\n')
+        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
+        self.A(len(raw_histo_log) == 2 and max_timestamp_ms == 5678)
+        (time_ms, direction, bsz, histo) = raw_histo_log[0]
+        self.A(time_ms == 1234 and direction == 0 and bsz == 4096 and histo == [ 1, 2, 3, 4 ])
+        (time_ms, direction, bsz, histo) = raw_histo_log[1]
+        self.A(time_ms == 5678 and direction == 1 and bsz == 16384 and histo == [ 5, 6, 7, 8 ])
+
+    def test_b4_parse_non_int(self):
+        with open(self.fn, 'w') as f:
+            f.write('12, 0, 4096, 1a, 2, 3, 4\n')
+        try:
+            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
+            self.A(False)
+        except FioHistoLogExc as e:
+            self.A(str(e).startswith('non-integer'))
+
+    def test_b5_parse_neg_int(self):
+        with open(self.fn, 'w') as f:
+            f.write('-12, 0, 4096, 1, 2, 3, 4\n')
+        try:
+            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
+            self.A(False)
+        except FioHistoLogExc as e:
+            self.A(str(e).startswith('negative integer'))
+
+    def test_b6_parse_too_few_int(self):
+        with open(self.fn, 'w') as f:
+            f.write('0, 0\n')
+        try:
+            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
+            self.A(False)
+        except FioHistoLogExc as e:
+            self.A(str(e).startswith('too few numbers'))
+
+    def test_b7_parse_invalid_direction(self):
+        with open(self.fn, 'w') as f:
+            f.write('100, 2, 4096, 1, 2, 3, 4\n')
+        try:
+            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
+            self.A(False)
+        except FioHistoLogExc as e:
+            self.A(str(e).startswith('invalid I/O direction'))
+
+    def test_b8_parse_bsz_too_big(self):
+        with open(self.fn+'_good', 'w') as f:
+            f.write('100, 1, %d, 1, 2, 3, 4\n' % (1<<24))
+        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn+'_good', 4)
+        with open(self.fn+'_bad', 'w') as f:
+            f.write('100, 1, 20000000, 1, 2, 3, 4\n')
+        try:
+            (raw_histo_log, _) = parse_hist_file(self.fn+'_bad', 4)
+            self.A(False)
+        except FioHistoLogExc as e:
+            self.A(str(e).startswith('block size too large'))
+
+    def test_b9_parse_wrong_bucket_count(self):
+        with open(self.fn, 'w') as f:
+            f.write('100, 1, %d, 1, 2, 3, 4, 5\n' % (1<<24))
+        try:
+            (raw_histo_log, _) = parse_hist_file(self.fn, 4)
+            self.A(False)
+        except FioHistoLogExc as e:
+            self.A(str(e).__contains__('buckets per interval'))
+
+    def test_c1_time_ranges(self):
+        ranges = time_ranges(3, 2)  # fio_version defaults to 3
+        expected_ranges = [ # fio_version 3 is in nanoseconds
+                [0.000, 0.001], [0.001, 0.002],   # first group
+                [0.002, 0.003], [0.003, 0.004],   # second group same width
+                [0.004, 0.006], [0.006, 0.008]]   # subsequent groups double width
+        self.A(ranges == expected_ranges)
+        ranges = time_ranges(3, 2, fio_version=3)
+        self.A(ranges == expected_ranges)
+        ranges = time_ranges(3, 2, fio_version=2)
+        expected_ranges_v2 = [ [ 1000.0 * min_or_max for min_or_max in time_range ]
+                               for time_range in expected_ranges ]
+        self.A(ranges == expected_ranges_v2)
+        # see fio V3 stat.h for why 29 groups and 2^6 buckets/group
+        normal_ranges_v3 = time_ranges(29, 64)
+        # for v3, bucket time intervals are measured in nanoseconds
+        self.A(len(normal_ranges_v3) == 29 * 64 and normal_ranges_v3[-1][1] == 64*(1<<(29-1))/1000.0)
+        normal_ranges_v2 = time_ranges(19, 64, fio_version=2)
+        # for v2, bucket time intervals are measured in microseconds so we have fewer buckets
+        self.A(len(normal_ranges_v2) == 19 * 64 and normal_ranges_v2[-1][1] == 64*(1<<(19-1)))
+
+    def test_d1_align_histo_log_1_quantum(self):
+        with open(self.fn, 'w') as f:
+            f.write('100, 1, 4096, 1, 2, 3, 4')
+        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
+        self.A(max_timestamp_ms == 100)
+        aligned_log = align_histo_log(raw_histo_log, 5, 4, max_timestamp_ms)
+        self.A(len(aligned_log) == 1)
+        (time_ms0, h) = aligned_log[0]
+        self.A(time_ms0 == 0 and h == [1.0, 2.0, 3.0, 4.0])
+
+    # we need this to compare 2 lists of floating point numbers for equality
+    # because of floating-point imprecision
+
+    def compare_2_floats(self, x, y):
+        if x == 0.0 or y == 0.0:
+            return (x+y) < 0.0000001
+        else:
+            return (math.fabs(x-y)/x) < 0.00001
+
+    def is_close(self, buckets, buckets_expected):
+        if len(buckets) != len(buckets_expected):
+            return False
+        compare_buckets = lambda k: self.compare_2_floats(buckets[k], buckets_expected[k])
+        indices_close = list(filter(compare_buckets, range(0, len(buckets))))
+        return len(indices_close) == len(buckets)
+
+    def test_d2_align_histo_log_2_quantum(self):
+        with open(self.fn, 'w') as f:
+            f.write('2000, 1, 4096, 1, 2, 3, 4\n')
+            f.write('7000, 1, 4096, 1, 2, 3, 4\n')
+        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 4)
+        self.A(max_timestamp_ms == 7000)
+        (_, _, _, raw_buckets1) = raw_histo_log[0]
+        (_, _, _, raw_buckets2) = raw_histo_log[1]
+        aligned_log = align_histo_log(raw_histo_log, 5, 4, max_timestamp_ms)
+        self.A(len(aligned_log) == 2)
+        (time_ms1, h1) = aligned_log[0]
+        (time_ms2, h2) = aligned_log[1]
+        # because first record is from time interval [2000, 7000]
+        # we weight it accordingly
+        expect1 = [float(b) * 0.6 for b in raw_buckets1]
+        expect2 = [float(b) * 0.4 for b in raw_buckets1]
+        for e in range(0, len(expect2)):
+            expect2[e] += raw_buckets2[e]
+        self.A(time_ms1 == 0    and self.is_close(h1, expect1))
+        self.A(time_ms2 == 5000 and self.is_close(h2, expect2))
+
+    # what to expect if histogram buckets are all equal
+    def test_e1_get_pctiles_flat_histo(self):
+        with open(self.fn, 'w') as f:
+            buckets = [ 100 for j in range(0, 128) ]
+            f.write('9000, 1, 4096, %s\n' % ', '.join([str(b) for b in buckets]))
+        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, 128)
+        self.A(max_timestamp_ms == 9000)
+        aligned_log = align_histo_log(raw_histo_log, 5, 128, max_timestamp_ms)
+        time_intervals = time_ranges(4, 32)
+        # since buckets are all equal, then median is halfway through time_intervals
+        # and max latency interval is at end of time_intervals
+        self.A(time_intervals[64][1] == 0.066 and time_intervals[127][1] == 0.256)
+        pctiles_wanted = [ 0, 50, 100 ]
+        pct_vs_time = []
+        for (time_ms, histo) in aligned_log:
+            pct_vs_time.append(get_pctiles(histo, pctiles_wanted, time_intervals))
+        self.A(pct_vs_time[0] == None)  # no I/O in this time interval
+        expected_pctiles = { 0:0.000, 50:0.064, 100:0.256 }
+        self.A(pct_vs_time[1] == expected_pctiles)
+
+    # what to expect if just the highest histogram bucket is used
+    def test_e2_get_pctiles_highest_pct(self):
+        fio_v3_bucket_count = 29 * 64
+        with open(self.fn, 'w') as f:
+            # make an empty fio v3 histogram
+            buckets = [ 0 for j in range(0, fio_v3_bucket_count) ]
+            # add one I/O request to last bucket
+            buckets[-1] = 1
+            f.write('9000, 1, 4096, %s\n' % ', '.join([str(b) for b in buckets]))
+        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, fio_v3_bucket_count)
+        self.A(max_timestamp_ms == 9000)
+        aligned_log = align_histo_log(raw_histo_log, 5, fio_v3_bucket_count, max_timestamp_ms)
+        (time_ms, histo) = aligned_log[1]
+        time_intervals = time_ranges(29, 64)
+        expected_pctiles = { 100.0:(64*(1<<28))/1000.0 }
+        pct = get_pctiles( histo, [ 100.0 ], time_intervals )
+        self.A(pct == expected_pctiles)
+
+# we are using this module as a standalone program
+
+if __name__ == '__main__':
+    if os.getenv('UNITTEST'):
+        sys.exit(unittest2.main())
+    else:
+        compute_percentiles_from_logs()
+
diff --git a/verify.c b/verify.c
index 0f2c118..01492f2 100644
--- a/verify.c
+++ b/verify.c
@@ -1517,7 +1517,7 @@ int paste_blockoff(char *buf, unsigned int len, void *priv)
 	struct io_u *io = priv;
 	unsigned long long off;
 
-	typecheck(typeof(off), io->offset);
+	typecheck(__typeof__(off), io->offset);
 	off = cpu_to_le64((uint64_t)io->offset);
 	len = min(len, (unsigned int)sizeof(off));
 	memcpy(buf, &off, len);
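A note for anyone trying the new tool: based on the argparse options in the patch
above, a typical invocation would look something like the following (the log file
names here are hypothetical; fio writes *_clat_hist.N.log files when a job enables
histogram logging via log_hist_msec and write_hist_log):

  $ python tools/hist/fio-histo-log-pctiles.py --time-quantum 5 --output-unit msec \
        randrw_clat_hist.1.log randrw_clat_hist.2.log

Each output row is one time quantum, with the requested percentiles in increasing
order, computed from the sum of all threads' aligned histograms for that quantum.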
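The interpolation step in get_pctiles() is also easy to sanity-check in isolation.
Below is a minimal standalone sketch (not part of the patch) that assumes a
two-bucket histogram with 30 and 70 I/Os in the latency ranges [0,1) and [1,2)
usec, and computes the median the same way the loop in the patch does:

  # standalone sketch of the percentile interpolation in get_pctiles()
  buckets = [30, 70]                       # I/O counts per bucket
  ranges = [[0.0, 1.0], [1.0, 2.0]]        # [tmin, tmax) per bucket, in usec
  total_ios = float(sum(buckets))          # 100 I/Os total
  last_pct = 100.0 * buckets[0] / total_ios             # CDF before bucket 1 = 30%
  pct = 100.0 * (buckets[0] + buckets[1]) / total_ios   # CDF after bucket 1 = 100%
  next_pctile = 50.0                       # we want the median
  offset_frac = (next_pctile - last_pct) / (pct - last_pct)          # ~0.286
  median = ranges[1][0] + offset_frac * (ranges[1][1] - ranges[1][0])
  print(median)                            # ~1.286 usec

Without interpolation, the median would be reported as a boundary of whichever
bucket crossed 50%, which is what motivates the "use interpolation for more
accurate percentile calculation" change in this pull.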