On Wed, 2024-12-18 at 13:01 -0800, Joanne Koong wrote:
> Add support for reads/writes from buffers backed by hugepages.
> This can be enabled through the '-h' flag. This flag should only be
> used on systems where THP capabilities are enabled.
>
> Signed-off-by: Joanne Koong <joannelkoong@xxxxxxxxx>
> ---
>  ltp/fsx.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++-----
>  1 file changed, 92 insertions(+), 8 deletions(-)
>
> diff --git a/ltp/fsx.c b/ltp/fsx.c
> index 41933354..3656fd9f 100644
> --- a/ltp/fsx.c
> +++ b/ltp/fsx.c
> @@ -190,6 +190,7 @@ int o_direct;			/* -Z */
>  int aio = 0;
>  int uring = 0;
>  int mark_nr = 0;
> +int hugepages = 0;		/* -h flag */
>
>  int page_size;
>  int page_mask;
> @@ -2471,7 +2472,7 @@ void
>  usage(void)
>  {
>  	fprintf(stdout, "usage: %s",
> -		"fsx [-dfknqxyzBEFHIJKLORWXZ0]\n\
> +		"fsx [-dfhknqxyzBEFHIJKLORWXZ0]\n\
>  	[-b opnum] [-c Prob] [-g filldata] [-i logdev] [-j logid]\n\
>  	[-l flen] [-m start:end] [-o oplen] [-p progressinterval]\n\
>  	[-r readbdy] [-s style] [-t truncbdy] [-w writebdy]\n\
> @@ -2484,6 +2485,7 @@ usage(void)
>  	-e: pollute post-eof on size changes (default 0)\n\
>  	-f: flush and invalidate cache after I/O\n\
>  	-g X: write character X instead of random generated data\n\
> +	-h hugepages: use buffers backed by hugepages for reads/writes\n\
>  	-i logdev: do integrity testing, logdev is the dm log writes device\n\
>  	-j logid: prefix debug log messsages with this id\n\
>  	-k: do not truncate existing file and use its size as upper bound on file size\n\
> @@ -2856,6 +2858,56 @@ keep_running(void)
>  	return numops-- != 0;
>  }
>
> +static long
> +get_hugepage_size(void)
> +{
> +	const char *str = "Hugepagesize:";
> +	long hugepage_size = -1;
> +	char buffer[64];
> +	FILE *file;
> +
> +	file = fopen("/proc/meminfo", "r");
> +	if (!file) {
> +		prterr("get_hugepage_size: fopen /proc/meminfo");
> +		return -1;
> +	}
> +	while (fgets(buffer, sizeof(buffer), file)) {
> +		if (strncmp(buffer, str, strlen(str)) == 0) {

Extremely minor: since str is a fixed string, why not compute its
length once outside the loop instead of calling strlen(str) several
times?

> +			sscanf(buffer + strlen(str), "%ld", &hugepage_size);
> +			break;
> +		}
> +	}
> +	fclose(file);
> +	if (hugepage_size == -1) {
> +		prterr("get_hugepage_size: failed to find "
> +		       "hugepage size in /proc/meminfo\n");
> +		return -1;
> +	}
> +
> +	/* convert from KiB to bytes */
> +	return hugepage_size * 1024;

Minor: might '<< 10' be faster than '*' here?
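Combining these two nits, something like this untested sketch is what
I have in mind (a static array makes the prefix length a compile-time
constant, so there is no strlen() call at all; everything else is
unchanged from your patch):

	static long
	get_hugepage_size(void)
	{
		static const char str[] = "Hugepagesize:";
		const size_t str_len = sizeof(str) - 1;	/* drop the trailing NUL */
		long hugepage_size = -1;
		char buffer[64];
		FILE *file;

		file = fopen("/proc/meminfo", "r");
		if (!file) {
			prterr("get_hugepage_size: fopen /proc/meminfo");
			return -1;
		}
		while (fgets(buffer, sizeof(buffer), file)) {
			if (strncmp(buffer, str, str_len) == 0) {
				sscanf(buffer + str_len, "%ld", &hugepage_size);
				break;
			}
		}
		fclose(file);
		if (hugepage_size == -1) {
			prterr("get_hugepage_size: failed to find "
			       "hugepage size in /proc/meminfo\n");
			return -1;
		}

		/* convert from KiB to bytes */
		return hugepage_size << 10;
	}

That said, any modern compiler will strength-reduce a multiply by a
constant power of two into a shift anyway, so that part is purely
cosmetic.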
> +}
> +
> +static void *
> +init_hugepages_buf(unsigned len, long hugepage_size)
> +{
> +	void *buf;
> +	long buf_size = roundup(len, hugepage_size);
> +
> +	if (posix_memalign(&buf, hugepage_size, buf_size)) {
> +		prterr("posix_memalign for buf");
> +		return NULL;
> +	}
> +	memset(buf, '\0', len);
> +	if (madvise(buf, buf_size, MADV_COLLAPSE)) {
> +		prterr("madvise collapse for buf");
> +		free(buf);
> +		return NULL;
> +	}
> +
> +	return buf;
> +}
> +
>  static struct option longopts[] = {
>  	{"replay-ops", required_argument, 0, 256},
>  	{"record-ops", optional_argument, 0, 255},
> @@ -2883,7 +2935,7 @@ main(int argc, char **argv)
>  	setvbuf(stdout, (char *)0, _IOLBF, 0); /* line buffered stdout */
>
>  	while ((ch = getopt_long(argc, argv,
> -		"0b:c:de:fg:i:j:kl:m:no:p:qr:s:t:uw:xyABD:EFJKHzCILN:OP:RS:UWXZ",
> +		"0b:c:de:fg:hi:j:kl:m:no:p:qr:s:t:uw:xyABD:EFJKHzCILN:OP:RS:UWXZ",
>  		longopts, NULL)) != EOF)
>  		switch (ch) {
>  		case 'b':
> @@ -2916,6 +2968,9 @@ main(int argc, char **argv)
>  		case 'g':
>  			filldata = *optarg;
>  			break;
> +		case 'h':
> +			hugepages = 1;
> +			break;
>  		case 'i':
>  			integrity = 1;
>  			logdev = strdup(optarg);
> @@ -3232,12 +3287,41 @@ main(int argc, char **argv)
>  	original_buf = (char *) malloc(maxfilelen);
>  	for (i = 0; i < maxfilelen; i++)
>  		original_buf[i] = random() % 256;
> -	good_buf = (char *) malloc(maxfilelen + writebdy);
> -	good_buf = round_ptr_up(good_buf, writebdy, 0);
> -	memset(good_buf, '\0', maxfilelen);
> -	temp_buf = (char *) malloc(maxoplen + readbdy);
> -	temp_buf = round_ptr_up(temp_buf, readbdy, 0);
> -	memset(temp_buf, '\0', maxoplen);
> +	if (hugepages) {
> +		long hugepage_size;
> +
> +		hugepage_size = get_hugepage_size();
> +		if (hugepage_size == -1) {
> +			prterr("get_hugepage_size()");
> +			exit(99);
> +		}
> +
> +		if (writebdy != 1 && writebdy != hugepage_size)
> +			prt("ignoring write alignment (since -h is enabled)");
> +
> +		if (readbdy != 1 && readbdy != hugepage_size)
> +			prt("ignoring read alignment (since -h is enabled)");
> +
> +		good_buf = init_hugepages_buf(maxfilelen, hugepage_size);
> +		if (!good_buf) {
> +			prterr("init_hugepages_buf failed for good_buf");
> +			exit(100);
> +		}
> +
> +		temp_buf = init_hugepages_buf(maxoplen, hugepage_size);
> +		if (!temp_buf) {
> +			prterr("init_hugepages_buf failed for temp_buf");
> +			exit(101);
> +		}
> +	} else {
> +		good_buf = (char *) malloc(maxfilelen + writebdy);
> +		good_buf = round_ptr_up(good_buf, writebdy, 0);

Not sure if it matters in practice, but isn't there a small memory
leak here, since good_buf's original pointer is lost after rounding
up? (Same for temp_buf below; one way to keep the pointers around is
sketched at the end of this mail.)

> +		memset(good_buf, '\0', maxfilelen);
> +
> +		temp_buf = (char *) malloc(maxoplen + readbdy);
> +		temp_buf = round_ptr_up(temp_buf, readbdy, 0);
> +		memset(temp_buf, '\0', maxoplen);
> +	}
>  	if (lite) {	/* zero entire existing file */
>  		ssize_t written;
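On the rounding-up leak above: these buffers live for the whole run,
so nothing accumulates, but once the unrounded pointers are lost they
can never be passed to free(), and leak checkers like valgrind may
report them. If that is worth fixing, here is one untested sketch;
the *_alloc names are made up, everything else is from the patch:

	char *good_buf_alloc, *temp_buf_alloc;

	/* keep the unrounded pointers so they can be freed later */
	good_buf_alloc = (char *) malloc(maxfilelen + writebdy);
	good_buf = round_ptr_up(good_buf_alloc, writebdy, 0);
	memset(good_buf, '\0', maxfilelen);

	temp_buf_alloc = (char *) malloc(maxoplen + readbdy);
	temp_buf = round_ptr_up(temp_buf_alloc, readbdy, 0);
	memset(temp_buf, '\0', maxoplen);

	...

	/* wherever fsx tears down, if anywhere */
	free(good_buf_alloc);
	free(temp_buf_alloc);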