The CPU trace data is compressed in chunks; a chunk's size is a multiple of the trace page size. The input handler is extended with the necessary structures to control the data decompression. There are two approaches for data decompression, both are supported and can be used in different use cases: - in-memory decompression, page by page. - using a temporary file Signed-off-by: Tzvetomir Stoyanov (VMware) <tz.stoyanov@xxxxxxxxx> --- lib/trace-cmd/trace-input.c | 66 ++++++++++++++++++++++++++++++------- 1 file changed, 54 insertions(+), 12 deletions(-) diff --git a/lib/trace-cmd/trace-input.c b/lib/trace-cmd/trace-input.c index 5ee0adcf..f872e850 100644 --- a/lib/trace-cmd/trace-input.c +++ b/lib/trace-cmd/trace-input.c @@ -29,6 +29,9 @@ #define COMMIT_MASK ((1 << 27) - 1) +/* force uncompressing in memory */ +#define INMEMORY_DECOMPRESS + /* for debugging read instead of mmap */ static int force_read = 0; @@ -54,6 +57,24 @@ struct page { #endif }; +struct zchunk_cache { + struct list_head list; + struct tracecmd_compress_chunk *chunk; + void *map; + int ref; +}; + +struct cpu_zdata { + /* uncompressed cpu data */ + int fd; + char file[26]; /* strlen(COMPR_TEMP_FILE) */ + unsigned int count; + unsigned int last_chunk; + struct list_head cache; + struct tracecmd_compress_chunk *chunks; +}; + +#define COMPR_TEMP_FILE "/tmp/trace_cpu_dataXXXXXX" struct cpu_data { /* the first two never change */ unsigned long long file_offset; @@ -72,6 +93,7 @@ struct cpu_data { int page_cnt; int cpu; int pipe_fd; + struct cpu_zdata compress; }; struct cpu_file_data { @@ -150,6 +172,8 @@ struct tracecmd_input { bool use_trace_clock; bool read_page; bool use_pipe; + bool read_zpage; /* uncompress pages in memory, do not use tmp files */ + bool cpu_compressed; int file_version; struct cpu_data *cpu_data; long long ts_offset; @@ -3284,7 +3308,7 @@ static int read_cpu_data(struct tracecmd_input *handle) unsigned long long offset; handle->cpu_data[cpu].cpu = cpu; - + handle->cpu_data[cpu].compress.fd 
= -1; handle->cpu_data[cpu].kbuf = kbuffer_alloc(long_size, endian); if (!handle->cpu_data[cpu].kbuf) goto out_free; @@ -3701,6 +3725,9 @@ struct tracecmd_input *tracecmd_alloc_fd(int fd, int flags) /* By default, use usecs, unless told otherwise */ handle->flags |= TRACECMD_FL_IN_USECS; +#ifdef INMEMORY_DECOMPRESS + handle->read_zpage = 1; +#endif if (do_read_check(handle, buf, 3)) goto failed_read; @@ -3915,6 +3942,7 @@ void tracecmd_ref(struct tracecmd_input *handle) */ void tracecmd_close(struct tracecmd_input *handle) { + struct zchunk_cache *cache; struct file_section *del_sec; int i; @@ -3933,17 +3961,31 @@ void tracecmd_close(struct tracecmd_input *handle) /* The tracecmd_peek_data may have cached a record */ free_next(handle, i); free_page(handle, i); - if (handle->cpu_data && handle->cpu_data[i].kbuf) { - kbuffer_free(handle->cpu_data[i].kbuf); - if (handle->cpu_data[i].page_map) - free_page_map(handle->cpu_data[i].page_map); - - if (handle->cpu_data[i].page_cnt) - tracecmd_warning("%d pages still allocated on cpu %d%s", - handle->cpu_data[i].page_cnt, i, - show_records(handle->cpu_data[i].pages, - handle->cpu_data[i].nr_pages)); - free(handle->cpu_data[i].pages); + if (handle->cpu_data) { + if (handle->cpu_data[i].kbuf) { + kbuffer_free(handle->cpu_data[i].kbuf); + if (handle->cpu_data[i].page_map) + free_page_map(handle->cpu_data[i].page_map); + + if (handle->cpu_data[i].page_cnt) + tracecmd_warning("%d pages still allocated on cpu %d%s", + handle->cpu_data[i].page_cnt, i, + show_records(handle->cpu_data[i].pages, + handle->cpu_data[i].nr_pages)); + free(handle->cpu_data[i].pages); + } + if (handle->cpu_data[i].compress.fd >= 0) { + close(handle->cpu_data[i].compress.fd); + unlink(handle->cpu_data[i].compress.file); + } + while (!list_empty(&handle->cpu_data[i].compress.cache)) { + cache = container_of(handle->cpu_data[i].compress.cache.next, + struct zchunk_cache, list); + list_del(&cache->list); + free(cache->map); + free(cache); + } + 
free(handle->cpu_data[i].compress.chunks); } } -- 2.31.1