[...] > +EXAMPLE > +------- > +[source,c] > +-- > +#include <stdlib.h> > +#include <ctype.h> > +#include <tracefs.h> > + > +static void read_page(struct tep_handle *tep, struct kbuffer *kbuf) read_subbuf? > +{ > + static struct trace_seq seq; > + struct tep_record record; > + > + if (seq.buffer) > + trace_seq_reset(&seq); > + else > + trace_seq_init(&seq); > + > + while ((record.data = kbuffer_read_event(kbuf, &record.ts))) { > + record.size = kbuffer_event_size(kbuf); > + kbuffer_next_event(kbuf, NULL); > + tep_print_event(tep, &seq, &record, > + "%s-%d %9d\t%s: %s\n", > + TEP_PRINT_COMM, > + TEP_PRINT_PID, > + TEP_PRINT_TIME, > + TEP_PRINT_NAME, > + TEP_PRINT_INFO); > + trace_seq_do_printf(&seq); > + trace_seq_reset(&seq); > + } > +} > + [...] > +__hidden void *trace_mmap(int fd, struct kbuffer *kbuf) > +{ > + struct trace_mmap *tmap; > + int page_size; > + void *meta; > + void *data; > + > + page_size = getpagesize(); > + meta = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); > + if (meta == MAP_FAILED) > + return NULL; > + > + tmap = calloc(1, sizeof(*tmap)); > + if (!tmap) { > + munmap(meta, page_size); > + return NULL; > + } > + > + tmap->kbuf = kbuffer_dup(kbuf); > + if (!tmap->kbuf) { > + munmap(meta, page_size); > + free(tmap); > + } > + > + tmap->fd = fd; > + > + tmap->map = meta; > + tmap->meta_len = tmap->map->meta_page_size; > + > + if (tmap->meta_len > page_size) { > + munmap(meta, page_size); > + meta = mmap(NULL, tmap->meta_len, PROT_READ, MAP_SHARED, fd, 0); > + if (meta == MAP_FAILED) { > + kbuffer_free(tmap->kbuf); > + free(tmap); > + return NULL; > + } > + tmap->map = meta; > + } > + > + tmap->data_pages = meta + tmap->meta_len; > + > + tmap->data_len = tmap->map->subbuf_size * tmap->map->nr_subbufs; > + > + tmap->data = mmap(NULL, tmap->data_len, PROT_READ, MAP_SHARED, > + fd, tmap->meta_len); > + if (tmap->data == MAP_FAILED) { > + munmap(meta, tmap->meta_len); > + kbuffer_free(tmap->kbuf); > + free(tmap); > + return NULL; > + } > + > + 
tmap->last_idx = tmap->map->reader.id;
> +
> +	data = tmap->data + tmap->map->subbuf_size * tmap->last_idx;
> +	kbuffer_load_subbuffer(kbuf, data); Could it fast-forward past the events already consumed, up to tmap->map->reader.read, so we don't read the same events again? Something like: while (kbuf->curr < tmap->map->reader.read) kbuffer_next_event(kbuf, NULL);
> +
> +	return tmap;
> +}
> +
> +__hidden void trace_unmap(void *mapping)
> +{
> +	struct trace_mmap *tmap = mapping;
> +
> +	munmap(tmap->data, tmap->data_len);
> +	munmap(tmap->map, tmap->meta_len);
> +	kbuffer_free(tmap->kbuf);
> +	free(tmap);
> +}
> +
> +__hidden int trace_mmap_load_subbuf(void *mapping, struct kbuffer *kbuf)
> +{
> +	struct trace_mmap *tmap = mapping;
> +	void *data;
> +	int id;
> +
> +	id = tmap->map->reader.id;
> +	data = tmap->data + tmap->map->subbuf_size * id;
> +
> +	/*
> +	 * If kbuf doesn't point to the current sub-buffer
> +	 * just load it and return.
> +	 */
> +	if (data != kbuffer_subbuffer(kbuf)) {
> +		kbuffer_load_subbuffer(kbuf, data);
> +		return 1;
> +	}
> +
> +	/*
> +	 * Perhaps the reader page had a write that added
> +	 * more data.
> +	 */
> +	kbuffer_refresh(kbuf);
> +
> +	/* Are there still events to read? */
> +	if (kbuffer_curr_size(kbuf))
> +		return 1; This does not seem to be enough: kbuffer_refresh() only updates kbuf->size, while kbuffer_curr_size() returns next - curr.
> +
> +	/* See if a new page is ready? */
> +	if (ioctl(tmap->fd, TRACE_MMAP_IOCTL_GET_READER) < 0)
> +		return -1; Maybe this ioctl should be called regardless of whether events are found on the current reader page? That would at least update the reader->read field and make sure subsequent readers do not get the same events we already saw here.
> +	id = tmap->map->reader.id;
> +	data = tmap->data + tmap->map->subbuf_size * id;
> + [...]