Hello Mathieu,
nice to hear someone is concerned about space.
Since you plan to go for uint, perhaps we can drop the bitfield stuff altogether?

re,
 wh

On 01.12.2011 15:20, Mathieu Desnoyers wrote:
> * walter harms (wharms@xxxxxx) wrote:
>> hi,
>> This patch looks ok to me, but this design is ugly by itself.
>> It should be replaced by an uchar, uint, whatever, or use a
>> real bool (obviously not preferred by this programmer).
>
> bool :1, uchar :1 or uint :1 could make sense. uchar:1/bool:1 won't save
> any space here, because the surrounding fields are either uint or
> pointers, so alignment will just add padding.
>
> I try to use int/uint whenever possible because x86 CPUs tend to get
> fewer register false dependencies when using instructions modifying the
> whole register (generated by using int/uint types) rather than only part
> of it (uchar/char/bool). I only use char/uchar/bool when there is a
> clear, wanted space gain.
>
> The reason why I never use the bool type within a structure when I want
> a compact representation is that bool takes a whole byte just to
> represent one bit:
>
> #include <stdio.h>
> #include <stdbool.h>
>
> struct usebitfield {
>         int a;
>         unsigned int f:1, g:1, h:1, i:1, j:1;   /* five flags share one uint unit */
>         int b;
> };
>
> struct usebool {
>         int a;
>         bool f, g, h, i, j;                     /* one byte per flag */
>         int b;
> };
>
> struct useboolbf {
>         int a;
>         bool f:1, g:1, h:1, i:1, j:1;           /* bitfields packed into bool units */
>         int b;
> };
>
> int main()
> {
>         printf("bitfield %zu bytes, bool %zu bytes, boolbitfield %zu bytes\n",
>                 sizeof(struct usebitfield), sizeof(struct usebool),
>                 sizeof(struct useboolbf));
> }
>
> result:
>
> bitfield 12 bytes, bool 16 bytes, boolbitfield 12 bytes
>
> This is because each bool takes one byte, while the bitfields are put in
> units of "unsigned int" (or bool for the 3rd struct). So in this
> example, we need 5 bytes + 3 bytes alignment for the bool, but only 4
> bytes to hold the "unsigned int" unit for the bitfields.
>
> The choice between bool and bitfields must also take into account the
> frequency of access to the variable, because bitfields require mask
> operations to access the selected bit(s). You will notice that none of
> these bitfields are accessed on the tracing fast-path: only in
> slow-paths. Therefore, space gain is more important than speed here.
>
> One might argue that I have so few of these fields here that it does not
> make an actual difference to go for bitfield or bool. I am just trying
> to choose types best suited for their intended purpose, ensuring they
> are future-proof and will allow simply adding more fields using the same
> type, as needed.
>
> So I guess I'll go for uint :1.
>
> Thanks,
>
> Mathieu
>
>>
>> re,
>> wh
>>
>> On 01.12.2011 10:37, Dan Carpenter wrote:
>>> Sparse complains that these signed bitfields look "dubious". The
>>> problem is that instead of being either 0 or 1 like people would expect,
>>> signed one-bit variables like this are either 0 or -1. It doesn't cause
>>> a problem in this case, but it's ugly, so let's fix them.
>>>
>>> Signed-off-by: Dan Carpenter <dan.carpenter@xxxxxxxxxx>
>>> ---
>>> I just did this against linux-next but it applies fine on top of
>>> Mathieu's recent patches.
>>>
>>> diff --git a/drivers/staging/lttng/lib/ringbuffer/backend_types.h b/drivers/staging/lttng/lib/ringbuffer/backend_types.h
>>> index 1d301de..019929a 100644
>>> --- a/drivers/staging/lttng/lib/ringbuffer/backend_types.h
>>> +++ b/drivers/staging/lttng/lib/ringbuffer/backend_types.h
>>> @@ -65,7 +65,7 @@ struct channel_backend {
>>>  	 * for writer.
>>>  	 */
>>>  	unsigned int buf_size_order;	/* Order of buffer size */
>>> -	int extra_reader_sb:1;		/* Bool: has extra reader subbuffer */
>>> +	unsigned int extra_reader_sb:1;	/* Bool: has extra reader subbuffer */
>>>  	struct lib_ring_buffer *buf;	/* Channel per-cpu buffers */
>>> 
>>>  	unsigned long num_subbuf;	/* Number of sub-buffers for writer */
>>> diff --git a/drivers/staging/lttng/lib/ringbuffer/frontend_types.h b/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
>>> index 5c7437f..9086c58 100644
>>> --- a/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
>>> +++ b/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
>>> @@ -60,8 +60,8 @@ struct channel {
>>>  	struct notifier_block cpu_hp_notifier;	/* CPU hotplug notifier */
>>>  	struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
>>>  	struct notifier_block hp_iter_notifier;	/* hotplug iterator notifier */
>>> -	int cpu_hp_enable:1;			/* Enable CPU hotplug notif. */
>>> -	int hp_iter_enable:1;			/* Enable hp iter notif. */
>>> +	unsigned int cpu_hp_enable:1;		/* Enable CPU hotplug notif. */
>>> +	unsigned int hp_iter_enable:1;		/* Enable hp iter notif. */
>>>  	wait_queue_head_t read_wait;		/* reader wait queue */
>>>  	wait_queue_head_t hp_wait;		/* CPU hotplug wait queue */
>>>  	int finalized;				/* Has channel been finalized */
>>> @@ -94,8 +94,8 @@ struct lib_ring_buffer_iter {
>>>  		ITER_NEXT_RECORD,
>>>  		ITER_PUT_SUBBUF,
>>>  	} state;
>>> -	int allocated:1;
>>> -	int read_open:1;		/* Opened for reading ? */
>>> +	unsigned int allocated:1;
>>> +	unsigned int read_open:1;	/* Opened for reading ? */
>>>  };
>>> 
>>>  /* ring buffer state */
>>> @@ -138,9 +138,9 @@ struct lib_ring_buffer {
>>>  	unsigned long get_subbuf_consumed;	/* Read-side consumed */
>>>  	unsigned long prod_snapshot;	/* Producer count snapshot */
>>>  	unsigned long cons_snapshot;	/* Consumer count snapshot */
>>> -	int get_subbuf:1;		/* Sub-buffer being held by reader */
>>> -	int switch_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
>>> -	int read_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
>>> +	unsigned int get_subbuf:1;	/* Sub-buffer being held by reader */
>>> +	unsigned int switch_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
>>> +	unsigned int read_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
>>>  };
>>> 
>>>  static inline
>>> diff --git a/drivers/staging/lttng/ltt-events.h b/drivers/staging/lttng/ltt-events.h
>>> index 36b281a..3fc355d 100644
>>> --- a/drivers/staging/lttng/ltt-events.h
>>> +++ b/drivers/staging/lttng/ltt-events.h
>>> @@ -191,7 +191,7 @@ struct ltt_event {
>>>  		} ftrace;
>>>  	} u;
>>>  	struct list_head list;		/* Event list */
>>> -	int metadata_dumped:1;
>>> +	unsigned int metadata_dumped:1;
>>>  };
>>> 
>>>  struct ltt_channel_ops {
>>> @@ -251,7 +251,7 @@ struct ltt_channel {
>>>  	struct ltt_event *sc_compat_unknown;
>>>  	struct ltt_event *sc_exit;	/* for syscall exit */
>>>  	int header_type;		/* 0: unset, 1: compact, 2: large */
>>> -	int metadata_dumped:1;
>>> +	unsigned int metadata_dumped:1;
>>>  };
>>> 
>>>  struct ltt_session {
>>> @@ -264,7 +264,7 @@ struct ltt_session {
>>>  	struct list_head list;		/* Session list */
>>>  	unsigned int free_chan_id;	/* Next chan ID to allocate */
>>>  	uuid_le uuid;			/* Trace session unique ID */
>>> -	int metadata_dumped:1;
>>> +	unsigned int metadata_dumped:1;
>>>  };
>>> 
>>>  struct ltt_session *ltt_session_create(void);
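As a quick stand-alone illustration of the behaviour Dan describes (the struct and field names below are made up for the example, not taken from LTTng): on gcc/clang, storing 1 into a signed one-bit field leaves -1 in it, whereas the unsigned field the patch switches to holds a plain 0 or 1.

#include <stdio.h>

struct signed_flag {
	int metadata_dumped:1;		/* the kind of field sparse flags as "dubious" */
};

struct unsigned_flag {
	unsigned int metadata_dumped:1;	/* what the patch converts it to */
};

int main(void)
{
	struct signed_flag s = { 0 };
	struct unsigned_flag u = { 0 };

	s.metadata_dumped = 1;	/* 1 does not fit in a 1-bit signed field; gcc/clang store -1 */
	u.metadata_dumped = 1;

	/* both bitfields promote to int, so %d is correct for both */
	printf("signed:   %d (== 1 is %s)\n",
	       s.metadata_dumped, s.metadata_dumped == 1 ? "true" : "false");
	printf("unsigned: %d (== 1 is %s)\n",
	       u.metadata_dumped, u.metadata_dumped == 1 ? "true" : "false");
	return 0;
}

A plain truth test such as if (s.metadata_dumped) behaves the same either way, which is why Dan notes it causes no actual problem here; an explicit == 1 comparison is where the surprise would bite.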
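Mathieu's remark that bitfields require mask operations is the speed side of the trade-off. Here is a rough, hypothetical sketch (invented field names; the actual bit position inside the unit is whatever the compiler/ABI picks) of what an assignment to a one-bit field expands to, compared with a plain word-sized flag:

#include <stdio.h>

struct packed {
	unsigned int get_subbuf:1;		/* both flags share one unsigned int unit */
	unsigned int read_timer_enabled:1;
};

struct plain {
	unsigned int read_timer_enabled;	/* one full word, written with a single store */
};

/* p.read_timer_enabled = v roughly expands to this read-modify-write,
 * assuming the compiler placed the field at bit 1 of its unit: */
static void assign_second_bit(unsigned int *unit, unsigned int v)
{
	*unit = (*unit & ~(1u << 1)) | ((v & 1u) << 1);	/* load, mask, shift, or, store */
}

/* reading it back needs a shift and a mask as well */
static unsigned int read_second_bit(const unsigned int *unit)
{
	return (*unit >> 1) & 1u;
}

int main(void)
{
	struct packed p = { 0 };
	struct plain q = { 0 };
	unsigned int unit = 0;

	p.read_timer_enabled = 1;	/* compiler emits the masking for us */
	assign_second_bit(&unit, 1);	/* hand-written equivalent */
	q.read_timer_enabled = 1;	/* plain flag: just a store */

	printf("bitfield=%d manual=%u plain=%u\n",
	       p.read_timer_enabled, read_second_bit(&unit), q.read_timer_enabled);
	return 0;
}

Because these flags are only touched on slow paths, the extra load/mask/store is noise, so the space argument wins, as Mathieu says.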
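On the question of dropping the bitfields altogether now that the type is uint: with a single flag wedged between pointers and uints the :1 typically buys nothing, but once several flags accumulate, the packed form keeps them all in one unsigned int. A sketch with invented layouts loosely modelled on the structures in the patch:

#include <stdio.h>

struct one_flag_bitfield {
	void *ptr;
	unsigned int id;
	unsigned int metadata_dumped:1;
};

struct one_flag_plain {
	void *ptr;
	unsigned int id;
	unsigned int metadata_dumped;
};

struct five_flags_bitfield {
	void *ptr;
	unsigned int id;
	unsigned int f0:1, f1:1, f2:1, f3:1, f4:1;	/* all share one uint unit */
};

struct five_flags_plain {
	void *ptr;
	unsigned int id;
	unsigned int f0, f1, f2, f3, f4;		/* one full uint each */
};

int main(void)
{
	printf("one flag:   bitfield %zu, plain %zu\n",
	       sizeof(struct one_flag_bitfield), sizeof(struct one_flag_plain));
	printf("five flags: bitfield %zu, plain %zu\n",
	       sizeof(struct five_flags_bitfield), sizeof(struct five_flags_plain));
	return 0;
}

On a typical x86-64 ABI this usually prints the same size for the two one-flag variants, while the five plain uints cost an extra 16 bytes over the packed version, which is the "future-proof, easy to add more fields of the same type" point Mathieu makes.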