From: Andrii Nakryiko <andriin@xxxxxx>

Instead of relying on error-prone manual adjustment of bit/byte holes,
use class__find_holes() to re-calculate them after members are moved
around. As part of that change, fix a bug where bit_offset was not
adjusted when byte_offset changed.

Signed-off-by: Andrii Nakryiko <andriin@xxxxxx>
---
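Note for reviewers, not part of the commit message: the invariant the
patch maintains at every adjustment is bit_offset == byte_offset * 8 +
bitfield_offset, and class__recalc_holes() re-derives all hole/padding
bookkeeping from the member list instead of patching it up incrementally.
As a minimal sketch of what that bookkeeping describes (a hypothetical
struct, assuming a typical LP64 target with IA32-style bitfield
allocation; this is not code from the patch):

    struct example {
            char a;         /* byte_offset 0, then a 3-byte hole     */
            int  b;         /* byte_offset 4                         */
            int  c:5;       /* bit_offset 64 == 8 * 8 + 0            */
            int  d:3;       /* bit_offset 69 == 8 * 8 + 5, then a
                             * 24-bit hole to the end of the int     */
            char e;         /* byte_offset 12, then 3 bytes padding  */
    };                      /* sizeof(struct example) == 16          */

Every member move and bitfield demotion below used to update these
values by hand at each call site; recomputing them in one place is what
allows deleting the scattered hole/padding arithmetic.
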
 dwarves.c            |   2 +
 dwarves_reorganize.c | 123 ++++++++++++-------------------------------
 2 files changed, 37 insertions(+), 88 deletions(-)

diff --git a/dwarves.c b/dwarves.c
index 5be50b0..0d38ca3 100644
--- a/dwarves.c
+++ b/dwarves.c
@@ -1279,6 +1279,8 @@ void class__find_holes(struct class *class)
 		int bitfield_end = min(ctype->size * 8, cur_bitfield_end);
 		class->bit_padding = bitfield_end - last_seen_bit;
 		last_seen_bit = bitfield_end;
+	} else {
+		class->bit_padding = 0;
 	}
 
 	class->padding = ctype->size - last_seen_bit / 8;
diff --git a/dwarves_reorganize.c b/dwarves_reorganize.c
index 288b582..9b3aea2 100644
--- a/dwarves_reorganize.c
+++ b/dwarves_reorganize.c
@@ -11,6 +11,11 @@
 #include "dwarves_reorganize.h"
 #include "dwarves.h"
 
+static void class__recalc_holes(struct class *class) {
+	class->holes_searched = 0;
+	class__find_holes(class);
+}
+
 void class__subtract_offsets_from(struct class *class,
 				  struct class_member *from,
 				  const uint16_t size)
@@ -19,8 +24,10 @@ void class__subtract_offsets_from(struct class *class,
 		list_prepare_entry(from, class__tags(class), tag.node);
 
 	list_for_each_entry_continue(member, class__tags(class), tag.node)
-		if (member->tag.tag == DW_TAG_member)
+		if (member->tag.tag == DW_TAG_member) {
 			member->byte_offset -= size;
+			member->bit_offset -= size * 8;
+		}
 
 	if (class->padding != 0) {
 		struct class_member *last_member =
@@ -42,8 +49,10 @@ void class__add_offsets_from(struct class *class, struct class_member *from,
 		list_prepare_entry(from, class__tags(class), tag.node);
 
 	list_for_each_entry_continue(member, class__tags(class), tag.node)
-		if (member->tag.tag == DW_TAG_member)
+		if (member->tag.tag == DW_TAG_member) {
 			member->byte_offset += size;
+			member->bit_offset += size * 8;
+		}
 }
 
 /*
@@ -62,6 +71,7 @@ void class__fixup_alignment(struct class *class, const struct cu *cu)
 						     (pos->byte_offset -
 						      pos->byte_size));
 			pos->byte_offset = 0;
+			pos->bit_offset = 0;
 		} else if (last_member != NULL &&
 			   last_member->hole >= cu->addr_size) {
 			size_t dec = (last_member->hole / cu->addr_size) *
@@ -71,6 +81,7 @@ void class__fixup_alignment(struct class *class, const struct cu *cu)
 			if (last_member->hole == 0)
 				--class->nr_holes;
 			pos->byte_offset -= dec;
+			pos->bit_offset -= dec * 8;
 			class->type.size -= dec;
 			class__subtract_offsets_from(class, pos, dec);
 		} else for (power2 = cu->addr_size; power2 >= 2; power2 /= 2) {
@@ -84,6 +95,7 @@ void class__fixup_alignment(struct class *class, const struct cu *cu)
 				if (last_member->hole == 0)
 					--class->nr_holes;
 				pos->byte_offset -= remainder;
+				pos->bit_offset -= remainder * 8;
 				class__subtract_offsets_from(class, pos, remainder);
 			} else {
 				const size_t inc = power2 - remainder;
@@ -92,6 +104,7 @@ void class__fixup_alignment(struct class *class, const struct cu *cu)
 					++class->nr_holes;
 				last_member->hole += inc;
 				pos->byte_offset += inc;
+				pos->bit_offset += inc * 8;
 				class->type.size += inc;
 				class__add_offsets_from(class, pos, inc);
 			}
@@ -243,7 +256,6 @@ static void class__move_member(struct class *class, struct class_member *dest,
 		list_prepare_entry(from, class__tags(class), tag.node);
 
 	struct class_member *tmp;
-	uint8_t orig_tail_from_bit_hole = 0;
 	LIST_HEAD(from_list);
 
 	if (verbose)
@@ -260,13 +272,10 @@ static void class__move_member(struct class *class, struct class_member *dest,
 				break;
 			tail_from = pos;
 			orig_tail_from_hole = tail_from->hole;
-			orig_tail_from_bit_hole = tail_from->bit_hole;
 			pos->byte_offset = new_from_offset;
-			pos->hole = 0;
-			pos->bit_hole = 0;
+			pos->bit_offset = new_from_offset * 8 + pos->bitfield_offset;
 			list_move_tail(&pos->tag.node, &from_list);
 		}
-		tail_from->bit_hole = orig_tail_from_bit_hole;
 		list_splice(&from_list, &dest->tag.node);
 		if (verbose)
 			fprintf(fp, "'%s')",
@@ -284,6 +293,7 @@ static void class__move_member(struct class *class, struct class_member *dest,
 		__list_add(&from->tag.node, &dest->tag.node,
 			   dest->tag.node.next);
 		from->byte_offset = new_from_offset;
+		from->bit_offset = new_from_offset * 8 + from->bitfield_offset;
 	}
 
 	if (verbose)
@@ -300,12 +310,10 @@ static void class__move_member(struct class *class, struct class_member *dest,
 			 * Good, no need for padding anymore:
 			 */
 			class->type.size -= from_size + class->padding;
-			class->padding = 0;
 		} else {
 			/*
 			 * No, so just add from_size to the padding:
 			 */
-			class->padding += from_size;
 			if (verbose)
 				fprintf(fp, "/* adding %zd bytes from %s to "
 					"the padding */\n",
@@ -313,7 +321,6 @@ static void class__move_member(struct class *class, struct class_member *dest,
 		}
 	} else if (from_was_last) {
 		class->type.size -= from_size + class->padding;
-		class->padding = 0;
 	} else {
 		/*
 		 * See if we are adding a new hole that is bigger than
@@ -330,25 +337,12 @@ static void class__move_member(struct class *class, struct class_member *dest,
 			class->type.size -= cu->addr_size;
 			class__subtract_offsets_from(class, from_prev,
 						     cu->addr_size);
-		} else {
-			/*
-			 * Add the hole after 'from' + its size to the member
-			 * before it:
-			 */
-			from_prev->hole += orig_tail_from_hole + from_size;
 		}
-		/*
-		 * Check if we have eliminated a hole
-		 */
-		if (dest->hole == from_size)
-			class->nr_holes--;
 	}
 
-	tail_from->hole = dest->hole - (from_size + offset);
-	dest->hole = offset;
+	class__recalc_holes(class);
 
 	if (verbose > 1) {
-		class__find_holes(class);
 		class__fprintf(class, cu, fp);
 		fputc('\n', fp);
 	}
@@ -392,40 +386,22 @@ static void class__move_bit_member(struct class *class, const struct cu *cu,
 			class->type.size -= from_size + from->hole;
 			class__subtract_offsets_from(class, from_prev,
 						     from_size + from->hole);
-		} else if (is_last_member)
-			class->padding += from_size;
-		else
-			from_prev->hole += from_size + from->hole;
-		if (is_last_member) {
-			/*
-			 * Now we don't have bit_padding anymore
-			 */
-			class->bit_padding = 0;
-		} else
-			class->nr_bit_holes--;
-	} else {
-		/*
-		 * Add add the holes after from + its size to the member
-		 * before it:
-		 */
-		from_prev->bit_hole += from->bit_hole + from->bitfield_size;
-		from_prev->hole = from->hole;
+		}
 	}
-	from->bit_hole = dest->bit_hole - from->bitfield_size;
 	/*
 	 * Tricky, what are the rules for bitfield layouts on this arch?
 	 * Assume its IA32
 	 */
 	from->bitfield_offset = dest->bitfield_offset + dest->bitfield_size;
 	/*
-	 * Now both have the some offset:
+	 * Now both have the same offset:
 	 */
 	from->byte_offset = dest->byte_offset;
-	dest->bit_hole = 0;
-	from->hole = dest->hole;
-	dest->hole = 0;
+	from->bit_offset = dest->byte_offset * 8 + from->bitfield_offset;
+
+	class__recalc_holes(class);
+
 	if (verbose > 1) {
-		class__find_holes(class);
 		class__fprintf(class, cu, fp);
 		fputc('\n', fp);
 	}
@@ -438,22 +414,16 @@ static void class__demote_bitfield_members(struct class *class,
 					   const struct base_type *new_type,
 					   type_id_t new_type_id)
 {
-	const uint8_t bit_diff = old_type->bit_size - new_type->bit_size;
 	struct class_member *member =
 		list_prepare_entry(from, class__tags(class), tag.node);
 
 	list_for_each_entry_from(member, class__tags(class), tag.node) {
 		if (member->tag.tag != DW_TAG_member)
 			continue;
-		/*
-		 * Assume IA32 bitfield layout
-		 */
-		member->bitfield_offset -= bit_diff;
 		member->byte_size = new_type->bit_size / 8;
 		member->tag.type = new_type_id;
 		if (member == to)
 			break;
-		member->bit_hole = 0;
 	}
 }
 
@@ -494,7 +464,7 @@ static int class__demote_bitfields(struct class *class, const struct cu *cu,
 	struct class_member *member;
 	struct class_member *bitfield_head = NULL;
 	const struct tag *old_type_tag, *new_type_tag;
-	size_t current_bitfield_size = 0, size, bytes_needed, new_size;
+	size_t current_bitfield_size = 0, size, bytes_needed;
 	int some_was_demoted = 0;
 
 	type__for_each_data_member(&class->type, member) {
@@ -562,19 +532,10 @@ static int class__demote_bitfields(struct class *class, const struct cu *cu,
 						       tag__base_type(old_type_tag),
 						       tag__base_type(new_type_tag),
 						       new_type_id);
-			new_size = member->byte_size;
-			member->hole = size - new_size;
-			if (member->hole != 0)
-				++class->nr_holes;
-			member->bit_hole = new_size * 8 - current_bitfield_size;
+			class__recalc_holes(class);
 			some_was_demoted = 1;
-			/*
-			 * Have we packed it so that there are no hole now?
-			 */
-			if (member->bit_hole == 0)
-				--class->nr_bit_holes;
+
 			if (verbose > 1) {
-				class__find_holes(class);
 				class__fprintf(class, cu, fp);
 				fputc('\n', fp);
 			}
@@ -611,29 +572,13 @@ static int class__demote_bitfields(struct class *class, const struct cu *cu,
 		}
 
 		class__demote_bitfield_members(class, member, member,
-					tag__base_type(old_type_tag),
-					tag__base_type(new_type_tag),
+					       tag__base_type(old_type_tag),
+					       tag__base_type(new_type_tag),
 					       new_type_id);
-		new_size = member->byte_size;
-		member->hole = 0;
-		/*
-		 * Do we need byte padding?
-		 */
-		if (member->byte_offset + new_size < class__size(class)) {
-			class->padding = (class__size(class) -
-					  (member->byte_offset + new_size));
-			class->bit_padding = 0;
-			member->bit_hole = (new_size * 8 -
-					    member->bitfield_size);
-		} else {
-			class->padding = 0;
-			class->bit_padding = (new_size * 8 -
-					      member->bitfield_size);
-			member->bit_hole = 0;
-		}
+		class__recalc_holes(class);
 		some_was_demoted = 1;
+
 		if (verbose > 1) {
-			class__find_holes(class);
 			class__fprintf(class, cu, fp);
 			fputc('\n', fp);
 		}
@@ -778,10 +723,12 @@ struct irq_cfg {
 			}
 			bitfield_head = NULL;
 		}
+	if (fixup_was_done) {
+		class__recalc_holes(class);
+	}
 	if (verbose && fixup_was_done) {
 		fprintf(fp, "/* bitfield types were fixed */\n");
 		if (verbose > 1) {
-			class__find_holes(class);
 			class__fprintf(class, cu, fp);
 			fputc('\n', fp);
 		}
@@ -794,6 +741,7 @@ void class__reorganize(struct class *class, const struct cu *cu,
 	struct class_member *member, *brother, *last_member;
 	size_t alignment_size;
 
+	class__find_holes(class);
 	class__fixup_member_types(class, cu, verbose, fp);
 
 	while (class__demote_bitfields(class, cu, verbose, fp))
@@ -802,7 +750,6 @@ void class__reorganize(struct class *class, const struct cu *cu,
 	/* Now try to combine holes */
 restart:
 	alignment_size = 0;
-	class__find_holes(class);
 	/*
 	 * It can be NULL if this class doesn't have any data members,
 	 * just inheritance entries
-- 
2.17.1