Existing code assumes alignment of any integer type, which breaks for packed structs. This patch fixes all the current discrepancies between dwarf and btf loader, when compared using btfdiff. It preserves bit_offset of non-bitfield members, while for bitfield ones it re-calculates initial byte/bit offset using natural alignment of the underlying integer type, which seems to be always the case for bitfields. I've tested this on toy examples for both x86-64 and arm targets, there were no differences reported by btfdiff. Testing on vmlinux on x86-64 shows only these discrepancies, which are unrelated to bit offsets: $ ./btfdiff /tmp/vmlinux4 --- /tmp/btfdiff.dwarf.GIVfpr 2019-02-20 12:18:29.138788970 -0800 +++ /tmp/btfdiff.btf.c3x2KY 2019-02-20 12:18:29.351786365 -0800 @@ -16884,7 +16884,7 @@ struct pebs_record_nhm { }; union hsw_tsx_tuning { struct { - unsigned int cycles_last_block:32; /* 0: 0 4 */ + u32 cycles_last_block:32; /* 0: 0 4 */ u32 hle_abort:1; /* 4:31 4 */ u32 rtm_abort:1; /* 4:30 4 */ u32 instruction_abort:1; /* 4:29 4 */ @@ -26154,7 +26154,7 @@ struct acpi_device_power { /* last cacheline: 40 bytes */ }; struct acpi_device_perf_flags { - unsigned char reserved:8; /* 0: 0 1 */ + u8 reserved:8; /* 0: 0 1 */ /* size: 1, cachelines: 1, members: 1 */ /* last cacheline: 1 bytes */ Signed-off-by: Andrii Nakryiko <andriin@xxxxxx> --- btf_loader.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/btf_loader.c b/btf_loader.c index fc28884..62e7e30 100644 --- a/btf_loader.c +++ b/btf_loader.c @@ -502,19 +502,26 @@ static int class__fixup_btf_bitfields(struct tag *tag, struct cu *cu, struct btf * such unlikely thing happens. 
*/ pos->byte_size = integral_bit_size / 8; + pos->bit_size = type_bit_size; if (integral_bit_size == 0) { - pos->bit_size = integral_bit_size; + pos->bit_size = 0; continue; } - pos->bitfield_offset = pos->bit_offset % integral_bit_size; - if (!btfe->is_big_endian) - pos->bitfield_offset = integral_bit_size - pos->bitfield_offset - pos->bitfield_size; + if (pos->bitfield_size) { + /* bitfields seem to be always aligned, no matter the packing */ + pos->byte_offset = pos->bit_offset / integral_bit_size * integral_bit_size / 8; + } else { + pos->byte_offset = pos->bit_offset / 8; + } + - pos->bit_size = type_bit_size; - pos->byte_offset = (((pos->bit_offset / integral_bit_size) * - integral_bit_size) / 8); + if (pos->bitfield_size) { + pos->bitfield_offset = pos->bit_offset - pos->byte_offset * 8; + if (!btfe->is_big_endian) + pos->bitfield_offset = integral_bit_size - pos->bitfield_offset - pos->bitfield_size; + } } return 0; -- 2.17.1