Sure! Please see gdb backtrace output below.

(gdb) bt
#0  __GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:51
#1  0x00007f289d5c7921 in __GI_abort () at abort.c:79
#2  0x00007f289d610967 in __libc_message (action=action@entry=do_abort,
    fmt=fmt@entry=0x7f289d73db0d "%s\n") at ../sysdeps/posix/libc_fatal.c:181
#3  0x00007f289d6179da in malloc_printerr (
    str=str@entry=0x7f289d73f368 "malloc_consolidate(): invalid chunk size")
    at malloc.c:5342
#4  0x00007f289d617c7e in malloc_consolidate (av=av@entry=0x7f289d972c40 <main_arena>)
    at malloc.c:4471
#5  0x00007f289d61b968 in _int_malloc (av=av@entry=0x7f289d972c40 <main_arena>,
    bytes=bytes@entry=33328) at malloc.c:3713
#6  0x00007f289d620275 in _int_memalign (bytes=32768, alignment=512,
    av=0x7f289d972c40 <main_arena>) at malloc.c:4683
#7  _mid_memalign (address=<optimized out>, bytes=32768, alignment=<optimized out>)
    at malloc.c:3315
#8  __GI___libc_memalign (alignment=<optimized out>, bytes=bytes@entry=32768)
    at malloc.c:3266
#9  0x000055fe39e2d7ec in __initbuf (bp=bp@entry=0x55fe3c781010,
    btp=btp@entry=0x55fe3b88a080, bno=bno@entry=98799836480,
    bytes=bytes@entry=32768) at rdwr.c:239
#10 0x000055fe39e2d8a4 in libxfs_initbuf (bytes=32768, bno=98799836480,
    btp=0x55fe3b88a080, bp=0x55fe3c781010) at rdwr.c:266
#11 libxfs_getbufr (btp=btp@entry=0x55fe3b88a080, blkno=blkno@entry=98799836480,
    bblen=<optimized out>) at rdwr.c:345
#12 0x000055fe39e2d9ab in libxfs_balloc (key=<optimized out>) at rdwr.c:554
#13 0x000055fe39e77bf8 in cache_node_allocate (key=0x7ffef716dcc0,
    cache=0x55fe3b879a70) at cache.c:305
#14 cache_node_get (cache=0x55fe3b879a70, key=key@entry=0x7ffef716dcc0,
    nodep=nodep@entry=0x7ffef716dc60) at cache.c:451
#15 0x000055fe39e2d496 in __cache_lookup (key=key@entry=0x7ffef716dcc0,
    flags=flags@entry=0, bpp=bpp@entry=0x7ffef716dcb8) at rdwr.c:388
#16 0x000055fe39e2e91f in libxfs_getbuf_flags (bpp=0x7ffef716dcb8, flags=0,
    len=<optimized out>, blkno=98799836480, btp=<optimized out>) at rdwr.c:440
#17 libxfs_buf_read_map (btp=0x55fe3b88a080, map=map@entry=0x7ffef716dd60,
    nmaps=nmaps@entry=1, flags=flags@entry=2, bpp=bpp@entry=0x7ffef716dd58,
    ops=ops@entry=0x55fe3a0adae0 <xfs_inode_buf_ops>) at rdwr.c:655
#18 0x000055fe39e1bc64 in libxfs_buf_read (ops=0x55fe3a0adae0 <xfs_inode_buf_ops>,
    bpp=0x7ffef716dd58, flags=2, numblks=64, blkno=98799836480,
    target=<optimized out>) at ../libxfs/libxfs_io.h:173
#19 set_cur (type=0x55fe3a0b11a8 <__typtab_crc+840>, blknum=98799836480, len=64,
    ring_flag=ring_flag@entry=0, bbmap=bbmap@entry=0x0) at io.c:550
#20 0x000055fe39e2155f in copy_inode_chunk (rp=0x55fe420f42a8,
    agno=<optimized out>) at metadump.c:2527
#21 scanfunc_ino (block=<optimized out>, agno=<optimized out>,
    agbno=<optimized out>, level=<optimized out>, btype=<optimized out>,
    arg=<optimized out>) at metadump.c:2604
#22 0x000055fe39e1d7df in scan_btree (agno=46, agbno=1553279, level=level@entry=1,
    btype=btype@entry=TYP_INOBT, arg=arg@entry=0x7ffef716e030,
    func=func@entry=0x55fe39e210b0 <scanfunc_ino>) at metadump.c:403
#23 0x000055fe39e2133d in scanfunc_ino (block=<optimized out>, agno=46,
    agbno=1197680, level=1, btype=TYP_INOBT, arg=0x7ffef716e030) at metadump.c:2627
#24 0x000055fe39e1d7df in scan_btree (agno=agno@entry=46, agbno=1197680, level=2,
    btype=btype@entry=TYP_INOBT, arg=arg@entry=0x7ffef716e030,
    func=func@entry=0x55fe39e210b0 <scanfunc_ino>) at metadump.c:403
#25 0x000055fe39e20eca in copy_inodes (agi=0x55fe41ca9400, agno=46)
    at metadump.c:2660
#26 scan_ag (agno=46) at metadump.c:2784
#27 metadump_f (argc=<optimized out>, argv=<optimized out>) at metadump.c:3086
#28 0x000055fe39e030d1 in main (argc=<optimized out>, argv=<optimized out>)
    at init.c:190
(gdb)

Best,

Sean

On Wed, Feb 2, 2022 at 5:06 PM Dave Chinner <david@xxxxxxxxxxxxx> wrote:
>
> On Wed, Feb 02, 2022 at 03:18:34PM -0500, Sean Caron wrote:
> > Hi Dave,
> >
> > It counted up to inode 13555712 and then crashed with the error:
> >
> > malloc_consolidate(): invalid chunk size
>
> That sounds like heap corruption or something similar - that's a
> much more difficult problem to track down.
>
> Can you either run gdb on the core file it left and grab a stack
> trace of where it crashed, or run metadump again from gdb so that it
> can capture the crash and get a stack trace that way?
>
> > Immediately before that, it printed:
> >
> > xfs_metadump: invalid block number 4358190/50414336 (1169892770398976)
> > in bmap extent 0 in symlink ino 98799839421
>
> I don't think that would cause any problems - it just aborts
> processing the extent records in that block and moves on to the next
> valid one that is found.
>
> Cheers,
>
> Dave.
> --
> Dave Chinner
> david@xxxxxxxxxxxxx
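
For anyone following along who wants to capture a trace like the one above,
either of the two approaches Dave suggests works. A minimal sketch, assuming
core dumps are enabled and that the crash is in xfs_db (which does the work
behind the xfs_metadump wrapper); the binary path, core file path, output
file and device names here are illustrative, and the exact flags the
xfs_metadump script passes to xfs_db may differ:

  # allow a core file to be written, reproduce the crash, then open the core
  $ ulimit -c unlimited
  $ gdb /usr/sbin/xfs_db /path/to/core
  (gdb) bt

  # or run the metadump under gdb so it stops at the abort()
  $ gdb --args /usr/sbin/xfs_db -c "metadump /tmp/fs.metadump" /dev/sdXX
  (gdb) run
  (gdb) bt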