Attached config, core bt.
_________________________________________________________________
http://im.live.com/messenger/im/home/?source=hmtextlinkjuly07
Script started on Sun 15 Jul 2007 12:19:30 PM PDT
root@device128:/# gdb -c core.20303 glusterfs
GNU gdb 6.4.90-debian
Copyright (C) 2006 Free Software Foundation, Inc.
GDB is free software, covered by the GNU General Public License, and you are
welcome to change it and/or distribute copies of it under certain
conditions.
Type "show copying" to see the conditions.
There is absolutely no warranty for GDB. Type "show warranty" for details.
This GDB was configured as "i486-linux-gnu"...Using host libthread_db
library "/lib/tls/i686/cmov/libthread_db.so.1".
warning: Can't read pathname for load map: Input/output error.
warning: .dynamic section for "/usr/local/lib/libfuse.so.2" is not at the
expected address
Reading symbols from /usr/local/lib/libglusterfs.so.0...done.
Loaded symbols for /usr/local/lib/libglusterfs.so.0
Reading symbols from /usr/local/lib/libfuse.so.2...done.
Loaded symbols for /usr/local/lib/libfuse.so.2
Reading symbols from /lib/tls/i686/cmov/libdl.so.2...done.
Loaded symbols for /lib/tls/i686/cmov/libdl.so.2
Reading symbols from /lib/tls/i686/cmov/libpthread.so.0...done.
Loaded symbols for /lib/tls/i686/cmov/libpthread.so.0
Reading symbols from /lib/tls/i686/cmov/libc.so.6...done.
Loaded symbols for /lib/tls/i686/cmov/libc.so.6
Reading symbols from /lib/tls/i686/cmov/librt.so.1...done.
Loaded symbols for /lib/tls/i686/cmov/librt.so.1
Reading symbols from /lib/ld-linux.so.2...done.
Loaded symbols for /lib/ld-linux.so.2
Reading symbols from
/usr/local/lib/glusterfs/1.3.0-pre5.2/xlator/protocol/client.so...done.
Loaded symbols for
/usr/local/lib/glusterfs/1.3.0-pre5.2/xlator/protocol/client.so
Reading symbols from
/usr/local/lib/glusterfs/1.3.0-pre5.2/xlator/cluster/unify.so...done.
Loaded symbols for
/usr/local/lib/glusterfs/1.3.0-pre5.2/xlator/cluster/unify.so
Reading symbols from
/usr/local/lib/glusterfs/1.3.0-pre5.2/xlator/performance/write-behind.so...done.
Loaded symbols for
/usr/local/lib/glusterfs/1.3.0-pre5.2/xlator/performance/write-behind.so
Reading symbols from
/usr/local/lib/glusterfs/1.3.0-pre5.2/xlator/performance/read-ahead.so...done.
Loaded symbols for
/usr/local/lib/glusterfs/1.3.0-pre5.2/xlator/performance/read-ahead.so
Reading symbols from
/usr/local/lib/glusterfs/1.3.0-pre5.2/transport/tcp/client.so...done.
Loaded symbols for
/usr/local/lib/glusterfs/1.3.0-pre5.2/transport/tcp/client.so
Reading symbols from
/usr/local/lib/glusterfs/1.3.0-pre5.2/scheduler/alu.so...done.
Loaded symbols for /usr/local/lib/glusterfs/1.3.0-pre5.2/scheduler/alu.so
Reading symbols from /lib/tls/i686/cmov/libnss_files.so.2...done.
Loaded symbols for /lib/tls/i686/cmov/libnss_files.so.2
Reading symbols from /lib/tls/i686/cmov/libnss_dns.so.2...done.
Loaded symbols for /lib/tls/i686/cmov/libnss_dns.so.2
Reading symbols from /lib/tls/i686/cmov/libresolv.so.2...done.
Loaded symbols for /lib/tls/i686/cmov/libresolv.so.2
Reading symbols from /lib/libgcc_s.so.1...done.
Loaded symbols for /lib/libgcc_s.so.1
Core was generated by `[glusterfs]
'.
Program terminated with signal 11, Segmentation fault.
#0 0xb76006c1 in gf_unify_self_heal (frame=0x80c0818, this=0x8059868,
local=0x80d2858)
at ../../../../../xlators/cluster/unify/src/unify-self-heal.c:427
427 ../../../../../xlators/cluster/unify/src/unify-self-heal.c: No such file
or directory.
in ../../../../../xlators/cluster/unify/src/unify-self-heal.c
(gdb) bt
#0 0xb76006c1 in gf_unify_self_heal (frame=0x80c0818, this=0x8059868,
local=0x80d2858)
at ../../../../../xlators/cluster/unify/src/unify-self-heal.c:427
#1 0xb75f3ab9 in unify_lookup_cbk (frame=0x80c0818, cookie=0x8054958,
this=0x8059868, op_ret=0, op_errno=116, inode=0x80d3980, buf=0x80cbce0)
at ../../../../../xlators/cluster/unify/src/unify.c:302
#2 0xb760ce28 in client_lookup_cbk (frame=0x809c1e0, args=0x80d1740)
at ../../../../../xlators/protocol/client/src/client-protocol.c:3873
#3 0xb760db8e in client_protocol_interpret (trans=0x805f878, blk=0x80ce8e8)
at ../../../../../xlators/protocol/client/src/client-protocol.c:4256
#4 0xb760e70e in notify (this=0x8054958, event=2, data=0x805f878)
at ../../../../../xlators/protocol/client/src/client-protocol.c:4565
#5 0xb7f8fbcf in transport_notify (this=0x805f878, event=1)
at ../../../libglusterfs/src/transport.c:152
#6 0xb7f902d8 in epoll_notify (eevent=1, data=0x805f878)
at ../../../libglusterfs/src/epoll.c:54
#7 0xb7f905bc in sys_epoll_iteration (ctx=0xbf996334)
at ../../../libglusterfs/src/epoll.c:146
#8 0xb7f8fdfc in poll_iteration (ctx=0xbf996334)
at ../../../libglusterfs/src/transport.c:260
#9 0x0804a8ce in main (argc=8, argv=0xbf996424)
at ../../../glusterfs-fuse/src/glusterfs.c:348
(gdb) quit
root@device128:/# exit
Script done on Sun 15 Jul 2007 12:19:42 PM PDT
### file: client-volume.spec.sample
##############################################
### GlusterFS Client Volume Specification ##
##############################################
#### CONFIG FILE RULES:
### "#" is comment character.
### - Config file is case sensitive
### - Options within a volume block can be in any order.
### - Spaces or tabs are used as delimiter within a line.
### - Each option should end within a line.
### - Missing or commented fields will assume default values.
### - Blank/commented lines are allowed.
### - Sub-volumes should already be defined above before referring.
#### Add client feature and attach to remote subvolume
#volume glfsd115-250GB-truecrypt
# type protocol/client
# option transport-type tcp/client # for TCP/IP transport
# option remote-host dell115 # IP address of the remote brick
## option remote-port 6996 # default server port is 6996
## option transport-timeout 30 # seconds to wait for a reply
# # from server for each request
# option remote-subvolume iothreads # name of the remote volume
#end-volume
# on abit101
#volume namespace-child
# type protocol/client
# option transport-type tcp/client # for TCP/IP transport
# option remote-host vm48 # IP address of the remote brick
# option remote-port 6996 # default server port is 6996
# option remote-subvolume iothreads-brick-namespace # name of the remote volume
#end-volume
volume glfsd48-750GB-101SATAp0
type protocol/client
option transport-type tcp/client # for TCP/IP transport
option remote-host vm48 # IP address of the remote brick
# option remote-port 6996 # default server port is 6996
# option transport-timeout 30 # seconds to wait for a reply
# from server for each request
option remote-subvolume iothreads-brick # name of the remote volume
end-volume
volume glfsd49-40GB-118IDEss
type protocol/client
option transport-type tcp/client # for TCP/IP transport
option remote-host vm49 # IP address of the remote brick
option remote-port 6996 # default server port is 6996
option transport-timeout 30 # seconds to wait for a reply
# from server for each request
option remote-subvolume iothreads # name of the remote volume
end-volume
volume glfsd128-500GB-128IDEss
type protocol/client
option transport-type tcp/client # for TCP/IP transport
option remote-host device128 # IP address of the remote brick
# option remote-port 6996 # default server port is 6996
# option transport-timeout 30 # seconds to wait for a reply
# from server for each request
option remote-subvolume iothreads # name of the remote volume
end-volume
volume glfsd129-750GB-IDEps
type protocol/client
option transport-type tcp/client # for TCP/IP transport
option remote-host host129 # IP address of the remote brick
# option remote-port 6996 # default server port is 6996
# option transport-timeout 30 # seconds to wait for a reply
# from server for each request
option remote-subvolume iothreads # name of the remote volume
end-volume
#volume afr-glfsd49-40GB-118IDEss
# type cluster/afr
# subvolumes glfsd49-40GB-118IDEss
# option replicate *:1
#end-volume
#volume afr-glfsd48-750GB-101SATAp0
# type cluster/afr
# subvolumes glfsd48-750GB-101SATAp0
# option replicate *:1
#end-volume
#volume afr-glfsd115-250GB-truecrypt
# type cluster/afr
# subvolumes glfsd115-250GB-truecrypt
# option replicate *:1
#end-volume
#volume afr-glfsd128-500GB-128IDEss
# type cluster/afr
# subvolumes glfsd128-500GB-128IDEss
# option replicate *:1
#end-volume
#volume afr-glfsd129-750GB-IDEps
# type cluster/afr
# subvolumes glfsd129-750GB-IDEps
# option replicate *:1
#end-volume
volume bricks
type cluster/unify
#option namespace namespace-child # Additional "management" Brick
option namespace glfsd129-750GB-IDEps
#subvolumes afr-glfsd49-40GB-118IDEss afr-glfsd48-750GB-101SATAp0 afr-glfsd115-250GB-truecrypt afr-glfsd128-500GB-128IDEss afr-glfsd129-750GB-IDEps
#subvolumes glfsd49-40GB-118IDEss glfsd48-750GB-101SATAp0 glfsd115-250GB-truecrypt glfsd128-500GB-128IDEss
subvolumes glfsd49-40GB-118IDEss glfsd48-750GB-101SATAp0 glfsd128-500GB-128IDEss
# Round Robin
#option scheduler rr
# ALU
option scheduler alu
option alu.limits.min-free-disk 6GB
option alu.limits.max-open-files 10000
option alu.order disk-usage:read-usage:write-usage:open-files-usage:disk-speed-usage
option alu.disk-usage.entry-threshold 2GB
option alu.disk-usage.exit-threshold 60MB
option alu.open-files-usage.entry-threshold 1024
option alu.open-files-usage.exit-threshold 32
option alu.stat-refresh.interval 10sec
end-volume
## Add writeback feature
volume writeback
type performance/write-behind
option aggregate-size 131072 # unit in bytes
subvolumes bricks
end-volume
### Add readahead feature
volume readahead
type performance/read-ahead
option page-size 65536 # unit in bytes
option page-count 16 # cache per file = (page-count x page-size)
subvolumes writeback
end-volume
## Add stat-prefetch feature
## If you are not concerned about performance of interactive commands
## like "ls -l", you wouldn't need this translator.
# volume statprefetch
# type performance/stat-prefetch
# option cache-seconds 2 # timeout for stat cache
# subvolumes readahead
# end-volume
# Try to get NFS re-export working, Do i need this?
#volume server
# type protocol/server
# option transport-type tcp/server # For TCP/IP transport
# subvolumes readahead
# option auth.ip.readahead.allow * # Allow access to "brick" volume
#end-volume