Did some more investigating and research and came up with a solution. Short version: 'mount.glusterfs' does a 'stat -c %i <mount point>' to test the mount. Unfortunately, the gluster mount takes a few seconds to complete, but the script does not wait; the 'stat' runs before the FUSE mount is ready and fails, so the script assumes a failed mount, unmounts, and exits:

[gfs1::root:bkp]# sh -x mount.glusterfs gfs2:data /mnt
+ _init gfs2:data /mnt
+ LOG_NONE=NONE
+ LOG_CRITICAL=CRITICAL
+ LOG_ERROR=ERROR
+ LOG_WARNING=WARNING
+ LOG_INFO=INFO
+ LOG_DEBUG=DEBUG
+ LOG_TRACE=TRACE
+ HOST_NAME_MAX=64
+ prefix=/usr
+ exec_prefix=/usr
++ echo /usr/sbin/glusterfs
+ cmd_line=/usr/sbin/glusterfs
+ case `uname -s` in
++ uname -s
+ getinode='stat -c %i '
+ getdev='stat -c %d '
+ lgetinode='stat -c %i -L'
+ lgetdev='stat -c %d -L'
+ mounttab=/etc/mtab
+ UPDATEDBCONF=/etc/updatedb.conf
+ main gfs2:data /mnt
++ sed -n 's/.*\--[ ]*\([^ ]*\).*/\1/p'
++ echo gfs2:data /mnt
+ helper=
+ in_opt=no
+ pos_args=0
+ for opt in '"$@"'
+ '[' no = yes ']'
+ '[' gfs2:data = -o ']'
+ case $pos_args in
+ volfile_loc=gfs2:data
+ pos_args=1
+ for opt in '"$@"'
+ '[' no = yes ']'
+ '[' /mnt = -o ']'
+ case $pos_args in
+ mount_point=/mnt
+ pos_args=2
+ '[' no = yes -o 2 -lt 2 ']'
+ '[' -r gfs2:data ']'
++ sed -n 's/\([a-zA-Z0-9:.\-]*\):.*/\1/p'
++ echo gfs2:data
+ server_ip=gfs2
++ sed -n 's/.*:\([^ ]*\).*/\1/p'
++ echo gfs2:data
+ test_str=data
+ '[' -n data ']'
+ volume_id=data
+ volfile_loc=
+ '[' -n '' ']'
+ '[' -z /mnt -o '!' -d /mnt ']'
+ grep -q '[[:space:]+]/mnt[[:space:]+]fuse' /etc/mtab
+ check_recursive_mount /mnt
+ '[' /mnt = / ']'
+ mnt_dir=/mnt
+ export PATH
+ which getfattr
+ '[' 0 -ne 0 ']'
+ grep -iq trusted.gfid=
+ getfattr -n trusted.gfid /mnt
+ '[' 1 -eq 0 ']'
+ GLUSTERD_WORKDIR=/var/lib/glusterd
+ ls -L /var/lib/glusterd/vols/data/bricks/gfs1:-gfs-brick0-data /var/lib/glusterd/vols/data/bricks/gfs2:-gfs-brick0-data
+ '[' 0 -ne 0 ']'
++ cut -d = -f 2
++ grep '^path' /var/lib/glusterd/vols/data/bricks/gfs1:-gfs-brick0-data /var/lib/glusterd/vols/data/bricks/gfs2:-gfs-brick0-data
+ brick_path='/gfs/brick0/data /gfs/brick0/data'
++ stat -c %i -L /
+ root_inode=2
++ stat -c %d -L /
+ root_dev=64512
++ stat -c %i -L /mnt
+ mnt_inode=387856
++ stat -c %d -L /mnt
+ mnt_dev=64512
+ for brick in '"$brick_path"'
+ ls /gfs/brick0/data /gfs/brick0/data
+ '[' 0 -ne 0 ']'
+ grep -iq trusted.gfid=
+ getfattr -n trusted.gfid '/gfs/brick0/data /gfs/brick0/data'
+ '[' 1 -ne 0 ']'
+ continue
+ test -f /etc/updatedb.conf
+ grep -q glusterfs /etc/updatedb.conf
+ start_glusterfs
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ volume_id_rdma=
+ '[' -z '' ']'
+ '[' -n gfs2 ']'
++ echo '/usr/sbin/glusterfs --volfile-server=gfs2'
+ cmd_line='/usr/sbin/glusterfs --volfile-server=gfs2'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n '' ']'
+ '[' -n data ']'
+ '[' -n '' ']'
++ echo '/usr/sbin/glusterfs --volfile-server=gfs2 --volfile-id=data'
+ cmd_line='/usr/sbin/glusterfs --volfile-server=gfs2 --volfile-id=data'
+ '[' -n '' ']'
++ echo '/usr/sbin/glusterfs --volfile-server=gfs2 --volfile-id=data /mnt'
+ cmd_line='/usr/sbin/glusterfs --volfile-server=gfs2 --volfile-id=data /mnt'
+ err=0
+ /usr/sbin/glusterfs --volfile-server=gfs2 --volfile-id=data /mnt
++ stat -c %i /mnt
+ inode=
+ '[' -z '' ']'
+ inode=0
+ '[' 0 -ne 1 ']'
+ err=1
+ '[' 1 -eq 1 ']'
+ echo 'Mount failed. Please check the log file for more details.'
Mount failed. Please check the log file for more details.
+ umount /mnt
+ exit 1
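For reference, the check that fails boils down to something like the sketch below. This is my own minimal standalone rendering, not the actual mount.glusterfs code: on Linux the root inode of a mounted FUSE filesystem is 1, while before the mount is up 'stat' either fails outright or sees the plain directory (inode 387856 in the trace above).

    mnt=/mnt
    # A ready FUSE mount reports root inode 1; anything else (or a failed
    # stat, which leaves $inode empty) is treated as a failed mount.
    inode=$(stat -c %i "$mnt" 2>/dev/null)
    if [ "${inode:-0}" -ne 1 ]; then
        echo "Mount failed. Please check the log file for more details."
        umount "$mnt"
        exit 1
    fi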
Adding a simple 'sleep 5' after the 'glusterfs' command seems to fix this. The trace is identical to the run above up to the glusterfs invocation, so only the tail is shown; this time 'stat' sees the FUSE root inode and the check passes:

[gfs1::root:bkp]# sh -x mount.glusterfs gfs2:data /mnt
...
+ err=0
+ /usr/sbin/glusterfs --volfile-server=gfs2 --volfile-id=data /mnt
+ sleep 5
++ stat -c %i /mnt
+ inode=1
+ '[' -z 1 ']'
+ '[' 1 -ne 1 ']'
+ '[' 0 -eq 1 ']'

It is not elegant, but it seems to fix the issue. If anyone knows of a better fix, let me know.
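A fixed sleep is just a guess at how long the mount takes, so a polling loop may be a bit more robust. A rough sketch, not the stock script (the retry count and one-second interval are arbitrary choices of mine):

    # Poll for the FUSE mount instead of sleeping a fixed 5 seconds.
    retries=10
    inode=$(stat -c %i /mnt 2>/dev/null)
    while [ "${inode:-0}" -ne 1 ] && [ "$retries" -gt 0 ]; do
        sleep 1
        retries=$((retries - 1))
        inode=$(stat -c %i /mnt 2>/dev/null)
    done
    # Fall through to the script's existing inode test once the mount
    # comes up or the retries are exhausted.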
Thanks,

---
Hani Duwaik | Mgr, Unix System Administration
650-430-5061 | hduwaik@xxxxxxxxxxxx | @NetSuite <https://twitter.com/netsuite>
NetSuite <http://www.netsuite.com/>: Where Business is Going

On 2/28/15, 12:38 PM, "Niels de Vos" <ndevos@xxxxxxxxxx> wrote:

>On Sat, Feb 28, 2015 at 06:49:29PM +0000, Duwaik, Hani wrote:
>> Having an issue trying to set up gluster 3.5.3-1 on OEL 6.6.
>>
>> After installing and setting up gluster, I am able to NFS mount the
>> volume without any issues. However, I am unable to complete a
>> 'glusterfs' mount.
>
>Unfortunately, the /var/log/glusterfs/data.log does not have an
>indication of why it failed. Could you mount with more verbose logging
>enabled?
>
>    # mount -t glusterfs -o log-level=DEBUG server01x:/data /data
>
>If the log still does not explain what's wrong, you could replace DEBUG
>with TRACE to get even more messages.
>
>HTH,
>Niels
>
>> Systems are OEL6.6 x86_64 with the following packages installed:
>>
>> glusterfs-3.5.3-1.el6.x86_64
>> glusterfs-api-3.5.3-1.el6.x86_64
>> glusterfs-cli-3.5.3-1.el6.x86_64
>> glusterfs-fuse-3.5.3-1.el6.x86_64
>> glusterfs-libs-3.5.3-1.el6.x86_64
>> glusterfs-server-3.5.3-1.el6.x86_64
>>
>> Volume info:
>> ===========
>>
>> Volume Name: data
>> Type: Distributed-Replicate
>> Volume ID: 6e206cc1-d037-41a3-b614-869162ab3ece
>> Status: Started
>> Number of Bricks: 3 x 2 = 6
>> Transport-type: tcp
>> Bricks:
>> Brick1: server01x:/gfs/brick0/data
>> Brick2: server02x:/gfs/brick0/data
>> Brick3: server03x:/gfs/brick0/data
>> Brick4: server04x:/gfs/brick0/data
>> Brick5: server05x:/gfs/brick0/data
>> Brick6: server06x:/gfs/brick0/data
>> Options Reconfigured:
>> cluster.background-self-heal-count: 2
>> cluster.data-self-heal-algorithm: full
>> performance.client-io-threads: on
>> performance.cache-size: 128MB
>> performance.io-thread-count: 64
>>
>> When I try to mount via gluster, I get the generic failure:
>>
>> server02> mount -t glusterfs server01x:/data /data
>> Mount failed. Please check the log file for more details.
>>
>> The logs only show:
>>
>> /var/log/glusterfs/bricks/gfs-brick0-data.log:
>> [2015-02-28 18:11:54.837363] I [server-handshake.c:575:server_setvolume] 0-data-server: accepted client from server02-24970-2015/02/28-18:11:53:352986-data-client-0-0-0 (version: 3.5.3)
>> [2015-02-28 18:11:54.853135] I [server.c:520:server_rpc_notify] 0-data-server: disconnecting connection from server02-24970-2015/02/28-18:11:53:352986-data-client-0-0-0
>> [2015-02-28 18:11:54.853226] I [client_t.c:417:gf_client_unref] 0-data-server: Shutting down connection server02-24970-2015/02/28-18:11:53:352986-data-client-0-0-0
>>
>> And
>>
>> /var/log/glusterfs/data.log
>>
>> [2015-02-28 18:37:17.600728] I [glusterfsd.c:1959:main] 0-/usr/sbin/glusterfs: Started running /usr/sbin/glusterfs version 3.5.3 (/usr/sbin/glusterfs --volfile-server=server01x.svale.netledger.com --volfile-id=/data /data)
>> [2015-02-28 18:37:17.605373] I [socket.c:3645:socket_init] 0-glusterfs: SSL support is NOT enabled
>> [2015-02-28 18:37:17.605404] I [socket.c:3660:socket_init] 0-glusterfs: using system polling thread
>> [2015-02-28 18:37:18.726440] I [dht-shared.c:314:dht_init_regex] 0-data-dht: using regex rsync-hash-regex = ^\.(.+)\.[^.]+$
>> [2015-02-28 18:37:18.733444] I [socket.c:3645:socket_init] 0-data-client-5: SSL support is NOT enabled
>> [2015-02-28 18:37:18.733473] I [socket.c:3660:socket_init] 0-data-client-5: using system polling thread
>> [2015-02-28 18:37:18.733923] I [socket.c:3645:socket_init] 0-data-client-4: SSL support is NOT enabled
>> [2015-02-28 18:37:18.733943] I [socket.c:3660:socket_init] 0-data-client-4: using system polling thread
>> [2015-02-28 18:37:18.734404] I [socket.c:3645:socket_init] 0-data-client-3: SSL support is NOT enabled
>> [2015-02-28 18:37:18.734426] I [socket.c:3660:socket_init] 0-data-client-3: using system polling thread
>> [2015-02-28 18:37:18.734859] I [socket.c:3645:socket_init] 0-data-client-2: SSL support is NOT enabled
>> [2015-02-28 18:37:18.734906] I [socket.c:3660:socket_init] 0-data-client-2: using system polling thread
>> [2015-02-28 18:37:18.735364] I [socket.c:3645:socket_init] 0-data-client-1: SSL support is NOT enabled
>> [2015-02-28 18:37:18.735386] I [socket.c:3660:socket_init] 0-data-client-1: using system polling thread
>> [2015-02-28 18:37:18.735823] I [socket.c:3645:socket_init] 0-data-client-0: SSL support is NOT enabled
>> [2015-02-28 18:37:18.735843] I [socket.c:3660:socket_init] 0-data-client-0: using system polling thread
>> [2015-02-28 18:37:18.735903] I [client.c:2294:notify] 0-data-client-0: parent translators are ready, attempting connect on transport
>> [2015-02-28 18:37:18.744117] I [client.c:2294:notify] 0-data-client-1: parent translators are ready, attempting connect on transport
>> [2015-02-28 18:37:18.759585] I [client.c:2294:notify] 0-data-client-2: parent translators are ready, attempting connect on transport
>> [2015-02-28 18:37:18.767297] I [client.c:2294:notify] 0-data-client-3: parent translators are ready, attempting connect on transport
>> [2015-02-28 18:37:18.784718] I [client.c:2294:notify] 0-data-client-4: parent translators are ready, attempting connect on transport
>> [2015-02-28 18:37:18.793474] I [client.c:2294:notify] 0-data-client-5: parent translators are ready, attempting connect on transport
>>
>> Final graph:
>>
>> +------------------------------------------------------------------------------+
>>   1: volume data-client-0
>>   2:   type protocol/client
>>   3:   option remote-host server01x
>>   4:   option remote-subvolume /gfs/brick0/data
>>   5:   option transport-type socket
>>   6:   option send-gids true
>>   7: end-volume
>>   8:
>>   9: volume data-client-1
>>  10:   type protocol/client
>>  11:   option remote-host server02x
>>  12:   option remote-subvolume /gfs/brick0/data
>>  13:   option transport-type socket
>>  14:   option send-gids true
>>  15: end-volume
>>  16:
>>  17: volume data-replicate-0
>>  18:   type cluster/replicate
>>  19:   option background-self-heal-count 2
>>  20:   option data-self-heal-algorithm full
>>  21:   subvolumes data-client-0 data-client-1
>>  22: end-volume
>>  23:
>>  24: volume data-client-2
>>  25:   type protocol/client
>>  26:   option remote-host server03x
>>  27:   option remote-subvolume /gfs/brick0/data
>>  28:   option transport-type socket
>>  29:   option send-gids true
>>  30: end-volume
>>  31:
>>  32: volume data-client-3
>>  33:   type protocol/client
>>  34:   option remote-host server04x
>>  35:   option remote-subvolume /gfs/brick0/data
>>  36:   option transport-type socket
>>  37:   option send-gids true
>>  38: end-volume
>>  39:
>>  40: volume data-replicate-1
>>  41:   type cluster/replicate
>>  42:   option background-self-heal-count 2
>>  43:   option data-self-heal-algorithm full
>>  44:   subvolumes data-client-2 data-client-3
>>  45: end-volume
>>  46:
>>  47: volume data-client-4
>>  48:   type protocol/client
>>  49:   option remote-host server05x
>>  50:   option remote-subvolume /gfs/brick0/data
>>  51:   option transport-type socket
>>  52:   option send-gids true
>>  53: end-volume
>>  54:
>>  55: volume data-client-5
>>  56:   type protocol/client
>>  57:   option remote-host server06x
>>  58:   option remote-subvolume /gfs/brick0/data
>>  59:   option transport-type socket
>>  60:   option send-gids true
>>  61: end-volume
>>  62:
>>  63: volume data-replicate-2
>>  64:   type cluster/replicate
>>  65:   option background-self-heal-count 2
>>  66:   option data-self-heal-algorithm full
>>  67:   subvolumes data-client-4 data-client-5
>>  68: end-volume
>>  69:
>>  70: volume data-dht
>>  71:   type cluster/distribute
>>  72:   subvolumes data-replicate-0 data-replicate-1 data-replicate-2
>>  73: end-volume
>>  74:
>>  75: volume data-write-behind
>>  76:   type performance/write-behind
>>  77:   subvolumes data-dht
>>  78: end-volume
>>  79:
>>  80: volume data-read-ahead
>>  81:   type performance/read-ahead
>>  82:   subvolumes data-write-behind
>>  83: end-volume
>>  84:
>>  85: volume data-io-cache
>>  86:   type performance/io-cache
>>  87:   option cache-size 128MB
>>  88:   subvolumes data-read-ahead
>>  89: end-volume
>>  90:
>>  91: volume data-quick-read
>>  92:   type performance/quick-read
>>  93:   option cache-size 128MB
>>  94:   subvolumes data-io-cache
>>  95: end-volume
>>  96:
>>  97: volume data-open-behind
>>  98:   type performance/open-behind
>>  99:   subvolumes data-quick-read
>> 100: end-volume
>> 101:
>> 102: volume data-md-cache
>> 103:   type performance/md-cache
>> 104:   subvolumes data-open-behind
>> 105: end-volume
>> 106:
>> 107: volume data-io-threads
>> 108:   type performance/io-threads
>> 109:   option thread-count 64
>> 110:   subvolumes data-md-cache
>> 111: end-volume
>> 112:
>> 113: volume data
>> 114:   type debug/io-stats
>> 115:   option latency-measurement off
>> 116:   option count-fop-hits off
>> 117:   subvolumes data-io-threads
>> 118: end-volume
>> 119:
>> +------------------------------------------------------------------------------+
>>
>> [2015-02-28 18:37:18.803366] I [rpc-clnt.c:1729:rpc_clnt_reconfig] 0-data-client-1: changing port to 49154 (from 0)
>> [2015-02-28 18:37:18.812481] I [rpc-clnt.c:1729:rpc_clnt_reconfig] 0-data-client-0: changing port to 49154 (from 0)
>> [2015-02-28 18:37:18.821302] I [rpc-clnt.c:1729:rpc_clnt_reconfig] 0-data-client-2: changing port to 49154 (from 0)
>> [2015-02-28 18:37:18.821368] I [rpc-clnt.c:1729:rpc_clnt_reconfig] 0-data-client-3: changing port to 49154 (from 0)
>> [2015-02-28 18:37:18.821411] I [client-handshake.c:1677:select_server_supported_programs] 0-data-client-1: Using Program GlusterFS 3.3, Num (1298437), Version (330)
>> [2015-02-28 18:37:18.821525] I [rpc-clnt.c:1729:rpc_clnt_reconfig] 0-data-client-5: changing port to 49154 (from 0)
>> [2015-02-28 18:37:18.821569] I [rpc-clnt.c:1729:rpc_clnt_reconfig] 0-data-client-4: changing port to 49154 (from 0)
>> [2015-02-28 18:37:18.850560] I [client-handshake.c:1462:client_setvolume_cbk] 0-data-client-1: Connected to 10.1.1.148:49154, attached to remote volume '/gfs/brick0/data'.
>> [2015-02-28 18:37:18.850597] I [client-handshake.c:1474:client_setvolume_cbk] 0-data-client-1: Server and Client lk-version numbers are not same, reopening the fds
>> [2015-02-28 18:37:18.850695] I [afr-common.c:4267:afr_notify] 0-data-replicate-0: Subvolume 'data-client-1' came back up; going online.
>> [2015-02-28 18:37:18.850863] I [client-handshake.c:450:client_set_lk_version_cbk] 0-data-client-1: Server lk version = 1
>> [2015-02-28 18:37:18.850945] I [client-handshake.c:1677:select_server_supported_programs] 0-data-client-0: Using Program GlusterFS 3.3, Num (1298437), Version (330)
>> [2015-02-28 18:37:18.851117] I [client-handshake.c:1677:select_server_supported_programs] 0-data-client-2: Using Program GlusterFS 3.3, Num (1298437), Version (330)
>> [2015-02-28 18:37:18.851199] I [client-handshake.c:1677:select_server_supported_programs] 0-data-client-5: Using Program GlusterFS 3.3, Num (1298437), Version (330)
>> [2015-02-28 18:37:18.851277] I [client-handshake.c:1677:select_server_supported_programs] 0-data-client-3: Using Program GlusterFS 3.3, Num (1298437), Version (330)
>> [2015-02-28 18:37:18.851356] I [client-handshake.c:1677:select_server_supported_programs] 0-data-client-4: Using Program GlusterFS 3.3, Num (1298437), Version (330)
>> [2015-02-28 18:37:18.851550] I [client-handshake.c:1462:client_setvolume_cbk] 0-data-client-0: Connected to 10.1.1.147:49154, attached to remote volume '/gfs/brick0/data'.
>> [2015-02-28 18:37:18.851592] I [client-handshake.c:1474:client_setvolume_cbk] 0-data-client-0: Server and Client lk-version numbers are not same, reopening the fds
>> [2015-02-28 18:37:18.851836] I [client-handshake.c:1462:client_setvolume_cbk] 0-data-client-2: Connected to 10.1.1.149:49154, attached to remote volume '/gfs/brick0/data'.
>> [2015-02-28 18:37:18.851878] I [client-handshake.c:1474:client_setvolume_cbk] 0-data-client-2: Server and Client lk-version numbers are not same, reopening the fds
>> [2015-02-28 18:37:18.851941] I [afr-common.c:4267:afr_notify] 0-data-replicate-1: Subvolume 'data-client-2' came back up; going online.
>> [2015-02-28 18:37:18.852002] I [client-handshake.c:450:client_set_lk_version_cbk] 0-data-client-0: Server lk version = 1
>> [2015-02-28 18:37:18.852039] I [client-handshake.c:1462:client_setvolume_cbk] 0-data-client-5: Connected to 10.1.1.152:49154, attached to remote volume '/gfs/brick0/data'.
>> [2015-02-28 18:37:18.852076] I [client-handshake.c:1474:client_setvolume_cbk] 0-data-client-5: Server and Client lk-version numbers are not same, reopening the fds
>> [2015-02-28 18:37:18.852189] I [afr-common.c:4267:afr_notify] 0-data-replicate-2: Subvolume 'data-client-5' came back up; going online.
>> [2015-02-28 18:37:18.852239] I [client-handshake.c:1462:client_setvolume_cbk] 0-data-client-3: Connected to 10.1.1.150:49154, attached to remote volume '/gfs/brick0/data'.
>> [2015-02-28 18:37:18.852256] I [client-handshake.c:1474:client_setvolume_cbk] 0-data-client-3: Server and Client lk-version numbers are not same, reopening the fds
>> [2015-02-28 18:37:18.852846] I [client-handshake.c:1462:client_setvolume_cbk] 0-data-client-4: Connected to 10.1.1.151:49154, attached to remote volume '/gfs/brick0/data'.
>> [2015-02-28 18:37:18.852871] I [client-handshake.c:1474:client_setvolume_cbk] 0-data-client-4: Server and Client lk-version numbers are not same, reopening the fds
>> [2015-02-28 18:37:18.861168] I [fuse-bridge.c:5016:fuse_graph_setup] 0-fuse: switched to graph 0
>> [2015-02-28 18:37:18.861290] I [client-handshake.c:450:client_set_lk_version_cbk] 0-data-client-5: Server lk version = 1
>> [2015-02-28 18:37:18.861323] I [client-handshake.c:450:client_set_lk_version_cbk] 0-data-client-3: Server lk version = 1
>> [2015-02-28 18:37:18.861347] I [client-handshake.c:450:client_set_lk_version_cbk] 0-data-client-4: Server lk version = 1
>> [2015-02-28 18:37:18.861435] I [client-handshake.c:450:client_set_lk_version_cbk] 0-data-client-2: Server lk version = 1
>> [2015-02-28 18:37:18.861440] I [fuse-bridge.c:4857:fuse_thread_proc] 0-fuse: unmounting /data
>> [2015-02-28 18:37:18.861920] W [glusterfsd.c:1095:cleanup_and_exit] (-->/lib64/libc.so.6(clone+0x6d) [0x7f86e52939dd] (-->/lib64/libpthread.so.0(+0x79d1) [0x7f86e59299d1] (-->/usr/sbin/glusterfs(glusterfs_sigwaiter+0xd5) [0x4053e5]))) 0-: received signum (15), shutting down
>> [2015-02-28 18:37:18.861974] I [fuse-bridge.c:5514:fini] 0-fuse: Unmounting '/data'.
>>
>> Any assistance will be greatly appreciated.
>>
>> Thanks,
>>
>> -Hani
_______________________________________________
Gluster-users mailing list
Gluster-users@xxxxxxxxxxx
http://www.gluster.org/mailman/listinfo/gluster-users