[stack@devstack-vm cinder]$ df -h
Filesystem                            Size  Used Avail Use% Mounted on
/dev/vda1                             9.9G  3.7G  6.1G  38% /
devtmpfs                              2.0G     0  2.0G   0% /dev
tmpfs                                 2.0G     0  2.0G   0% /dev/shm
tmpfs                                 2.0G  448K  2.0G   1% /run
tmpfs                                 2.0G     0  2.0G   0% /sys/fs/cgroup
192.168.122.252:/opt/stack/nfs/brick  9.9G  3.7G  6.1G  38% /opt/stack/data/cinder/mnt/f23011fcca5ae3a8b8ebfd7e4af2e190
[stack@devstack-vm cinder]$ sudo mount -t nfs 192.168.122.252:/opt/stack/nfs/brick /opt/stack/data/cinder/mnt/f23011fcca5ae3a8b8ebfd7e4af2e190/
mount.nfs: /opt/stack/data/cinder/mnt/f23011fcca5ae3a8b8ebfd7e4af2e190 is busy or already mounted
[stack@devstack-vm cinder]$ echo $?
32
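Since mount.nfs treats the already-mounted case as a failure (exit status 32, as above), a caller that wants a no-op instead can test the target first. A minimal sketch, using the util-linux mountpoint utility and the export/path from the transcript above:

#!/bin/sh
# Skip the mount when the target is already a mount point, so we never
# hit mount.nfs's "busy or already mounted" failure (exit status 32).
export_path="192.168.122.252:/opt/stack/nfs/brick"
mount_point="/opt/stack/data/cinder/mnt/f23011fcca5ae3a8b8ebfd7e4af2e190"

if mountpoint -q "$mount_point"; then
    echo "$mount_point is already mounted, nothing to do"
else
    sudo mount -t nfs "$export_path" "$mount_point"
fi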
[stack@devstack-vm ~]$ df -h
Filesystem                      Size  Used Avail Use% Mounted on
/dev/vda1                       9.9G  3.7G  6.1G  38% /
devtmpfs                        2.0G     0  2.0G   0% /dev
tmpfs                           2.0G     0  2.0G   0% /dev/shm
tmpfs                           2.0G  448K  2.0G   1% /run
tmpfs                           2.0G     0  2.0G   0% /sys/fs/cgroup
devstack-vm.localdomain:/gvol1  9.9G  3.7G  6.1G  38% /opt/stack/data/cinder/mnt/d45ccec4f1572f6f242b70befa3d80fe
devstack-vm.localdomain:/gvol2  9.9G  3.7G  6.1G  38% /opt/stack/data/cinder/mnt/413c1f8d14058d5b2d07f8a92814bd12
[stack@devstack-vm ~]$ sudo mount -t glusterfs devstack-vm.localdomain:/gvol1 /opt/stack/data/cinder/mnt/d45ccec4f1572f6f242b70befa3d80fe/
/sbin/mount.glusterfs: according to mtab, GlusterFS is already mounted on /opt/stack/data/cinder/mnt/d45ccec4f1572f6f242b70befa3d80fe
[stack@devstack-vm ~]$ echo $?
0
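So with GlusterFS the exit status alone cannot tell you whether anything was actually mounted: mount.glusterfs prints the "already mounted" message and still exits 0. A caller that needs to tell the two cases apart has to consult the mount table itself; a rough sketch, using the volume and path from the transcript above:

#!/bin/sh
# mount.glusterfs exits 0 whether it mounted the volume or found it
# already mounted, so check /proc/mounts instead of the exit status.
volume="devstack-vm.localdomain:/gvol1"
mount_point="/opt/stack/data/cinder/mnt/d45ccec4f1572f6f242b70befa3d80fe"

if grep -q " ${mount_point} fuse.glusterfs " /proc/mounts; then
    echo "$mount_point is already a GlusterFS mount"
else
    sudo mount -t glusterfs "$volume" "$mount_point"
fi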
******************************************************************************************
This behaviour comes from /sbin/mount.glusterfs itself; the relevant portion:
# No need to do a ! -d test, it is taken care of while initializing
# the variable mount_point
[ -z "$mount_point" -o ! -d "$mount_point" ] && {
    echo "ERROR: Mount point does not exist."
    usage;
    exit 0;
}

# Simple check to avoid multiple identical mounts
if grep -q "[[:space:]+]${mount_point}[[:space:]+]fuse" $mounttab; then
    echo -n "$0: according to mtab, GlusterFS is already mounted on "
    echo "$mount_point"
    exit 0;
fi
******************************************************************
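Two details worth noting in that snippet: the "mount point does not exist" branch also exits 0, and the bracket expression [[:space:]+] matches a single whitespace character or a literal '+', not "one or more whitespace" (it still works here because the mtab fields are separated by single spaces). The check is easy to reproduce against a made-up mtab-style line:

# Hypothetical one-liner reproducing the check against a fabricated
# mtab-style line; it matches, which is why mount.glusterfs takes the
# "already mounted" branch and exits 0.
mount_point=/opt/stack/data/cinder/mnt/d45ccec4f1572f6f242b70befa3d80fe
echo "devstack-vm.localdomain:/gvol1 $mount_point fuse.glusterfs rw,relatime 0 0" |
    grep -q "[[:space:]+]${mount_point}[[:space:]+]fuse" && echo matched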
deepak