Hmm, it seems I have something rather inconsistent:
[root@glusterp1 /]# gluster volume create gv1 replica 3 glusterp1:/brick1/gv1 glusterp2:/brick1/gv1 glusterp3:/brick1/gv1
volume create: gv1: failed: Host glusterp3 is not in 'Peer in Cluster' state
[root@glusterp1 /]# gluster peer probe glusterp3
peer probe: success. Host glusterp3 port 24007 already in peer list
[root@glusterp1 /]# gluster peer probe glusterp2
peer probe: success. Host glusterp2 port 24007 already in peer list
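Both probes claim the hosts are already in the peer list, yet create still says glusterp3 is not in 'Peer in Cluster' state. Presumably checking the peer state directly, on every node rather than just this one, would show what state glusterp3 is actually stuck in:

gluster peer status          # run on each node; shows the state each peer is in
systemctl status glusterd    # a guess: confirm glusterd is actually running on glusterp3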
[root@glusterp1 /]# gluster volume create gv1 replica 3 glusterp1:/brick1/gv1 glusterp2:/brick1/gv1 glusterp3:/brick1/gv1
volume create: gv1: failed: /brick1/gv1 is already part of a volume
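As far as I understand it, a brick directory keeps its GlusterFS extended attributes (and a hidden .glusterfs directory) even after the volume that used it is deleted, and that leftover metadata is what makes create complain the path "is already part of a volume". A sketch of what I would try on each of the three nodes, assuming /brick1/gv1 really is safe to reuse:

getfattr -m . -d -e hex /brick1/gv1                    # list the xattrs left behind
setfattr -x trusted.glusterfs.volume-id /brick1/gv1    # drop the old volume id
setfattr -x trusted.gfid /brick1/gv1                   # drop the old gfid, if present
rm -rf /brick1/gv1/.glusterfs                          # remove leftover volume metadata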
[root@glusterp1 /]# gluster volume show
unrecognized word: show (position 1)
[root@glusterp1 /]# gluster volume
add-brick    delete           info     quota          reset      status
barrier      geo-replication  list     rebalance      set        stop
clear-locks  heal             log      remove-brick   start      sync
create       help             profile  replace-brick  statedump  top
[root@glusterp1 /]# gluster volume list
volume1
[root@glusterp1 /]# gluster volume start gv0
volume start: gv0: failed: Volume gv0 does not exist
[root@glusterp1 /]# gluster volume start gv1
volume start: gv1: failed: Volume gv1 does not exist
[root@glusterp1 /]# gluster volume status
Status of volume: volume1
Gluster process                                TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick glusterp1.graywitch.co.nz:/data1         49152     0          Y       2958
Brick glusterp2.graywitch.co.nz:/data1         49152     0          Y       2668
NFS Server on localhost                        N/A       N/A        N       N/A
Self-heal Daemon on localhost                  N/A       N/A        Y       1038
NFS Server on glusterp2.graywitch.co.nz        N/A       N/A        N       N/A
Self-heal Daemon on glusterp2.graywitch.co.nz  N/A       N/A        Y       676
Task Status of Volume volume1
------------------------------------------------------------------------------
There are no active volume tasks
[root@glusterp1 /]#
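So the failed creates mean gv0 and gv1 simply never came into existence; list and status agree that only volume1 is defined. For completeness, this should dump the full brick layout of whatever volumes glusterd does know about:

gluster volume info    # shows type, brick list and options for every defined volume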
On 14 October 2016 at 12:20, Thing <thing.thing@xxxxxxxxx> wrote:
How do I fix this, please? Obviously something isn't happy here, but I have no idea what. I deleted the gluster volume gv0 because I wanted to make it thin provisioned. I have rebuilt "gv0", but I am getting a failure:
==========
[root@glusterp1 /]# df -h
Filesystem                     Size  Used  Avail  Use%  Mounted on
/dev/mapper/centos-root         20G  3.9G    17G   20%  /
devtmpfs                       1.8G     0   1.8G    0%  /dev
tmpfs                          1.8G   12K   1.8G    1%  /dev/shm
tmpfs                          1.8G  8.9M   1.8G    1%  /run
tmpfs                          1.8G     0   1.8G    0%  /sys/fs/cgroup
/dev/mapper/centos-tmp         3.9G   33M   3.9G    1%  /tmp
/dev/mapper/centos-home         50G   41M    50G    1%  /home
/dev/mapper/centos-data1       120G   33M   120G    1%  /data1
/dev/sda1                      997M  312M   685M   32%  /boot
/dev/mapper/centos-var          20G  401M    20G    2%  /var
tmpfs                          368M     0   368M    0%  /run/user/1000
/dev/mapper/vol_brick1-brick1  100G   33M   100G    1%  /brick1
[root@glusterp1 /]# mkdir /brick1/gv0
[root@glusterp1 /]# gluster volume create gv0 replica 3 glusterp1:/brick1/gv0 glusterp2:/brick1/gv0 glusterp3:/brick1/gv0
volume create: gv0: failed: Host glusterp3 is not in 'Peer in Cluster' state
[root@glusterp1 /]# gluster peer probe glusterp3
peer probe: success. Host glusterp3 port 24007 already in peer list
[root@glusterp1 /]# gluster volume create gv0 replica 3 glusterp1:/brick1/gv0 glusterp2:/brick1/gv0 glusterp3:/brick1/gv0
volume create: gv0: failed: /brick1/gv0 is already part of a volume
[root@glusterp1 /]# gluster volume start gv0
volume start: gv0: failed: Volume gv0 does not exist
[root@glusterp1 /]# gluster volume create gv0 replica 3 glusterp1:/brick1/gv0 glusterp2:/brick1/gv0 glusterp3:/brick1/gv0 --force
unrecognized option --force
[root@glusterp1 /]# gluster volume create gv0 replica 3 glusterp1:/brick1/gv0 glusterp2:/brick1/gv0 glusterp3:/brick1/gv0
volume create: gv0: failed: /brick1/gv0 is already part of a volume
[root@glusterp1 /]#
==========
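One note on that last attempt: the CLI rejects --force because, as far as I know, gluster takes force as a bare keyword at the end of the command rather than a GNU-style option:

gluster volume create gv0 replica 3 glusterp1:/brick1/gv0 glusterp2:/brick1/gv0 glusterp3:/brick1/gv0 force

though force alone probably won't help while the bricks still carry the old volume-id xattrs described above.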