Re: split brain? but where?




I tried this already.

8><---
[root@glusterp2 fb]# find /bricks/brick1/gv0 -samefile /bricks/brick1/gv0/.glusterfs/ea/fb/eafb8799-4e7a-4264-9213-26997c5a4693
/bricks/brick1/gv0/.glusterfs/ea/fb/eafb8799-4e7a-4264-9213-26997c5a4693
[root@glusterp2 fb]#
8><---

Gluster 4
CentOS 7.4

8><---
[root@glusterp2 fb]# df -h
Filesystem                                               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root                                   19G  3.4G   16G  19% /
devtmpfs                                                 3.8G     0  3.8G   0% /dev
tmpfs                                                    3.8G   12K  3.8G   1% /dev/shm
tmpfs                                                    3.8G  9.0M  3.8G   1% /run
tmpfs                                                    3.8G     0  3.8G   0% /sys/fs/cgroup
/dev/mapper/centos-data1                                 112G   33M  112G   1% /data1
/dev/mapper/centos-var                                    19G  219M   19G   2% /var
/dev/mapper/centos-home                                   47G   38M   47G   1% /home
/dev/mapper/centos-var_lib                               9.4G  178M  9.2G   2% /var/lib
/dev/mapper/vg--gluster--prod--1--2-gluster--prod--1--2  932G  263G  669G  29% /bricks/brick1
/dev/sda1                                                950M  235M  715M  25% /boot
8><---



So the output isn't helping...
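
One more thing that might be worth checking: in .glusterfs, regular files are hardlinks to the real file, while directories are stored as symlinks, so if find -samefile only returns the gfid entry itself, the entry is presumably either a directory or an orphaned gfid. A stat should show which (a guess, based on the output above):

8><---
# A regular file with "Links: 1" would be an orphaned gfid entry (no
# matching file left on this brick); a symbolic link would mean the
# gfid belongs to a directory, which -samefile cannot match by inode.
stat /bricks/brick1/gv0/.glusterfs/ea/fb/eafb8799-4e7a-4264-9213-26997c5a4693
8><---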







On 23 May 2018 at 00:29, Karthik Subrahmanya <ksubrahm@xxxxxxxxxx> wrote:
Hi,

Which version of Gluster are you using?

You can find which file that is using the following command:
find <brickpath> -samefile <brickpath>/.glusterfs/<first two chars of gfid>/<next two chars of gfid>/<full gfid>

Please provide the getfattr output of the file which is in split-brain.
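For the gfid reported by heal info, that would be something like this, run against each brick (brick path taken from your volume info):

getfattr -d -m . -e hex /bricks/brick1/gv0/.glusterfs/ea/fb/eafb8799-4e7a-4264-9213-26997c5a4693
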
The steps to recover from split-brain can be found here,
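
If it turns out to be a data or metadata split-brain on a regular file, one of those steps is picking a source via the CLI; a sketch, using the latest-mtime policy as an example (bigger-file and source-brick variants also exist):

gluster volume heal gv0 split-brain latest-mtime gfid:eafb8799-4e7a-4264-9213-26997c5a4693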

HTH,
Karthik

On Tue, May 22, 2018 at 4:03 AM, Joe Julian <joe@xxxxxxxxxxxxxxxx> wrote:
How do I find what  "eafb8799-4e7a-4264-9213-26997c5a4693"  is?

https://docs.gluster.org/en/v3/Troubleshooting/gfid-to-path/
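
One of the methods from that page, roughly (the server and mount point below are just examples):

mount -t glusterfs -o aux-gfid-mount glusterp1:gv0 /mnt/gv0
getfattr -n trusted.glusterfs.pathinfo -e text /mnt/gv0/.gfid/eafb8799-4e7a-4264-9213-26997c5a4693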


On May 21, 2018 3:22:01 PM PDT, Thing <thing.thing@xxxxxxxxx> wrote:
>Hi,
>
>I seem to have a split-brain issue, but I cannot figure out where it is
>or what it is. Can someone please help me? I can't find what to fix here.
>
>==========
>root@salt-001:~# salt gluster* cmd.run 'df -h'
>glusterp2.graywitch.co.nz:
>    Filesystem                                               Size  Used Avail Use% Mounted on
>    /dev/mapper/centos-root                                   19G  3.4G   16G  19% /
>    devtmpfs                                                 3.8G     0  3.8G   0% /dev
>    tmpfs                                                    3.8G   12K  3.8G   1% /dev/shm
>    tmpfs                                                    3.8G  9.1M  3.8G   1% /run
>    tmpfs                                                    3.8G     0  3.8G   0% /sys/fs/cgroup
>    /dev/mapper/centos-tmp                                   3.8G   33M  3.7G   1% /tmp
>    /dev/mapper/centos-var                                    19G  213M   19G   2% /var
>    /dev/mapper/centos-home                                   47G   38M   47G   1% /home
>    /dev/mapper/centos-data1                                 112G   33M  112G   1% /data1
>    /dev/mapper/centos-var_lib                               9.4G  178M  9.2G   2% /var/lib
>    /dev/mapper/vg--gluster--prod--1--2-gluster--prod--1--2  932G  264G  668G  29% /bricks/brick1
>    /dev/sda1                                                950M  235M  715M  25% /boot
>    tmpfs                                                    771M   12K  771M   1% /run/user/42
>    glusterp2:gv0/glusterp2/images                           932G  273G  659G  30% /var/lib/libvirt/images
>    glusterp2:gv0                                            932G  273G  659G  30% /isos
>    tmpfs                                                    771M   48K  771M   1% /run/user/1000
>    tmpfs                                                    771M     0  771M   0% /run/user/0
>glusterp1.graywitch.co.nz:
>   Filesystem                                     Size  Used Avail Use% Mounted on
>   /dev/mapper/centos-root                         20G  3.5G   17G  18% /
>   devtmpfs                                       3.8G     0  3.8G   0% /dev
>   tmpfs                                          3.8G   12K  3.8G   1% /dev/shm
>   tmpfs                                          3.8G  9.0M  3.8G   1% /run
>   tmpfs                                          3.8G     0  3.8G   0% /sys/fs/cgroup
>   /dev/sda1                                      969M  206M  713M  23% /boot
>   /dev/mapper/centos-home                         50G  4.3G   46G   9% /home
>   /dev/mapper/centos-tmp                         3.9G   33M  3.9G   1% /tmp
>   /dev/mapper/centos-data1                       120G   36M  120G   1% /data1
>   /dev/mapper/vg--gluster--prod1-gluster--prod1  932G  260G  673G  28% /bricks/brick1
>   /dev/mapper/centos-var                          20G  413M   20G   3% /var
>   /dev/mapper/centos00-var_lib                   9.4G  179M  9.2G   2% /var/lib
>   tmpfs                                          771M  8.0K  771M   1% /run/user/42
>   glusterp1:gv0                                  932G  273G  659G  30% /isos
>   glusterp1:gv0/glusterp1/images                 932G  273G  659G  30% /var/lib/libvirt/images
>glusterp3.graywitch.co.nz:
>    Filesystem                                               Size  Used Avail Use% Mounted on
>    /dev/mapper/centos-root                                   20G  3.5G   17G  18% /
>    devtmpfs                                                 3.8G     0  3.8G   0% /dev
>    tmpfs                                                    3.8G   12K  3.8G   1% /dev/shm
>    tmpfs                                                    3.8G  9.0M  3.8G   1% /run
>    tmpfs                                                    3.8G     0  3.8G   0% /sys/fs/cgroup
>    /dev/sda1                                                969M  206M  713M  23% /boot
>    /dev/mapper/centos-var                                    20G  206M   20G   2% /var
>    /dev/mapper/centos-tmp                                   3.9G   33M  3.9G   1% /tmp
>    /dev/mapper/centos-home                                   50G   37M   50G   1% /home
>    /dev/mapper/centos-data01                                120G   33M  120G   1% /data1
>    /dev/mapper/centos00-var_lib                             9.4G  180M  9.2G   2% /var/lib
>    /dev/mapper/vg--gluster--prod--1--3-gluster--prod--1--3  932G  262G  670G  29% /bricks/brick1
>    glusterp3:gv0                                            932G  273G  659G  30% /isos
>    glusterp3:gv0/glusterp3/images                           932G  273G  659G  30% /var/lib/libvirt/images
>    tmpfs                                                    771M   12K  771M   1% /run/user/42
>root@salt-001:~# salt gluster* cmd.run 'getfattr -d -m . -e hex /bricks/brick1/gv0'
>glusterp2.graywitch.co.nz:
>    getfattr: Removing leading '/' from absolute path names
>    # file: bricks/brick1/gv0
>    security.selinux=0x73797374656d5f753a6f626a6563745f723a756e6c6162656c65645f743a733000
>    trusted.gfid=0x00000000000000000000000000000001
>    trusted.glusterfs.dht=0x000000010000000000000000ffffffff
>    trusted.glusterfs.volume-id=0xcfceb3535f0e4cf18b533ccfb1f091d3
>glusterp3.graywitch.co.nz:
>    getfattr: Removing leading '/' from absolute path names
>    # file: bricks/brick1/gv0
>    security.selinux=0x73797374656d5f753a6f626a6563745f723a756e6c6162656c65645f743a733000
>    trusted.gfid=0x00000000000000000000000000000001
>    trusted.glusterfs.dht=0x000000010000000000000000ffffffff
>    trusted.glusterfs.volume-id=0xcfceb3535f0e4cf18b533ccfb1f091d3
>glusterp1.graywitch.co.nz:
>    getfattr: Removing leading '/' from absolute path names
>    # file: bricks/brick1/gv0
>    security.selinux=0x73797374656d5f753a6f626a6563745f723a756e6c6162656c65645f743a733000
>    trusted.gfid=0x00000000000000000000000000000001
>    trusted.glusterfs.dht=0x000000010000000000000000ffffffff
>    trusted.glusterfs.volume-id=0xcfceb3535f0e4cf18b533ccfb1f091d3
>root@salt-001:~# salt gluster* cmd.run 'gluster volume heal gv0 info'
>glusterp2.graywitch.co.nz:
>    Brick glusterp1:/bricks/brick1/gv0
>    <gfid:eafb8799-4e7a-4264-9213-26997c5a4693> - Is in split-brain
>
>    Status: Connected
>    Number of entries: 1
>
>    Brick glusterp2:/bricks/brick1/gv0
>    <gfid:eafb8799-4e7a-4264-9213-26997c5a4693> - Is in split-brain
>
>    Status: Connected
>    Number of entries: 1
>
>    Brick glusterp3:/bricks/brick1/gv0
>    <gfid:eafb8799-4e7a-4264-9213-26997c5a4693> - Is in split-brain
>
>    Status: Connected
>    Number of entries: 1
>glusterp3.graywitch.co.nz:
>    Brick glusterp1:/bricks/brick1/gv0
>    <gfid:eafb8799-4e7a-4264-9213-26997c5a4693> - Is in split-brain
>
>    Status: Connected
>    Number of entries: 1
>
>    Brick glusterp2:/bricks/brick1/gv0
>    <gfid:eafb8799-4e7a-4264-9213-26997c5a4693> - Is in split-brain
>
>    Status: Connected
>    Number of entries: 1
>
>    Brick glusterp3:/bricks/brick1/gv0
>    <gfid:eafb8799-4e7a-4264-9213-26997c5a4693> - Is in split-brain
>
>    Status: Connected
>    Number of entries: 1
>glusterp1.graywitch.co.nz:
>    Brick glusterp1:/bricks/brick1/gv0
>    <gfid:eafb8799-4e7a-4264-9213-26997c5a4693> - Is in split-brain
>
>    Status: Connected
>    Number of entries: 1
>
>    Brick glusterp2:/bricks/brick1/gv0
>    <gfid:eafb8799-4e7a-4264-9213-26997c5a4693> - Is in split-brain
>
>    Status: Connected
>    Number of entries: 1
>
>    Brick glusterp3:/bricks/brick1/gv0
>    <gfid:eafb8799-4e7a-4264-9213-26997c5a4693> - Is in split-brain
>
>    Status: Connected
>    Number of entries: 1
>root@salt-001:~# salt gluster* cmd.run 'getfattr -d -m . -e hex /bricks/brick1/'
>glusterp2.graywitch.co.nz:
>glusterp3.graywitch.co.nz:
>glusterp1.graywitch.co.nz:
>root@salt-001:~# salt gluster* cmd.run 'gluster volume info gv0'
>glusterp2.graywitch.co.nz:
>
>    Volume Name: gv0
>    Type: Replicate
>    Volume ID: cfceb353-5f0e-4cf1-8b53-3ccfb1f091d3
>    Status: Started
>    Snapshot Count: 0
>    Number of Bricks: 1 x 3 = 3
>    Transport-type: tcp
>    Bricks:
>    Brick1: glusterp1:/bricks/brick1/gv0
>    Brick2: glusterp2:/bricks/brick1/gv0
>    Brick3: glusterp3:/bricks/brick1/gv0
>    Options Reconfigured:
>    transport.address-family: inet
>    nfs.disable: on
>    performance.client-io-threads: off
>glusterp1.graywitch.co.nz:
>
>    Volume Name: gv0
>    Type: Replicate
>    Volume ID: cfceb353-5f0e-4cf1-8b53-3ccfb1f091d3
>    Status: Started
>    Snapshot Count: 0
>    Number of Bricks: 1 x 3 = 3
>    Transport-type: tcp
>    Bricks:
>    Brick1: glusterp1:/bricks/brick1/gv0
>    Brick2: glusterp2:/bricks/brick1/gv0
>    Brick3: glusterp3:/bricks/brick1/gv0
>    Options Reconfigured:
>    performance.client-io-threads: off
>    nfs.disable: on
>    transport.address-family: inet
>glusterp3.graywitch.co.nz:
>
>    Volume Name: gv0
>    Type: Replicate
>    Volume ID: cfceb353-5f0e-4cf1-8b53-3ccfb1f091d3
>    Status: Started
>    Snapshot Count: 0
>    Number of Bricks: 1 x 3 = 3
>    Transport-type: tcp
>    Bricks:
>    Brick1: glusterp1:/bricks/brick1/gv0
>    Brick2: glusterp2:/bricks/brick1/gv0
>    Brick3: glusterp3:/bricks/brick1/gv0
>    Options Reconfigured:
>    performance.client-io-threads: off
>    nfs.disable: on
>    transport.address-family: inet
>root@salt-001:~#
>
>===========
>
>
>How do I find what  "eafb8799-4e7a-4264-9213-26997c5a4693"  is?


_______________________________________________
Gluster-users mailing list
Gluster-users@xxxxxxxxxxx
http://lists.gluster.org/mailman/listinfo/gluster-users
