Unfortunately not. Remounted the FS, then accessed the test file from the second client:

[root@srv02 ~]# umount /mnt
[root@srv02 ~]# mount -t glusterfs srv01:/test01 /mnt
[root@srv02 ~]# ls -l /mnt/passwd
-rw-r--r--. 1 root root 1505 Aug 16 19:59 /mnt/passwd
[root@srv02 ~]# ls -l /R1/test01/
total 4
-rw-r--r--. 2 root root 1505 Aug 16 19:59 passwd
[root@srv02 ~]#

Then remounted the FS on the first node and checked whether accessing the file from the second node had triggered self-heal there:

[root@srv01 ~]# umount /mnt
[root@srv01 ~]# mount -t glusterfs srv01:/test01 /mnt
[root@srv01 ~]# ls -l /mnt
total 0
[root@srv01 ~]# ls -l /R1/test01/
total 0
[root@srv01 ~]#

Nothing appeared.

[root@srv01 ~]# gluster volume info test01

Volume Name: test01
Type: Replicate
Volume ID: 2c227085-0b06-4804-805c-ea9c1bb11d8b
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: srv01:/R1/test01
Brick2: srv02:/R1/test01
Options Reconfigured:
features.scrub-freq: hourly
features.scrub: Active
features.bitrot: on
transport.address-family: inet
performance.readdir-ahead: on
nfs.disable: on
[root@srv01 ~]#

[root@srv01 ~]# gluster volume get test01 all | grep heal
cluster.background-self-heal-count      8
cluster.metadata-self-heal              on
cluster.data-self-heal                  on
cluster.entry-self-heal                 on
cluster.self-heal-daemon                on
cluster.heal-timeout                    600
cluster.self-heal-window-size           1
cluster.data-self-heal-algorithm        (null)
cluster.self-heal-readdir-size          1KB
cluster.heal-wait-queue-length          128
features.lock-heal                      off
features.lock-heal                      off
storage.health-check-interval           30
features.ctr_lookupheal_link_timeout    300
features.ctr_lookupheal_inode_timeout   300
cluster.disperse-self-heal-daemon       enable
disperse.background-heals               8
disperse.heal-wait-qlength              128
cluster.heal-timeout                    600
cluster.granular-entry-heal             no
[root@srv01 ~]#

--
Dmitry Glushenok
Jet Infosystems
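(Not from the output above, just a sketch of what one might try next on this volume: ask Gluster which entries it considers pending heal, confirm the self-heal daemon is actually online on both bricks, and force a full crawl. All three are standard gluster CLI calls; "test01" is the volume from this thread.)

    # list entries the volume thinks still need healing
    gluster volume heal test01 info

    # check that the self-heal daemon shows as online for both bricks
    gluster volume status test01

    # trigger a full heal crawl; this may pick up the file missing on srv01
    gluster volume heal test01 full

If the full heal does nothing either, the glustershd log (typically /var/log/glusterfs/glustershd.log) on srv01 would be the next place to look.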