Hi,

I would like to be sure how to test that a 3-node GFS-6.0.2.20-1 file cluster does not corrupt data while multiple nodes are writing to the same file, and at the same time how LOCK_GULM manages the filesystem locking. Is there any known test or benchmark for this other than bonnie++? One check I have in mind is sketched below.

I would like to deploy 3 filesystems, each one on one GFS server, and export these filesystems with SMB to other SMB clients. I plan to run 3 SMB instances, one for each filesystem, on every server node, with a load balancer in front of them; a rough share configuration follows the platform list below.
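For the corruption check, this is a minimal sketch of what I have in mind (the mount point, file name, and record size are placeholders of my own, not part of any official GFS test): run one copy of the writer on each node against the same GFS file, then run the verification pass on any single node. GFS should serialize the appends through its cluster-wide POSIX locks, so every record should come back intact:

#!/usr/bin/env python
# Hypothetical multi-node append test: each node writes tagged,
# fixed-length records under an exclusive POSIX lock; a later pass
# verifies that no record was torn or interleaved.
import fcntl
import os
import socket
import sys

PATH = "/users/lnxsrv1/writetest.dat"  # placeholder path on a GFS mount
RECORDS = 1000
RECLEN = 128

def write_phase():
    host = socket.gethostname()
    f = open(PATH, "a")
    for i in range(RECORDS):
        rec = ("%s:%06d" % (host, i)).ljust(RECLEN - 1) + "\n"
        fcntl.lockf(f, fcntl.LOCK_EX)   # GFS must honour this cluster-wide
        f.write(rec)
        f.flush()
        os.fsync(f.fileno())            # force the record to disk
        fcntl.lockf(f, fcntl.LOCK_UN)
    f.close()

def verify_phase():
    bad = 0
    for line in open(PATH).readlines():
        if len(line) != RECLEN or ":" not in line:
            bad = bad + 1
    print "%d corrupt records" % bad
    return bad

if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == "verify":
        sys.exit(verify_phase() and 1)
    write_phase()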
Platform      : 3x HP-DL380 G4
OS            : Red Hat Enterprise Linux Advanced Server 3.0 Update 4
Cluster Suite : 3.0, clumanager-1.2.22-2, redhat-config-cluster-1.0.3-1.noarch.rpm
GFS           : GFS-6.0.2.20-1, GFS-devel-6.0.2.20-1, GFS-modules-smp-6.0.2.20-1
Fencing device: fence_ilo
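For the SMB layer, this is the rough kind of share stanza I am considering, one per filesystem in smb.conf on each node (the share name is a placeholder; my assumption is that oplocks have to be disabled, since the smbd instances on different nodes cannot coordinate oplocks with each other):

# Hypothetical share definition for one GFS filesystem.
[lnxsrv1]
    path = /users/lnxsrv1
    writable = yes
    # smbd on one node does not know about oplocks granted by smbd on
    # another node, so clients could otherwise cache stale data.
    oplocks = no
    level2 oplocks = no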
Any advice would be appreciated.

Some notes about the system:
------------------------------------------
[root@gfs-test2 root]# gfs_tool df
/users/lnxsrv1:
SB lock proto = "lock_gulm"
SB lock table = "gfs-test:lnxsrv1"
SB ondisk format = 1308
SB multihost format = 1401
Block size = 4096
Journals = 8
Resource Groups = 1596
Mounted lock proto = "lock_gulm"
Mounted lock table = "gfs-test:lnxsrv1"
Mounted host data = ""
Journal number = 1
Lock module flags = async
Local flocks = FALSE
Local caching = FALSE
  Type           Total          Used           Free           use%
  ------------------------------------------------------------------------
  inodes         8              8              0              100%
  metadata       92131          60723          31408          66%
  data           104492113      30341120       74150993       29%
/users/lnxsrv2:
SB lock proto = "lock_gulm"
SB lock table = "gfs-test:lnxsrv2"
SB ondisk format = 1308
SB multihost format = 1401
Block size = 4096
Journals = 8
Resource Groups = 1596
Mounted lock proto = "lock_gulm"
Mounted lock table = "gfs-test:lnxsrv2"
Mounted host data = ""
Journal number = 1
Lock module flags = async
Local flocks = FALSE
Local caching = FALSE
  Type           Total          Used           Free           use%
  ------------------------------------------------------------------------
  inodes         5              5              0              100%
  metadata       38             38             0              100%
  data           104584209      0              104584209      0%
/users/lnxsrv3:
SB lock proto = "lock_gulm"
SB lock table = "gfs-test:lnxsrv3"
SB ondisk format = 1308
SB multihost format = 1401
Block size = 4096
Journals = 8
Resource Groups = 396
Mounted lock proto = "lock_gulm"
Mounted lock table = "gfs-test:lnxsrv3"
Mounted host data = ""
Journal number = 1
Lock module flags = async
Local flocks = FALSE
Local caching = FALSE
  Type           Total          Used           Free           use%
  ------------------------------------------------------------------------
  inodes         5              5              0              100%
  metadata       10             10             0              100%
  data           25949437       0              25949437       0%
[root@gfs-test1 root]# gulm_tool nodeinfo gfs-test{3,1}.-----
Name: gfs-test1.-----
ip = 160.75.100.22
state = Logged in
mode = Slave
missed beats = 0
last beat = 1120235791271378
delay avg = 6672475
max delay = 9459850
[root@gfs-test1 root]# gulm_tool nodeinfo gfs-test{3,2}.------
Name: gfs-test2.------
ip = 160.75.100.23
state = Logged in
mode = Slave
missed beats = 0
last beat = 1120235802888768
delay avg = 6678723
max delay = 6880217
Aydin SASMAZ
System Support Engineer
ITU BIDB
Phone: +90 212 2853930
Fax  : +90 212 2856936