Trouble with AFR

-------- Original Message --------
> Date: Thu, 2 Apr 2009 00:01:32 +0530
> From: Anand Avati <avati at gluster.com>
> To: Steve <steeeeeveee at gmx.net>
> CC: gluster-users at gluster.org
> Subject: Re: Trouble with AFR

> > ----
> > tisiphone ~ # md5sum --binary /var/www/vu-hosting/localhost/admin/ssl/*
> > c80162aa9220f8942fd11137f03243cb */var/www/vu-hosting/localhost/admin/ssl/nginx.crt
> > 330a48882caf1960180183d7e3d237c8 */var/www/vu-hosting/localhost/admin/ssl/nginx.csr
> > 20169d74a3493b2941c99213ea24705b */var/www/vu-hosting/localhost/admin/ssl/nginx.key
> > d41d8cd98f00b204e9800998ecf8427e */var/www/vu-hosting/localhost/admin/ssl/nginx.pem
> > tisiphone ~ #
> > ----
> > ==> The pem file is wrong!
> >
> >
> > Redoing the check on hermes:
> > ----
> > hermes ~ # md5sum --binary /var/www/vu-hosting/localhost/admin/ssl/*
> > c80162aa9220f8942fd11137f03243cb */var/www/vu-hosting/localhost/admin/ssl/nginx.crt
> > 330a48882caf1960180183d7e3d237c8 */var/www/vu-hosting/localhost/admin/ssl/nginx.csr
> > 20169d74a3493b2941c99213ea24705b */var/www/vu-hosting/localhost/admin/ssl/nginx.key
> > c92bad4bdfde4df52db7afe91977247d */var/www/vu-hosting/localhost/admin/ssl/nginx.pem
> > hermes ~ #
> > ----
> >
> >
> > Listing on hermes:
> > ----
> > hermes ~ # ls -lah /var/www/vu-hosting/localhost/admin/ssl/
> > total 16K
> > drwxr-xr-x 2 root root   70 Mar 30 18:27 .
> > drwxr-xr-x 3 root root   16 Mar 30 18:27 ..
> > -r--r--r-- 1 root root 1017 Mar 30 18:27 nginx.crt
> > -r--r--r-- 1 root root  737 Mar 30 18:27 nginx.csr
> > -r-------- 1 root root  887 Mar 30 18:27 nginx.key
> > -r-------- 1 root root 1.9K Mar 30 18:27 nginx.pem
> > hermes ~ #
> > ----
> >
> >
> > Listing on tisiphone:
> > ----
> > tisiphone ~ # ls -lah /var/www/vu-hosting/localhost/admin/ssl/
> > total 16K
> > drwxr-xr-x 2 root root   70 Mar 30 18:27 .
> > drwxr-xr-x 3 root root   16 Mar 30 18:27 ..
> > -r--r--r-- 1 root root 1017 Mar 30 18:27 nginx.crt
> > -r--r--r-- 1 root root  737 Mar 30 18:27 nginx.csr
> > -r-------- 1 root root  887 Mar 30 18:27 nginx.key
> > -r-------- 1 root root 1.9K Mar 30 18:27 nginx.pem
> > tisiphone ~ #
> > ----
> > ==> WHAT? This is new! I never had the whole file there in my tests.
> > Now the file is there and it is not 0 bytes in size. Let me check the MD5 sum.
> >
> >
> > Recheck on tisiphone:
> > ----
> > tisiphone ~ # md5sum --binary /var/www/vu-hosting/localhost/admin/ssl/*
> > c80162aa9220f8942fd11137f03243cb */var/www/vu-hosting/localhost/admin/ssl/nginx.crt
> > 330a48882caf1960180183d7e3d237c8 */var/www/vu-hosting/localhost/admin/ssl/nginx.csr
> > 20169d74a3493b2941c99213ea24705b */var/www/vu-hosting/localhost/admin/ssl/nginx.key
> > d41d8cd98f00b204e9800998ecf8427e */var/www/vu-hosting/localhost/admin/ssl/nginx.pem
> > tisiphone ~ #
> > ----
> > ==> Okay. At least this is consistent. The pem file is not the same on
> > both nodes. This time the size is not 0 bytes, but the content is not
> > the same. At least if I can trust md5sum.
> >
> 
> Can you remove io-cache and see if things continue to be the same?
> 
I did that, and it's still the same. Currently I use:

Server:
-----
#######################################################
###  GlusterFS Server Volume Specification          ###
#######################################################

volume gfs-ds-posix
  type storage/posix
  option directory /mnt/glusterfs/vu-hosting
  # option o-direct off
  # option export-statfs-size yes
  # option mandate-attribute yes
  # option span-devices 1
end-volume

volume gfs-ds-locks
  type features/locks
  option mandatory-locks off
  subvolumes gfs-ds-posix
end-volume

#volume gfs-ds-io-threads
#  type performance/io-threads
#  option thread-count 8
#  subvolumes gfs-ds-locks
#end-volume

volume gfs-ds-server
  type protocol/server
  option transport-type tcp/server
  option transport.socket.listen-port 6997
  option auth.addr.gfs-ds-locks.allow 192.168.0.*,127.0.0.1
  option auth.addr.gfs-ds-io-threads.allow 192.168.0.*,127.0.0.1
  # subvolumes gfs-ds-io-threads
  subvolumes gfs-ds-locks
end-volume
-----


Client:
-----
#######################################################
###  GlusterFS Client Volume Specification          ###
#######################################################

##volume gfs-ds-fuse
##  type mount/fuse
##  option direct-io-mode on
##  option macfuse-local off
##  option mount-point /var/www/vu-hosting
##  option attribute-timeout 0.0
##  option entry-timeout 0.0
##end-volume

volume gfs-ds-vu-hosting-hermes
  type protocol/client
  option transport-type tcp/client
  # option username
  # option password
  option remote-host 192.168.0.70
  option remote-port 6997
  # option remote-subvolume gfs-ds-io-threads
  option remote-subvolume gfs-ds-locks
  # option transport-timeout 10
  # option ping-timeout 5
end-volume

volume gfs-ds-vu-hosting-tisiphone
  type protocol/client
  option transport-type tcp/client
  # option username
  # option password
  option remote-host 192.168.0.75
  option remote-port 6997
  # option remote-subvolume gfs-ds-io-threads
  option remote-subvolume gfs-ds-locks
  # option transport-timeout 10
  # option ping-timeout 5
end-volume

volume gfs-ds-replicate
  type cluster/replicate
  option data-self-heal on
  option metadata-self-heal on
  option entry-self-heal on
  # option read-subvolume
  # option favorite-child
  option data-change-log on
  option metadata-change-log on
  option entry-change-log on
  option data-lock-server-count 1
  option metadata-lock-server-count 1
  option entry-lock-server-count 1
  subvolumes gfs-ds-vu-hosting-hermes gfs-ds-vu-hosting-tisiphone
end-volume

#volume gfs-ds-io-threads
#  type performance/io-threads
#  option thread-count 8
#  subvolumes gfs-ds-replicate
#end-volume

#volume gfs-ds-write-behind
#  type performance/write-behind
#  option block-size 1MB
#  option cache-size 4MB
#  option flush-behind on
#  # option disable-for-first-nbytes 1
#  # option enable-O_SYNC false
#  subvolumes gfs-ds-io-threads
#end-volume

##volume gfs-ds-read-ahead
##  type performance/read-ahead
##  option page-size 65KB
##  option page-count 16
##  option force-atime-update off
##  subvolumes gfs-ds-write-behind
##end-volume

#volume gfs-ds-io-cache
#  type performance/io-cache
#  option page-size 4KB
#  option cache-size 64MB
#  option priority *:0
#  option cache-timeout 1
#  # subvolumes gfs-ds-read-ahead
#  subvolumes gfs-ds-write-behind
#end-volume
-----


As you can see, I switched to client-based AFR, but the effect is the same: I lose data. This time, in the glusterfs-mounted directory the metadata is there but the content is not. On the underlying XFS filesystem the file is there but has a size of 0 bytes, and only on one node; on the node where I run the copy job everything seems to be okay. So replication does not work, though not in general: some files do get transferred over the wire, just not all of them. The funny thing is that when I hit a file which cannot be transferred, I can always reproduce the error with just that file. In my case it's a pem file with SSL certificates.
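One detail worth pointing out: d41d8cd98f00b204e9800998ecf8427e, the sum reported for nginx.pem on tisiphone above, is the MD5 of empty input. So even though ls shows 1.9K, reading the file through the mount returns no data at all, which fits the 0-byte copy on the backend. That is easy to check; the following is just a sketch, using the mount point and backend directory from my specs above, and the ssh step assumes root ssh access to the other node:

-----
# MD5 of empty input -- identical to the nginx.pem sum on tisiphone
echo -n "" | md5sum
# d41d8cd98f00b204e9800998ecf8427e  -

# compare the file through the mount and on the XFS backend of each node
F=localhost/admin/ssl/nginx.pem
md5sum "/var/www/vu-hosting/$F"                         # through the mount
md5sum "/mnt/glusterfs/vu-hosting/$F"                   # XFS backend, local node
ssh 192.168.0.75 "md5sum /mnt/glusterfs/vu-hosting/$F"  # XFS backend, tisiphone
-----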

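If it helps the debugging: as far as I understand, replicate keeps its pending-operation counters in trusted.afr.* extended attributes on the backend files, so the state of the two copies can be inspected directly on the XFS bricks. A sketch (needs getfattr from the attr package, run as root on each node):

-----
# dump all trusted.* xattrs of the broken file on the backend;
# non-zero trusted.afr.* counters would indicate a pending self-heal
getfattr -d -m trusted -e hex \
    /mnt/glusterfs/vu-hosting/localhost/admin/ssl/nginx.pem
-----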

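And for reproducing or repairing this: as far as I know, replicate only self-heals a file when it is looked up and read through the mount, so a forced read of the bad file from the client side should trigger a heal attempt (sketch, same paths as above):

-----
# trigger a lookup + read of the broken file through the mount
head -c1 /var/www/vu-hosting/localhost/admin/ssl/nginx.pem > /dev/null

# or walk the whole tree to give every file a heal chance
find /var/www/vu-hosting -type f -exec head -c1 '{}' \; > /dev/null
-----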

> Avati
>
Steve



