glusterfs speed

Hi all,

I've been trying to get glusterfs running at a reasonable speed on my
test system. I'm looking to replace an NFS-over-DRBD setup, and
glusterfs looks like a likely candidate.

I realise that glusterfs will be slower than NFS, and in my tests I can
get similar (line-speed) throughput for large files, but for small
files NFS is much faster. I have tried server-to-server AFR and
client-side AFR, with and without unify, and with and without the
various performance translators recommended in the documentation, but I
haven't found a setup that gets above 1/3 the speed of NFS for small
files.
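
(For reference, the server-to-server AFR variant was set up roughly as
sketched below -- the volume names and peer IP here are illustrative,
and the full client-side AFR config is the one posted at the end. Each
server exports an AFR of its own posix volume plus a protocol/client
volume pointing at the other server:

volume peer-mirror
    type protocol/client
    option transport-type tcp/client
    option remote-host 192.168.102.251   # the other server (illustrative)
    option remote-subvolume brick-data   # the peer's local posix volume
end-volume

volume afr-serverside
    type cluster/afr
    subvolumes brick-data peer-mirror    # replicate local posix + remote peer
end-volume
)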

I'm wondering if anyone could post server and client config files that
give reasonable performance for small files. This is going to hold web
data, so most files are under 50KB.

Something I found in my tests, which makes me suspect my reading of the
docs is wrong, is that adding read-ahead/write-behind/io-cache etc.
actually makes things slower.
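
(To be concrete: "with the performance volumes" means the three
translators stacked at the end of the client config below; the baseline
I compare against is the same file simply truncated after unify, i.e.
its tail is just:

volume unify
    type cluster/unify
    option namespace afr-ns
    option scheduler rr
    subvolumes afr1 afr2
end-volume

# no io-cache, read-ahead or write-behind volumes on top
)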

Thanks in advance,
Josh.

P.S. Here is the config I found to be the fastest: 5m43s (glusterfs) vs
1m7s (NFS) to tar. Copying the tar file to/from glusterfs takes only
38s. The tar file is 144MB and contains 9400 files.
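
(The timings come from runs along these lines -- the mount point and
file names are illustrative rather than my exact commands:

glusterfs -f client.vol /mnt/glusterfs     # mount using the client config below
time tar -xf files.tar -C /mnt/glusterfs   # 5m43s on glusterfs vs 1m7s on NFS
time cp files.tar /mnt/glusterfs/          # ~38s, roughly line speed
)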

Server

volume brick-data
    type storage/posix
    option directory /storage/export
end-volume

volume brick-data-mirror
    type storage/posix
    option directory /storage/export-mirror
end-volume

volume brick-data-ns
    type storage/posix
    option directory /storage/export-ns
end-volume

volume brick
    type performance/io-threads
    option thread-count 4  # default is 1
    option cache-size 32MB # default is 64MB
    subvolumes brick-data
end-volume

volume brick-mirror
    type performance/io-threads
    option thread-count 4  # default is 1
    option cache-size 32MB # default is 64MB
    subvolumes brick-data-mirror
end-volume

volume brick-ns
    type performance/io-threads
    option thread-count 4  # default is 1
    option cache-size 32MB # default is 64MB
    subvolumes brick-data-ns
end-volume

volume server
    type protocol/server
    option transport-type tcp/server
    subvolumes brick brick-mirror brick-ns
    option auth.ip.brick.allow 192.168.102.*        # allow access to brick
    option auth.ip.brick-mirror.allow 192.168.102.* # allow access to brick-mirror
    option auth.ip.brick-ns.allow 192.168.102.*     # allow access to brick-ns
end-volume

Client

volume brick1
    type protocol/client
    option transport-type tcp/client     # for TCP/IP transport
    option remote-host 192.168.102.250   # IP address of server1
    option remote-subvolume brick        # name of the remote volume on server1
end-volume

volume brick2
    type protocol/client
    option transport-type tcp/client     # for TCP/IP transport
    option remote-host 192.168.102.251   # IP address of server2
    option remote-subvolume brick        # name of the remote volume on server2
end-volume

volume brick2-mirror
    type protocol/client
    option transport-type tcp/client     # for TCP/IP transport
    option remote-host 192.168.102.250   # server1: brick2's mirror lives on the opposite server
    option remote-subvolume brick-mirror # name of the remote volume on server1
end-volume

volume brick1-mirror
    type protocol/client
    option transport-type tcp/client     # for TCP/IP transport
    option remote-host 192.168.102.251   # server2: brick1's mirror lives on the opposite server
    option remote-subvolume brick-mirror # name of the remote volume on server2
end-volume

volume brick-ns1
    type protocol/client
    option transport-type tcp/client     # for TCP/IP transport
    option remote-host 192.168.102.250   # IP address of server1
    option remote-subvolume brick-ns     # name of the remote volume on server1
end-volume

volume brick-ns2
    type protocol/client
    option transport-type tcp/client     # for TCP/IP transport
    option remote-host 192.168.102.251   # IP address of server2
    option remote-subvolume brick-ns     # name of the remote volume on server2
end-volume

volume afr1
    type cluster/afr
    subvolumes brick1 brick1-mirror
end-volume

volume afr2
    type cluster/afr
    subvolumes brick2 brick2-mirror
end-volume

volume afr-ns
    type cluster/afr
    subvolumes brick-ns1 brick-ns2
end-volume

volume unify
    type cluster/unify
    option namespace afr-ns
    option scheduler rr
    subvolumes afr1 afr2
end-volume

volume io-cache
    type performance/io-cache
    option cache-size 64MB             # default is 32MB
    option page-size 1MB               # default is 128KB
    option priority *.html:2,*:1       # default is '*:0'
    option force-revalidate-timeout 2  # default is 1
    subvolumes unify
end-volume

volume readahead
    type performance/read-ahead
    option page-size 128KB        # default is 256KB
    option page-count 4           # default is 2
    option force-atime-update off # default is off
    subvolumes io-cache
end-volume

volume writebehind
    type performance/write-behind
    option aggregate-size 1MB # default is 0 bytes
    option flush-behind on    # default is 'off'
    subvolumes readahead
end-volume






