Re: performance seems extremely bad

See my earlier email with the subject:
"mainline-2.5 performance a known issue?"


Anand Avati wrote:
Dale,
it is tough to relate the performance numbers reported by dbench to the actual performance of real-life applications over a filesystem. dbench usually reports very low numbers on filesystems implemented via FUSE, because of the marginally increased latency of every metadata operation. For real-life applications, though, performance is almost as good as a local disk, and sometimes better for very heavy I/O. Can you compare the dbench numbers (on the latest TLA patchset) with, say, NFS or other similar network filesystems?

thanks,
avati
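
(For reference, a comparison along these lines might look like the sketch below. The NFS server name, export path, and mount point are placeholders for illustration, not details from this thread.)

    # Mount a comparable NFS export and run the same dbench workload as a baseline.
    # "someserver:/export" and /mnt/nfs are assumed names.
    mount -t nfs someserver:/export /mnt/nfs
    cd /mnt/nfs && dbench -t 10 10

(The resulting throughput can then be set against the GlusterFS and raw-disk numbers quoted below.)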

2007/7/5, Dale Dude <dale@xxxxxxxxxxxxxxx>:

    Kernel 2.6.15. mainline-2.5 patch 275. fuse 2.6.5

    Tested with: dbench -t 10 10. Is performance supposed to be this bad?

    Glusterfs /volumes: Throughput 15.8983 MB/sec 10 procs

    Bypass glusterfs direct to /volume1: Throughput 65.0482 MB/sec 10 procs

    Bypass glusterfs direct to /volume2: Throughput 66.5139 MB/sec 10 procs



    =============
    client.vol:

    volume server1
             type protocol/client
             option transport-type tcp/client     # for TCP/IP transport
             option remote-host 127.0.0.1          # IP address of the remote brick
             option remote-subvolume volumenamespace
    end-volume

    volume server1vol1
             type protocol/client
             option transport-type tcp/client     # for TCP/IP transport
             option remote-host 127.0.0.1          # IP address of the remote brick
             option remote-subvolume clusterfs1
    end-volume


    volume server1vol2
             type protocol/client
             option transport-type tcp/client     # for TCP/IP transport
             option remote-host 127.0.0.1          # IP address of the remote brick
             option remote-subvolume clusterfs2
    end-volume

    volume bricks
      type cluster/unify
      option namespace server1
      option readdir-force-success on  # ignore failed mounts
      subvolumes server1vol1 server1vol2

      option scheduler rr
      option rr.limits.min-free-disk 5 #%
    end-volume

    volume writebehind   #writebehind improves write performance a lot
      type performance/write-behind
      option aggregate-size 131072 # in bytes
      subvolumes bricks
    end-volume

    volume readahead
      type performance/read-ahead
      option page-size 65536     # unit in bytes
      option page-count 16       # cache per file = (page-count x page-size)
      subvolumes writebehind
    end-volume

    volume iothreads
       type performance/io-threads
       option thread-count 32
       subvolumes readahead
    end-volume

    ==============================
    server.vol:

    volume volume1
      type storage/posix
      option directory /volume1
    end-volume

    #volume posixlocks1
      #type features/posix-locks
      #option mandatory on          # enables mandatory locking on all files
      #subvolumes volume1
    #end-volume

    volume clusterfs1
       type performance/io-threads
       option thread-count 16
       subvolumes volume1
    end-volume

    #######

    volume volume2
      type storage/posix
      option directory /volume2
    end-volume

    #volume posixlocks2
      #type features/posix-locks
      #option mandatory on          # enables mandatory locking on all files
      #subvolumes volume2
    #end-volume

    volume clusterfs2
       type performance/io-threads
       option thread-count 16
       subvolumes volume2
    end-volume

    #######

    volume volumenamespace
      type storage/posix
      option directory /volume.namespace
    end-volume

    ###

    volume clusterfs
      type protocol/server
      option transport-type tcp/server
      subvolumes clusterfs1 clusterfs2 volumenamespace
      option auth.ip.clusterfs1.allow *
      option auth.ip.clusterfs2.allow *
      option auth.ip.volumenamespace.allow *
    end-volume
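
    (Not part of the original mail: a rough sketch of how these two specs would typically be loaded in a glusterfs 1.3 / mainline-2.5 setup. The spec-file names and the /volumes mount point are taken from this message or assumed.)

    # on the server: export clusterfs1, clusterfs2 and the namespace volume
    glusterfsd -f server.vol

    # on the client: mount the unified volume over FUSE at /volumes
    glusterfs -f client.vol /volumes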


    _______________________________________________
    Gluster-devel mailing list
    Gluster-devel@xxxxxxxxxx
    http://lists.nongnu.org/mailman/listinfo/gluster-devel




--
Anand V. Avati

