gluster-devel

From: Dale Dude
Subject: [Gluster-devel] Best scheduler for best parallel file creation across bricks?
Date: Wed, 06 Jun 2007 18:52:26 -0400
User-agent: Thunderbird 2.0.0.5pre (Windows/20070605)

Can someone please provide either an example or a brief explanation of which scheduler is best suited to creating files in parallel on "unused" bricks?

I have 1 client, 2 servers, and 8 bricks total, with 6 parallel rsyncs running on that one client.

It seems that whichever scheduler I use, all files are created on the same brick for a period of time.

I'm using the RR scheduler now, and there does seem to be some parallel creation going on, but very little. *Maybe* 10% of the time I see 2 or 3 bricks being used at the same time for creates/writes.
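For reference, here is roughly what my rr attempt looked like (the rr.limits.min-free-disk option name is my guess by analogy with the random scheduler's option further below; I haven't verified it against the docs):

volume bricks
        type cluster/unify
        subvolumes server1vol1 server1vol2 server2vol1 server2vol2 server2vol3 server2vol4 server2vol5 server2vol6
        option scheduler rr
        option rr.limits.min-free-disk 6GB   # assumed by analogy with random.limits.min-free-disk; unverified
end-volume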

Any advice would be much appreciated.
Regards,
Dale

I was previously using the ALU scheduler in the client config below, which didn't seem to parallelize creations at all:
volume bricks
        type cluster/unify
        subvolumes server1vol1 server1vol2 server2vol1 server2vol2 server2vol3 server2vol4 server2vol5 server2vol6
        option scheduler alu
        option alu.limits.min-free-disk 6GB        # don't create files on a volume with less than 6GB of free disk space
        option alu.limits.max-open-files 10000     # don't create files on a volume with more than 10000 open files
        option alu.order read-usage:write-usage
        option alu.read-usage.entry-threshold 20%  # kick in when the read-usage discrepancy is 20%
        option alu.read-usage.exit-threshold 4%    # don't stop until the discrepancy has been reduced to 4%
        option alu.write-usage.entry-threshold 20% # kick in when the write-usage discrepancy is 20%
        option alu.write-usage.exit-threshold 4%   # don't stop until the discrepancy has been reduced to 4%
        option alu.stat-refresh.interval 10sec     # refresh the statistics used for decision-making every 10 seconds
end-volume
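Rereading this, I wonder whether the stat refresh is the culprit: with alu.stat-refresh.interval at 10sec, every create inside a 10-second window presumably sees the same statistics, so they would all be scheduled onto the same "best" brick. If that is right, a shorter interval might spread parallel creates better, e.g.:

        option alu.stat-refresh.interval 1sec      # untested guess: refresh stats more often so parallel creates see updated usage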


===================================

glusterfs-client.vol
------------------
volume server1vol1
        type protocol/client
        option transport-type tcp/client     # for TCP/IP transport
        option remote-host 192.168.0.2     # IP address of the remote brick
        option remote-subvolume clusterfs1
end-volume

volume server1vol2
        type protocol/client
        option transport-type tcp/client     # for TCP/IP transport
        option remote-host 192.168.0.2     # IP address of the remote brick
        option remote-subvolume clusterfs2
end-volume

###################

volume server2vol1
        type protocol/client
        option transport-type tcp/client     # for TCP/IP transport
        option remote-host 127.0.0.1     # IP address of the remote brick
        option remote-subvolume clusterfs1
end-volume

volume server2vol2
        type protocol/client
        option transport-type tcp/client     # for TCP/IP transport
        option remote-host 127.0.0.1     # IP address of the remote brick
        option remote-subvolume clusterfs2
end-volume

volume server2vol3
        type protocol/client
        option transport-type tcp/client     # for TCP/IP transport
        option remote-host 127.0.0.1     # IP address of the remote brick
        option remote-subvolume clusterfs3
end-volume

volume server2vol4
        type protocol/client
        option transport-type tcp/client     # for TCP/IP transport
        option remote-host 127.0.0.1     # IP address of the remote brick
        option remote-subvolume clusterfs4
end-volume

volume server2vol5
        type protocol/client
        option transport-type tcp/client     # for TCP/IP transport
        option remote-host 127.0.0.1     # IP address of the remote brick
        option remote-subvolume clusterfs5
end-volume

volume server2vol6
        type protocol/client
        option transport-type tcp/client     # for TCP/IP transport
        option remote-host 127.0.0.1     # IP address of the remote brick
        option remote-subvolume clusterfs6
end-volume

###################

volume bricks
        type cluster/unify
        subvolumes server1vol1 server1vol2 server2vol1 server2vol2 server2vol3 server2vol4 server2vol5 server2vol6
        option scheduler random
        option random.limits.min-free-disk 6GB
end-volume

volume statprefetch
       type performance/stat-prefetch
       option cache-seconds 1
       subvolumes bricks
end-volume

volume writebehind
       type performance/write-behind
       #option aggregate-size 131072 # aggregate block size in bytes
       option aggregate-size 1048576 # aggregate block size in bytes
       subvolumes statprefetch
end-volume
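In case it matters, I am also considering stacking an io-threads translator on top, on the theory that more client-side worker threads would let the 6 rsyncs write concurrently. A minimal sketch, assuming the performance/io-threads translator and its thread-count option behave as the docs describe:

volume iothreads
        type performance/io-threads
        option thread-count 4          # number of worker threads; option name taken from the docs, untested here
        subvolumes writebehind
end-volume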
