
Re: [Gluster-devel] no element to prune


From: Pooya Woodcock
Subject: Re: [Gluster-devel] no element to prune
Date: Tue, 10 Jul 2007 12:10:18 -0700

Here are my configs...

# SERVER 1
volume brick
        type storage/posix
        option directory /GlusterFS
end-volume

volume locks
        type features/posix-locks
        subvolumes brick
end-volume

volume ns
        type storage/posix
        option directory /GlusterFS-NS
end-volume

volume brick-afr
        type storage/posix
        option directory /GlusterFS-AFR
end-volume

volume locks-afr
        type features/posix-locks
        subvolumes brick-afr
end-volume

volume server
        type protocol/server
        option transport-type tcp/server
        option bind-address 10.175.175.134
        option listen-port 6996
        subvolumes ns locks locks-afr
        option auth.ip.ns.allow 10.175.175.*
        option auth.ip.locks.allow 10.175.175.*
        option auth.ip.locks-afr.allow 10.175.175.*
end-volume

volume trace
        type debug/trace
        subvolumes server
        option debug on
end-volume

# SERVERS 2-4
# Same as above, minus the namespace (ns) volume and its auth line.
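
For reference, a minimal sketch of what the spec on servers 2-4 would look like under that description (shown for 10.175.175.135; the bind-address would be .136/.137 on the other hosts, and the debug/trace volume carries over unchanged):

volume brick
        type storage/posix
        option directory /GlusterFS
end-volume

volume locks
        type features/posix-locks
        subvolumes brick
end-volume

volume brick-afr
        type storage/posix
        option directory /GlusterFS-AFR
end-volume

volume locks-afr
        type features/posix-locks
        subvolumes brick-afr
end-volume

volume server
        type protocol/server
        option transport-type tcp/server
        option bind-address 10.175.175.135
        option listen-port 6996
        subvolumes locks locks-afr
        option auth.ip.locks.allow 10.175.175.*
        option auth.ip.locks-afr.allow 10.175.175.*
end-volume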

# CLIENT
volume ns
        type protocol/client
        option transport-type tcp/client
        option remote-host 10.175.175.134
        option remote-port 6996
        option remote-subvolume ns
end-volume

volume brick1
        type protocol/client
        option transport-type tcp/client
        option remote-host 10.175.175.134
        option remote-port 6996
        option remote-subvolume locks
end-volume

volume brick1-afr
        type protocol/client
        option transport-type tcp/client
        option remote-host 10.175.175.135
        option remote-port 6996
        option remote-subvolume locks-afr
end-volume

volume brick2
        type protocol/client
        option transport-type tcp/client
        option remote-host 10.175.175.135
        option remote-port 6996
        option remote-subvolume locks
end-volume

volume brick2-afr
        type protocol/client
        option transport-type tcp/client
        option remote-host 10.175.175.136
        option remote-port 6996
        option remote-subvolume locks-afr
end-volume

volume brick3
        type protocol/client
        option transport-type tcp/client
        option remote-host 10.175.175.136
        option remote-port 6996
        option remote-subvolume locks
end-volume

volume brick3-afr
        type protocol/client
        option transport-type tcp/client
        option remote-host 10.175.175.137
        option remote-port 6996
        option remote-subvolume locks-afr
end-volume

volume brick4
        type protocol/client
        option transport-type tcp/client
        option remote-host 10.175.175.137
        option remote-port 6996
        option remote-subvolume locks
end-volume

volume brick4-afr
        type protocol/client
        option transport-type tcp/client
        option remote-host 10.175.175.134
        option remote-port 6996
        option remote-subvolume locks-afr
end-volume

volume afr1
        type cluster/afr
        subvolumes brick1 brick1-afr
        option replicate *:2
end-volume

volume afr2
        type cluster/afr
        subvolumes brick2 brick2-afr
        option replicate *:2
end-volume

volume afr3
        type cluster/afr
        subvolumes brick3 brick3-afr
        option replicate *:2
end-volume

volume afr4
        type cluster/afr
        subvolumes brick4 brick4-afr
        option replicate *:2
end-volume
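
To make the replication layout easier to follow, the pairings implied by the client volumes above work out to a ring across the four servers:

# AFR replica pairs implied by the client volumes above:
#   afr1: 10.175.175.134 (locks) + 10.175.175.135 (locks-afr)
#   afr2: 10.175.175.135 (locks) + 10.175.175.136 (locks-afr)
#   afr3: 10.175.175.136 (locks) + 10.175.175.137 (locks-afr)
#   afr4: 10.175.175.137 (locks) + 10.175.175.134 (locks-afr)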

volume unify1
        type cluster/unify
        subvolumes afr1 afr2 afr3 afr4
        option namespace ns
        option readdir-force-success on
        option scheduler rr
        option rr.limits.min-free-disk 10GB
end-volume

volume iothreads
        type performance/io-threads
        option thread-count 8
        subvolumes unify1
end-volume

volume readahead
        type performance/read-ahead
        option page-size 131072
        option page-count 16
        subvolumes iothreads
end-volume

#volume io-cache
#       type performance/io-cache
#       option page-count 16
#       option page-size 1MB
#       subvolumes readahead
#end-volume
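
With the two spec files in place, the daemons would be started roughly as follows; a sketch for the glusterfs 1.3-era command line, where the spec-file paths and mount point are assumptions, not taken from the post:

# on each server (hypothetical spec-file path)
glusterfsd -f /etc/glusterfs/glusterfs-server.vol

# on the client (hypothetical spec-file path and mount point)
glusterfs -f /etc/glusterfs/glusterfs-client.vol /mnt/glusterfs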




On Jul 10, 2007, at 12:05 PM, Pooya Woodcock wrote:


Hi everyone, it's been a while since I last posted, since I've been running very stably on the 2.4 branch. Just recently I took the plunge: I am now running mainline 2.5, latest TLA as of an hour ago (294?).

2007-07-10 12:01:11 D [inode.c:332:__passive_inode] brick-afr/inode: passivating inode(34735742), lru=56/1000
2007-07-10 12:01:11 D [server-protocol.c:639:server_inode_prune] locks-afr: no element to prune
2007-07-10 12:01:11 D [server-protocol.c:639:server_inode_prune] locks-afr: no element to prune
2007-07-10 12:01:11 D [server-protocol.c:639:server_inode_prune] locks-afr: no element to prune
2007-07-10 12:01:11 D [server-protocol.c:639:server_inode_prune] locks-afr: no element to prune
2007-07-10 12:01:11 D [server-protocol.c:639:server_inode_prune] locks-afr: no element to prune
2007-07-10 12:01:12 D [inode.c:302:__active_inode] brick-afr/inode: activating inode(34685086), lru=55/1000
2007-07-10 12:01:12 D [server-protocol.c:639:server_inode_prune] locks-afr: no element to prune
2007-07-10 12:01:12 D [inode.c:332:__passive_inode] brick-afr/inode: passivating inode(34685086), lru=56/1000
2007-07-10 12:01:12 D [inode.c:302:__active_inode] brick/inode: activating inode(11862174), lru=55/1000
2007-07-10 12:01:12 D [server-protocol.c:639:server_inode_prune] locks: no element to prune
2007-07-10 12:01:12 D [inode.c:332:__passive_inode] brick/inode: passivating inode(11862174), lru=56/1000
2007-07-10 12:01:12 D [inode.c:302:__active_inode] brick/inode: activating inode(11912029), lru=55/1000

There are lots of "no element to prune" messages in my daemon log files. Should I worry about this?

address@hidden ~]# tail /var/log/glusterfsd.log
2007-07-10 12:03:45 D [inode.c:332:__passive_inode] brick-afr/inode: passivating inode(10552395), lru=57/1000
2007-07-10 12:03:45 D [inode.c:302:__active_inode] brick-afr/inode: activating inode(10552395), lru=56/1000
2007-07-10 12:03:45 D [server-protocol.c:639:server_inode_prune] locks-afr: no element to prune
2007-07-10 12:03:45 D [inode.c:302:__active_inode] brick-afr/inode: activating inode(10552520), lru=55/1000
2007-07-10 12:03:45 D [server-protocol.c:639:server_inode_prune] locks-afr: no element to prune
2007-07-10 12:03:45 D [inode.c:332:__passive_inode] brick-afr/inode: passivating inode(10552520), lru=56/1000
2007-07-10 12:03:45 D [inode.c:302:__active_inode] brick/inode: activating inode(12141234), lru=56/1000
2007-07-10 12:03:45 D [server-protocol.c:639:server_inode_prune] locks: no element to prune
2007-07-10 12:03:45 D [inode.c:332:__passive_inode] brick/inode: passivating inode(12141234), lru=57/1000
2007-07-10 12:03:45 D [server-protocol.c:639:server_inode_prune] locks: no element to prune






