How do I use the namespace option correctly? In the docs/examples I
think you're using it wrong.
In cluster-client.vol you have volumes client1, client2, and client3
configured, and this line in cluster/unify:
option namespace client1 # this will not be storage child of unify.
subvolumes client1 client2 client3
I get the impression that the namespace shouldn't be configured as a
subvolume of unify. If I point namespace at a volume that is also a
subvolume of unify, it complains about exactly that. If I instead create
a volume on the server specifically for the namespace (i.e. one
storage/posix volume called volumenamespace) and use that in the client
config, it no longer complains. But I can't even ls the mounted volume;
I get the debug output shown below. A 'df -h' looks correct (glusterfs
5.9T 400M 5.9T 1% /volumes).
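For comparison, this is the layout I assume is intended: a dedicated
namespace volume that unify references via 'option namespace' but that is
NOT listed in 'subvolumes'. The volume names here are just placeholders,
and I've left out the scheduler tuning options:

volume client-ns
type protocol/client
option transport-type tcp/client
option remote-host <server-ip>
option remote-subvolume namespace-export # storage/posix used only for the namespace
end-volume

volume unify0
type cluster/unify
option namespace client-ns # dedicated namespace volume
subvolumes client1 client2 client3 # data volumes only; client-ns is not listed here
option scheduler alu
end-volume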
Btw, I have to use the ALU scheduler because any other scheduler keeps
saying that there isn't enough space on any of the "bricks".
DEBUG WHEN DOING AN 'ls -l /volumes':
==> glusterfsd.log <==
2007-06-20 15:19:15 D [server-protocol.c:5536:open_file_cleanup_fn]
protocol/server: force releaseing file 0x8052580
2007-06-20 15:19:15 D [server-protocol.c:5583:server_protocol_cleanup]
protocol/server: cleaned up xl_private of 0x8051cb8
2007-06-20 15:19:15 C [tcp.c:82:tcp_disconnect] transport/tcp:
clusterfs: connection disconnected
2007-06-20 15:19:15 D [tcp-server.c:230:gf_transport_fini] tcp/server:
destroying transport object for 127.0.0.1:1023 (fd=7)
2007-06-20 15:19:15 D [server-protocol.c:5536:open_file_cleanup_fn]
protocol/server: force releaseing file 0x8052c70
2007-06-20 15:19:15 D [server-protocol.c:5583:server_protocol_cleanup]
protocol/server: cleaned up xl_private of 0x8052248
2007-06-20 15:19:15 C [tcp.c:82:tcp_disconnect] transport/tcp:
clusterfs: connection disconnected
2007-06-20 15:19:15 D [tcp-server.c:230:gf_transport_fini] tcp/server:
destroying transport object for 127.0.0.1:1021 (fd=8)
2007-06-20 15:19:15 D [server-protocol.c:5536:open_file_cleanup_fn]
protocol/server: force releaseing file 0x8052b38
2007-06-20 15:19:15 D [server-protocol.c:5583:server_protocol_cleanup]
protocol/server: cleaned up xl_private of 0x8051fc8
2007-06-20 15:19:15 C [tcp.c:82:tcp_disconnect] transport/tcp:
clusterfs: connection disconnected
2007-06-20 15:19:15 D [tcp-server.c:230:gf_transport_fini] tcp/server:
destroying transport object for 127.0.0.1:1022 (fd=5)
==> glusterfs.log <==
2007-06-20 15:19:15 D [fuse-bridge.c:403:fuse_getattr] glusterfs-fuse:
GETATTR 1 ()
2007-06-20 15:19:15 C [common-utils.c:205:gf_print_trace]
debug-backtrace: Got signal (11), printing backtrace
2007-06-20 15:19:15 C [common-utils.c:207:gf_print_trace]
debug-backtrace: /lib/libglusterfs.so.0(gf_print_trace+0x2d) [0xb7fcc1bd]
2007-06-20 15:19:15 C [common-utils.c:207:gf_print_trace]
debug-backtrace: [0xffffe420]
2007-06-20 15:19:15 C [common-utils.c:207:gf_print_trace]
debug-backtrace: /lib/glusterfs/1.3.0-pre5/xlator/cluster/unify.so
[0xb762e325]
2007-06-20 15:19:15 C [common-utils.c:207:gf_print_trace]
debug-backtrace: /lib/glusterfs/1.3.0-pre5/xlator/protocol/client.so
[0xb763de0d]
2007-06-20 15:19:15 C [common-utils.c:207:gf_print_trace]
debug-backtrace:
/lib/glusterfs/1.3.0-pre5/xlator/protocol/client.so(notify+0x892)
[0xb7640102]
2007-06-20 15:19:15 C [common-utils.c:207:gf_print_trace]
debug-backtrace: /lib/libglusterfs.so.0(transport_notify+0x37) [0xb7fcd707]
2007-06-20 15:19:15 C [common-utils.c:207:gf_print_trace]
debug-backtrace: /lib/libglusterfs.so.0(sys_epoll_iteration+0xd9)
[0xb7fce229]
2007-06-20 15:19:15 C [common-utils.c:207:gf_print_trace]
debug-backtrace: /lib/libglusterfs.so.0(poll_iteration+0x1d) [0xb7fcd7dd]
2007-06-20 15:19:15 C [common-utils.c:207:gf_print_trace]
debug-backtrace: [glusterfs] [0x804a160]
2007-06-20 15:19:15 C [common-utils.c:207:gf_print_trace]
debug-backtrace: /lib/tls/i686/cmov/libc.so.6(__libc_start_main+0xdc)
[0xb7e65ebc]
2007-06-20 15:19:15 C [common-utils.c:207:gf_print_trace]
debug-backtrace: [glusterfs] [0x8049d61]
================================
glusterfs-client.vol:
volume server1
type protocol/client
option transport-type tcp/client # for TCP/IP transport
option remote-host 127.0.0.1 # IP address of the remote brick
option remote-subvolume volumenamespace
end-volume
volume server1vol1
type protocol/client
option transport-type tcp/client # for TCP/IP transport
option remote-host 127.0.0.1 # IP address of the remote brick
option remote-subvolume clusterfs1
end-volume
volume server1vol2
type protocol/client
option transport-type tcp/client # for TCP/IP transport
option remote-host 127.0.0.1 # IP address of the remote brick
option remote-subvolume clusterfs2
end-volume
###################
volume bricks
type cluster/unify
option namespace server1
option readdir-force-success on # ignore failed mounts
subvolumes server1vol1 server1vol2
### ** ALU Scheduler Option **
option scheduler alu
#option lock-node clusterfs # first child will be lock-node by default
option alu.limits.min-free-disk 128GB
option alu.limits.max-open-files 10000
option alu.order disk-usage:read-usage:write-usage:open-files-usage:disk-speed-usage
option alu.disk-usage.entry-threshold 2GB
option alu.disk-usage.exit-threshold 128MB
option alu.open-files-usage.entry-threshold 1024
option alu.open-files-usage.exit-threshold 32
option alu.read-usage.entry-threshold 20 #%
option alu.read-usage.exit-threshold 4 #%
option alu.write-usage.entry-threshold 20 #%
option alu.write-usage.exit-threshold 4 #%
option alu.disk-speed-usage.entry-threshold 0 # DO NOT SET IT. SPEED IS CONSTANT!!!.
option alu.disk-speed-usage.exit-threshold 0 # DO NOT SET IT. SPEED IS CONSTANT!!!.
option alu.stat-refresh.interval 10sec
option alu.stat-refresh.num-file-create 10
end-volume
=====================================
glusterfs-server.vol:
volume clusterfs1
type storage/posix
option directory /volume1
end-volume
#######
volume clusterfs2
type storage/posix
option directory /volume2
end-volume
#######
volume volumenamespace
type storage/posix
option directory /volume.namespace
end-volume
###
volume clusterfs
type protocol/server
option transport-type tcp/server
subvolumes clusterfs1 clusterfs2 volumenamespace
option auth.ip.clusterfs1.allow *
option auth.ip.clusterfs2.allow *
option auth.ip.volumenamespace.allow *
end-volume
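For completeness, this is roughly how I start the server and mount the
client before running the ls that crashes (I'm pointing the daemons at
the spec files directly, so the exact flags may differ on other setups):

# on the server
glusterfsd -f /etc/glusterfs/glusterfs-server.vol

# on the client
glusterfs -f /etc/glusterfs/glusterfs-client.vol /volumes
df -h /volumes   # reports the expected size
ls -l /volumes   # this is what produces the backtrace above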