Hi, we've created a 6 node cluster with a GFS filesystem. The question is:
why is there always one node on which the CPU time of the GFS/lock-related
processes is much higher than on the others?
Node 1
root 3799 0.0 0.0 0 0 ? S< Mar31 0:00
[dlm_recoverd]
root 3806 0.1 0.0 0 0 ? S< Mar31 16:37 [lock_dlm1]
root 3807 0.1 0.0 0 0 ? S< Mar31 16:40 [lock_dlm2]
root 3808 1.0 0.0 0 0 ? S Mar31 102:27 [gfs_scand]
root 3809 0.1 0.0 0 0 ? S Mar31 18:05
[gfs_glockd]
root 3810 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_recoverd]
root 3811 0.0 0.0 0 0 ? S Mar31 0:00 [gfs_logd]
root 3812 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_quotad]
root 3813 0.0 0.0 0 0 ? S Mar31 0:18
[gfs_inoded]
Node 2
root 4230 0.0 0.0 0 0 ? S< Mar31 0:00
[dlm_recoverd]
root 4237 0.0 0.0 0 0 ? S< Mar31 4:16 [lock_dlm1]
root 4238 0.0 0.0 0 0 ? S< Mar31 4:13 [lock_dlm2]
root 4239 0.4 0.0 0 0 ? S Mar31 38:01 [gfs_scand]
root 4240 0.0 0.0 0 0 ? S Mar31 2:58
[gfs_glockd]
root 4241 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_recoverd]
root 4242 0.0 0.0 0 0 ? S Mar31 0:00 [gfs_logd]
root 4243 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_quotad]
root 4244 0.0 0.0 0 0 ? S Mar31 0:45
[gfs_inoded]
Node 3
root 4124 0.0 0.0 0 0 ? S< Mar31 0:00
[dlm_recoverd]
root 4131 0.0 0.0 0 0 ? S< Mar31 2:29 [lock_dlm1]
root 4132 0.0 0.0 0 0 ? S< Mar31 2:29 [lock_dlm2]
root 4133 0.9 0.0 0 0 ? S Mar31 88:45 [gfs_scand]
root 4134 0.0 0.0 0 0 ? S Mar31 2:35
[gfs_glockd]
root 4135 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_recoverd]
root 4136 0.0 0.0 0 0 ? S Mar31 0:00 [gfs_logd]
root 4137 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_quotad]
root 4138 0.0 0.0 0 0 ? S Mar31 0:06
[gfs_inoded]
Node 4
root 17576 0.0 0.0 0 0 ? S< Mar31 0:00
[dlm_recoverd]
root 17577 0.0 0.0 0 0 ? S< Mar31 0:00 [lock_dlm1]
root 17578 0.0 0.0 0 0 ? S< Mar31 0:00 [lock_dlm2]
root 17579 0.0 0.0 0 0 ? S Mar31 0:01 [gfs_scand]
root 17580 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_glockd]
root 17581 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_recoverd]
root 17582 0.0 0.0 0 0 ? S Mar31 0:00 [gfs_logd]
root 17583 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_quotad]
root 17584 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_inoded]
Node 5
root 30784 0.0 0.0 0 0 ? S< Mar31 0:00
[dlm_recoverd]
root 30785 0.0 0.0 0 0 ? S< Mar31 0:47 [lock_dlm1]
root 30786 0.0 0.0 0 0 ? S< Mar31 0:46 [lock_dlm2]
root 30787 0.2 0.0 0 0 ? S Mar31 10:00
[gfs_scand]
root 30788 0.0 0.0 0 0 ? S Mar31 0:50
[gfs_glockd]
root 30789 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_recoverd]
root 30790 0.0 0.0 0 0 ? S Mar31 0:00 [gfs_logd]
root 30791 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_quotad]
root 30792 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_inoded]
Node 6
root 4273 0.0 0.0 0 0 ? S< Mar31 0:00
[dlm_recoverd]
root 4274 0.0 0.0 0 0 ? S< Mar31 0:18 [lock_dlm1]
root 4275 0.0 0.0 0 0 ? S< Mar31 0:17 [lock_dlm2]
root 4276 0.1 0.0 0 0 ? S Mar31 5:36 [gfs_scand]
root 4277 0.0 0.0 0 0 ? S Mar31 0:22
[gfs_glockd]
root 4278 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_recoverd]
root 4279 0.0 0.0 0 0 ? S Mar31 0:00 [gfs_logd]
root 4280 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_quotad]
root 4281 0.0 0.0 0 0 ? S Mar31 0:00
[gfs_inoded]
FC 4
kernel-smp-2.6.15-1.1831_FC4
dlm-kernel-smp-2.6.11.5-20050601.152643.FC4.21
GFS-kernel-smp-2.6.11.8-20050601.152643.FC4.24
cman-kernel-smp-2.6.11.5-20050601.152643.FC4.22
TIA
German Staltari
--
Linux-cluster@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/linux-cluster