When I start clumanager, I notice that clurmtabd does not start up. The man
page says the service manager daemon will automatically start it for each
mount point, but this does not seem to happen.
I can start it manually. Does anyone know if this is the expected behavior? Is it acceptable to start it manually? Below is my cluster.xml file.
<?xml version="1.0"?>
<cluconfig version="3.0">
  <clumembd broadcast="no" interval="750000" loglevel="5" multicast="yes" multicast_ipaddress="225.0.0.11" thread="yes" tko_count="20"/>
  <cluquorumd loglevel="5" pinginterval="" tiebreaker_ip=""/>
  <clurmtabd loglevel="5" pollinterval="4"/>
  <clusvcmgrd loglevel="5"/>
  <clulockd loglevel="5"/>
  <cluster config_viewnumber="1" key="d05ba7b1a725ad9e94017e45a3eda03e" name="Service Nodes"/>
  <sharedstate driver="libsharedraw.so" rawprimary="/dev/raw/raw1" rawshadow="/dev/raw/raw2" type="raw"/>
  <members>
    <member id="0" name="10.20.70.100" watchdog="yes">
      <powercontroller id="0" ipaddress="10.20.70.101" password="calvin" port="80" type="dell_rac" user="root"/>
    </member>
    <member id="1" name="10.20.70.102" watchdog="yes">
      <powercontroller id="0" ipaddress="10.20.70.50" password="calvin" port="80" type="dell_rac" user="root"/>
    </member>
  </members>
  <services>
    <service checkinterval="5" failoverdomain="Service Nodes" id="0" name="service-core" userscript="/etc/init.d/service-core">
      <service_ipaddresses>
        <service_ipaddress broadcast="" id="0" ipaddress="10.20.70.104" netmask="255.255.255.0"/>
      </service_ipaddresses>
      <device id="0" name="LABEL=/service4" sharename="">
        <mount forceunmount="yes" fstype="ext3" mountpoint="/service" options=""/>
      </device>
    </service>
  </services>
  <failoverdomains>
    <failoverdomain id="0" name="Service Nodes" ordered="no" restricted="yes">
      <failoverdomainnode id="0" name="10.20.70.100"/>
      <failoverdomainnode id="1" name="10.20.70.102"/>
    </failoverdomain>
  </failoverdomains>
</cluconfig>
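
For reference, this is roughly how I check for the daemon and start it by hand. The exact invocation is a sketch from memory; I'm assuming clurmtabd takes the mount point as its argument, which is how I read the man page:

  # Check whether a clurmtabd instance is running for the mount point
  ps ax | grep '[c]lurmtabd'

  # Start one by hand for the service's mount point
  # (assuming the mount point is the expected argument)
  clurmtabd /service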