vm.sh resource script

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Hi,

I'm trying to use "node1> clusvcadm -M <service> -m node2" on Fedora 11 with the latest cluster updates installed, and I get the following error message that I'm having a hard time deciphering/debugging:

Trying to migrate service:svc1 to node2...S/Lang Script Error

In /var/log/messages:

Aug 14 16:44:08 node2 rgmanager[3210]: [S/Lang] Variable Uninitialized Error
Aug 14 16:44:08 node2 rgmanager[3210]: [S/Lang] Script Execution Failure

I'm not sure whether there's something wrong with my cluster.conf or whether that error is indicative of something else.

// Thomas


My /etc/cluster.conf:

<?xml version="1.0" ?>
<!-- Two-node KVM virtualization cluster (cman/rgmanager).
     FIX: the original document was not well-formed XML - a stray
     follow_service(...) text line and an unmatched extra </event> end tag
     appeared after the second <event> element (both events were already
     closed). They have been removed; this ill-formedness is a likely cause
     of the reported rgmanager S/Lang script errors. -->
<cluster alias="virt-cluster" config_version="130" name="virt-cluster">

	<cman expected_votes="1" two_node="1">
		<multicast addr="225.0.0.12"/>
	</cman>
	<dlm plock_ownership="1" plock_rate_limit="0"/>
	<gfs_controld plock_rate_limit="0"/>
	<fence_daemon clean_start="1" post_fail_delay="0" post_join_delay="3"/>
	<clusternodes>
		<clusternode name="virt1-backup.sjolshagen.net" nodeid="1" votes="1">
			<multicast addr="225.0.0.12" interface="kvmbr1"/>
			<fence>
				<method name="1">
					<device name="power" port="1"/>
				</method>
				<method name="2">
					<device name="human" nodename="virt1-backup.sjolshagen.net"/>
				</method>
			</fence>
		</clusternode>
		<clusternode name="virt0-backup.sjolshagen.net" nodeid="3" votes="1">
			<multicast addr="225.0.0.12" interface="kvmbr1"/>
			<fence>
				<method name="1">
					<device name="power" port="8"/>
				</method>
				<method name="2">
					<device name="human" nodename="virt0-backup.sjolshagen.net"/>
				</method>
			</fence>
		</clusternode>
	</clusternodes>
	<fencedevices>
		<!-- Definition of fence device named "power", referenced previously -->
		<fencedevice agent="fence_wti" name="power" ipaddr="192.168.1.10" password="thomas"/>
		<fencedevice agent="fence_manual" name="human"/>
	</fencedevices>
	<rm log_level="7" central_processing="1">
		<!-- Configure events to do service relationships -->
		<events>
			<event class="service" name="service-ers">
				notice("Event service triggered!");
				evalfile("/usr/share/cluster/follow-service.sl");
				follow_service("service:dhcpd", "service:rhc_mail_db", "service:rhc_mail_old", "service:rhc_mail_ui");
			</event>
			<event class="node" name="node-ers">
				notice("Event node triggered!");
				evalfile("/usr/share/cluster/follow-service.sl");
				follow_service("service:dhcpd", "service:rhc_mail_db", "service:rhc_mail_old", "service:rhc_mail_ui");
			</event>
		</events>
		<failoverdomains>
			<!-- NOTE(review): "node1"/"node2" do not match any <clusternode>
			     name declared above (virt0/virt1-backup.sjolshagen.net);
			     failoverdomainnode names must match clusternode names.
			     TODO: confirm and align these with the real node names. -->
			<failoverdomain name="ALL">
				<failoverdomainnode name="node1"/>
				<failoverdomainnode name="node2"/>
			</failoverdomain>
			<failoverdomain name="prefer-node1" restricted="1" ordered="1">
				<failoverdomainnode name="node1" priority="10"/>
				<failoverdomainnode name="node2" priority="20"/>
			</failoverdomain>
		</failoverdomains>
		<!-- Internet services definition -->
		<service autostart="1" exclusive="0" domain="ALL" name="rhc_mail_db" recovery="relocate">
			<vm name="kvm01-hvm" migrate="live" snapshot="/cluster/kvm-guests/snapshots" use_virsh="1" hypervisor="qemu"/>
		</service>
		<service autostart="1" exclusive="0" domain="ALL" name="rhc_mail_old" recovery="relocate" depend="service:rhc_mail_db" depend_mode="soft">
			<!-- NOTE(review): kvm03-hvm is nested INSIDE kvm02-hvm, which makes
			     it a child resource of that vm; if the two guests are meant to
			     be independent they should be sibling <vm/> elements.
			     Structure preserved as-is - confirm intent before flattening. -->
			<vm name="kvm02-hvm" migrate="live" snapshot="/cluster/kvm-guests/snapshots" use_virsh="1" hypervisor="qemu">
				<vm name="kvm03-hvm" migrate="live" snapshot="/cluster/kvm-guests/snapshots" use_virsh="1" hypervisor="qemu"/>
			</vm>
		</service>
		<service autostart="1" exclusive="0" domain="ALL" name="windows_guests" recovery="relocate" depend="service:dhcpd" depend_mode="soft">
			<vm name="kvm20-hvm" migrate="live" snapshot="/cluster/kvm-guests/snapshots" use_virsh="1" hypervisor="qemu"/>
		</service>
		<service autostart="1" exclusive="0" domain="ALL" name="rhc_mail_ui" recovery="relocate" depend="service:rhc_mail_old" depend_mode="soft">
			<vm name="kvm04-hvm" migrate="live" snapshot="/cluster/kvm-guests/snapshots" use_virsh="1" hypervisor="qemu"/>
		</service>
		<service autostart="1" exclusive="0" domain="ALL" name="rhc_samba" recovery="relocate">
			<vm name="kvm06-hvm" recovery="restart" snapshot="/cluster/kvm-guests/snapshots" use_virsh="1" hypervisor="qemu"/>
		</service>
		<service autostart="0" exclusive="0" name="rhc_mail_new" domain="prefer-node1" recovery="restart">
			<vm name="kvm30-hvm" migrate="live" snapshot="/cluster/kvm-guests/snapshots" use_virsh="1" hypervisor="qemu"/>
			<vm name="kvm31-hvm" migrate="live" snapshot="/cluster/kvm-guests/snapshots" use_virsh="1" hypervisor="qemu"/>
		</service>
		<service autostart="1" exclusive="0" domain="ALL" name="dhcpd" recovery="relocate">
			<script name="dhcpd" domain="virt-cluster" file="/cluster/network/configs/dhcp/dhcpd" max_restarts="2"/>
		</service>
	</rm>
</cluster>


----------------------------------------------------------------
This message was sent using IMP, the Internet Messaging Program.


--
Linux-cluster mailing list
Linux-cluster@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/linux-cluster

[Index of Archives]     [Corosync Cluster Engine]     [GFS]     [Linux Virtualization]     [Centos Virtualization]     [Centos]     [Linux RAID]     [Fedora Users]     [Fedora SELinux]     [Big List of Linux Books]     [Yosemite Camping]

  Powered by Linux