IP Failover

I have two servers, 10.89.99.31 (KNTCLFS001) and 10.89.99.32 (KNTCLFS002), and I am trying to use 10.89.99.30 as a floating IP between them in an Active/Active cluster.

I have several services that should run simultaneously on both servers, so I am setting up an Active/Active cluster. I am using the cluster solely for NFS, with GFS2 for the clustered storage. What I would like is for the storage to remain available when either node goes down. Right now everything works fine if KNTCLFS002 goes down; if KNTCLFS001 goes down instead, nothing works, which probably means everything was attaching to KNTCLFS001 to begin with. I am not sure whether I should adjust my stickiness settings.
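For reference, the stickiness in question is set through rsc_defaults at the bottom of the attached config; while testing failover it can be adjusted on the fly with the crm shell (the value 0 below is only an illustration):

crm configure rsc_defaults resource-stickiness=0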

When KNTCLFS001 goes down, the floating IP dies and naturally all the services become unreachable, which in turn makes the storage unreachable. I am using this as storage for VMs, so I would like the storage to always be available. Can someone look over the attached configuration and let me know whether any changes are warranted to achieve an always-up NFS share? Is there a better solution?
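For what it's worth, as far as I understand the IPaddr2 agent, an anonymous clone just tries to start the same address on both nodes; the agent only shares a single address across clone instances when the clone is globally unique and a clusterip_hash is set (it then uses iptables CLUSTERIP underneath). A minimal sketch of that variant, reusing the names from the attached config:

primitive res_IPaddr2_1 ocf:heartbeat:IPaddr2 \
	params ip="10.89.99.30" nic="em1" cidr_netmask="22" clusterip_hash="sourceip" \
	op monitor interval="10" timeout="20"
clone cl_IPaddr2_1 res_IPaddr2_1 \
	meta globally-unique="true" clone-max="2" clone-node-max="2"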

Thanks,

William

node KNTCLFS001 \
	attributes standby="off"
node KNTCLFS002 \
	attributes standby="off"
primitive res_Filesystem_1 ocf:heartbeat:Filesystem \
	params device="/dev/drbd0" directory="/Storage" fstype="gfs2" \
	operations $id="res_Filesystem_1-operations" \
	op start interval="0" timeout="60" \
	op stop interval="0" timeout="60" \
	op monitor interval="20" timeout="40" start-delay="0" \
	op notify interval="0" timeout="60" \
	meta target-role="started" allow-migrate="true"
primitive res_IPaddr2_1 ocf:heartbeat:IPaddr2 \
	params ip="10.89.99.30" nic="em1" cidr_netmask="22" \
	operations $id="res_IPaddr2_1-operations" \
	op start interval="0" timeout="20" \
	op stop interval="0" timeout="20" \
	op monitor interval="10" timeout="20" start-delay="0" \
	meta allow-migrate="true" target-role="started"
primitive res_cman_1 lsb:cman \
	operations $id="res_cman_1-operations" \
	op start interval="0" timeout="15" \
	op stop interval="0" timeout="15" \
	op monitor interval="15" timeout="15" start-delay="15"
primitive res_corosync_1 lsb:corosync \
	operations $id="res_corosync_1-operations" \
	op start interval="0" timeout="15" \
	op stop interval="0" timeout="15" \
	op monitor interval="15" timeout="15" start-delay="15"
primitive res_drbd_2 lsb:drbd \
	operations $id="res_drbd_2-operations" \
	op start interval="0" timeout="15" \
	op stop interval="0" timeout="15" \
	op monitor interval="15" timeout="15" start-delay="15"
primitive res_nfs_1 lsb:nfs \
	operations $id="res_nfs_1-operations" \
	op start interval="0" timeout="15" \
	op stop interval="0" timeout="15" \
	op monitor interval="15" timeout="15" start-delay="15"
primitive res_sshd_1 lsb:sshd \
	operations $id="res_sshd_1-operations" \
	op start interval="0" timeout="15" \
	op stop interval="0" timeout="15" \
	op monitor interval="15" timeout="15" start-delay="15"
primitive stonith_fence_ipmilan_1 stonith:fence_ipmilan \
	meta target-role="started"
primitive stonith_fence_pcmk_1 stonith:fence_pcmk
clone cl_Filesystem_1 res_Filesystem_1 \
	meta clone-max="2" clone-node-max="2" notify="true" interleave="true"
clone cl_IPaddr2_1 res_IPaddr2_1 \
	meta clone-max="2" clone-node-max="2" notify="true" interleave="true" allow-migrate="true"
clone cl_cman_1 res_cman_1 \
	meta clone-max="2" clone-node-max="2" notify="true" interleave="true" target-role="started"
clone cl_corosync_1 res_corosync_1 \
	meta clone-max="2" clone-node-max="2" notify="true" interleave="true" target-role="started"
clone cl_drbd_2 res_drbd_2 \
	meta clone-max="2" clone-node-max="2" notify="true" interleave="true" target-role="started"
clone cl_fence_ipmilan_1 stonith_fence_ipmilan_1 \
	meta clone-max="2" clone-node-max="2" notify="true" interleave="true"
clone cl_fence_pcmk_1 stonith_fence_pcmk_1 \
	meta clone-max="2" notify="true" interleave="true"
clone cl_nfs_1 res_nfs_1 \
	meta clone-max="2" clone-node-max="2" notify="true" interleave="true" target-role="started"
clone cl_sshd_1 res_sshd_1 \
	meta clone-max="2" clone-node-max="2" notify="true" interleave="true" target-role="started"
location loc_cl_Filesystem_1_KNTCLFS001 cl_Filesystem_1 inf: KNTCLFS001
location loc_cl_Filesystem_1_KNTCLFS002 cl_Filesystem_1 inf: KNTCLFS002
location loc_cl_IPaddr2_1-ping-prefer cl_IPaddr2_1 \
	rule $id="loc_cl_IPaddr2_1-ping-prefer-rule" pingd: defined pingd
location loc_cl_IPaddr2_1_KNTCLFS001 cl_IPaddr2_1 inf: KNTCLFS001
location loc_cl_IPaddr2_1_KNTCLFS002 cl_IPaddr2_1 inf: KNTCLFS002
location loc_cl_cman_1-ping-prefer cl_cman_1 \
	rule $id="loc_cl_cman_1-ping-prefer-rule" pingd: defined pingd
location loc_cl_cman_1_KNTCLFS001 cl_cman_1 inf: KNTCLFS001
location loc_cl_cman_1_KNTCLFS002 cl_cman_1 inf: KNTCLFS002
location loc_cl_corosync_1_KNTCLFS001 cl_corosync_1 inf: KNTCLFS001
location loc_cl_corosync_1_KNTCLFS002 cl_corosync_1 inf: KNTCLFS002
location loc_cl_drbd_2_KNTCLFS001 cl_drbd_2 inf: KNTCLFS001
location loc_cl_drbd_2_KNTCLFS002 cl_drbd_2 inf: KNTCLFS002
location loc_cl_fence_ipmilan_1-ping-prefer cl_fence_ipmilan_1 \
	rule $id="loc_cl_fence_ipmilan_1-ping-prefer-rule" pingd: defined pingd
location loc_cl_fence_ipmilan_1_KNTCLFS001 cl_fence_ipmilan_1 inf: KNTCLFS001
location loc_cl_fence_ipmilan_1_KNTCLFS002 cl_fence_ipmilan_1 inf: KNTCLFS002
location loc_cl_fence_pcmk_1-ping-prefer cl_fence_pcmk_1 \
	rule $id="loc_cl_fence_pcmk_1-ping-prefer-rule" pingd: defined pingd
location loc_cl_fence_pcmk_1_KNTCLFS001 cl_fence_pcmk_1 inf: KNTCLFS001
location loc_cl_fence_pcmk_1_KNTCLFS002 cl_fence_pcmk_1 inf: KNTCLFS002
location loc_cl_nfs_1-ping-prefer cl_nfs_1 \
	rule $id="loc_cl_nfs_1-ping-prefer-rule" pingd: defined pingd
location loc_cl_nfs_1_KNTCLFS001 cl_nfs_1 inf: KNTCLFS001
location loc_cl_nfs_1_KNTCLFS002 cl_nfs_1 inf: KNTCLFS002
location loc_cl_sshd_1_KNTCLFS001 cl_sshd_1 inf: KNTCLFS001
location loc_cl_sshd_1_KNTCLFS002 cl_sshd_1 inf: KNTCLFS002
property $id="cib-bootstrap-options" \
	default-resource-stickiness="100" \
	expected-quorum-votes="2" \
	dc-version="1.1.6-3.el6-a02c0f19a00c1eb2527ad38f146ebc0834814558" \
	no-quorum-policy="ignore" \
	cluster-infrastructure="cman"
rsc_defaults $id="rsc-options" \
	target-role="started" \
	resource-stickiness="100"
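For comparison, the DRBD user's guide integrates DRBD with Pacemaker through the ocf:linbit:drbd master/slave resource rather than the lsb:drbd init script (and cman/corosync are normally started by the init system rather than managed as cluster resources). A sketch of the dual-primary master/slave form, assuming a DRBD resource named r0 (the name is a placeholder):

primitive res_drbd_2 ocf:linbit:drbd \
	params drbd_resource="r0" \
	op monitor interval="29" role="Master" \
	op monitor interval="31" role="Slave"
ms ms_drbd_2 res_drbd_2 \
	meta master-max="2" master-node-max="1" clone-max="2" notify="true" interleave="true"

An order/colocation pair tying cl_Filesystem_1 to the Master role of ms_drbd_2 would then make the GFS2 mount wait for DRBD promotion, in place of the bare location constraints above.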