my drbd.conf ---------------------------------------------------------------------------------------- global { usage-count no; } resource drbd0 { protocol C; handlers { pri-on-incon-degr "echo o > /proc/sysrq-trigger ; halt -f"; pri-lost-after-sb "echo o > /proc/sysrq-trigger ; halt -f"; local-io-error "echo o > /proc/sysrq-trigger ; halt -f"; # outdate-peer "/usr/lib/heartbeat/drbd-peer-outdater -t 5"; } startup { wfc-timeout 0; degr-wfc-timeout 120; } net { timeout 60; connect-int 10; ping-int 10; ping-timeout 5; max-buffers 2048; unplug-watermark 128; max-epoch-size 2048; ko-count 4; allow-two-primaries; after-sb-0pri disconnect; after-sb-1pri disconnect; after-sb-2pri disconnect; rr-conflict disconnect; } disk { on-io-error detach; #size 16G; } syncer { rate 700000K; #after "drbd0"; al-extents 257; } on node1 { device /dev/drbd0; disk /dev/volg1/lv_test; address 192.168.0.1:7788; flexible-meta-disk internal; } on node2 { device /dev/drbd0; disk /dev/volg1/lv_test; address 192.168.0.2:7788; flexible-meta-disk internal; } } On Wednesday 27 February 2008 11:30:56 pm Lon Hohberger wrote: > On Tue, 2008-02-26 at 23:40 +0100, Thomas Börnert wrote: > > ------------------------------------------------------------------------- > >----------------------------------------- > > > > my drbd is no problem state is already primary (standalone) > It still needs to call the outdate_peer handler (which must succeed!), > or it will block writes until the other node comes back. ok, I'd commented that out, because I've not installed heartbeat. Do I still need heartbeat for DRBD? Thx Thomas -- Linux-cluster mailing list Linux-cluster@xxxxxxxxxx https://www.redhat.com/mailman/listinfo/linux-cluster