Avra/Poornima,
Please look into this.
Patch ==> http://review.gluster.com/#/c/6483/9
Author ==> Poornima pgurusid@xxxxxxxxxx
Build triggered by ==> amarts
Build-url ==> http://build.gluster.org/job/regression/4847/consoleFull
Download-log-at ==> http://build.gluster.org:443/logs/regression/glusterfs-logs-20140612:08:37:44.tgz
Test written by ==> Avra Sengupta <asengupt@xxxxxxxxxx>
./tests/basic/mgmt_v3-locks.t [11, 12, 13]
(The bracketed numbers are the test cases that failed in this run; they
are the three assertions marked FAILED in the listing below.)
#!/bin/bash

. $(dirname $0)/../include.rc
. $(dirname $0)/../cluster.rc

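# check_peers counts how many peers node 1 sees in the
# "Peer in Cluster (Connected)" state.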
function check_peers {
    $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}

function volume_count {
    local cli=$1;
    if [ "$cli" -eq 1 ]; then
        $CLI_1 volume info | grep 'Volume Name' | wc -l;
    else
        $CLI_2 volume info | grep 'Volume Name' | wc -l;
    fi
}

function volinfo_field()
{
    local vol=$1;
    local field=$2;

    $CLI_1 volume info $vol | grep "^$field: " | sed 's/.*: //';
}

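# Create two different volumes in parallel from two different CLIs.
# With the volume-scoped mgmt_v3 locks under test, neither create
# should have to wait for, or fail because of, the other.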
function two_diff_vols_create {
    # Both volume creates should be successful
    $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0 &
    PID_1=$!

    $CLI_2 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1 $H3:$B3/$V1 &
    PID_2=$!

    wait $PID_1 $PID_2
}

function two_diff_vols_start {
    # Both volume starts should be successful
    $CLI_1 volume start $V0 &
    PID_1=$!

    $CLI_2 volume start $V1 &
    PID_2=$!

    wait $PID_1 $PID_2
}

function two_diff_vols_stop_force {
    # Force stop, so that even if rebalance from the remove-brick
    # operations is still in progress, the stop can go ahead.
    # Both volume stops should be successful.
    $CLI_1 volume stop $V0 force &
    PID_1=$!

    $CLI_2 volume stop $V1 force &
    PID_2=$!

    wait $PID_1 $PID_2
}

function same_vol_remove_brick {
    # Running two remove-brick commands on the same volume at the
    # same time can result in two successes, two failures, or one
    # success and one failure, all of which are valid. The only
    # thing that shouldn't happen is a glusterd crash.
    local vol=$1
    local brick=$2
    $CLI_1 volume remove-brick $vol $brick start &
    $CLI_2 volume remove-brick $vol $brick start
}

cleanup;

TEST launch_cluster 3;
TEST $CLI_1 peer probe $H2;
TEST $CLI_1 peer probe $H3;

EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
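# (EXPECT_WITHIN keeps re-checking check_peers until it returns 2 or
# $PROBE_TIMEOUT expires, so slow probes don't fail spuriously.)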

two_diff_vols_create
EXPECT 'Created' volinfo_field $V0 'Status';
EXPECT 'Created' volinfo_field $V1 'Status';

two_diff_vols_start
EXPECT 'Started' volinfo_field $V0 'Status';
EXPECT 'Started' volinfo_field $V1 'Status';

same_vol_remove_brick $V0 $H2:$B2/$V0
# Check that glusterd did not crash after the same-volume
# remove-brick on both nodes.
EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers

same_vol_remove_brick $V1 $H2:$B2/$V1
# Check that glusterd did not crash after the same-volume
# remove-brick on both nodes.
EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers

$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
$CLI_1 volume set $V1 diagnostics.client-log-level DEBUG
kill_glusterd 3
$CLI_1 volume status $V0
$CLI_2 volume status $V1
$CLI_1 peer status
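# With glusterd on node 3 killed, node 1 should see exactly one
# connected peer, and both volumes should still report Started.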
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers        # ** test 11: FAILED
EXPECT 'Started' volinfo_field $V0 'Status';      # ** test 12: FAILED
EXPECT 'Started' volinfo_field $V1 'Status';      # ** test 13: FAILED

TEST $glusterd_3
$CLI_1 volume status $V0
$CLI_2 volume status $V1
$CLI_1 peer status
#EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
#EXPECT 'Started' volinfo_field $V0 'Status';
#EXPECT 'Started' volinfo_field $V1 'Status';
#two_diff_vols_stop_force
#EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
cleanup;
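For anyone who wants to reproduce this locally: assuming a built and
installed tree (and root, which the test framework needs), the test
should be runnable on its own with the standard TAP harness, e.g.

    prove -v ./tests/basic/mgmt_v3-locks.t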
Pranith