Re: why are there "degraded" PGs when adding OSDs?

Hi Sam,

Trying again today with the crush tunables set to firefly.  Degraded PGs peaked
at around 46.8%.
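For anyone following along, the tunables switch and the recovery watch were just
the stock CLI, something like:

# apply the firefly tunables profile (this itself triggers data movement)
ceph osd crush tunables firefly
# watch recovery progress; the degraded percentage shows up in the status line
ceph -w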

I've attached the ceph pg dump and the crushmap (the one embedded in the osdmap)
from before and after the OSD additions.  Three OSDs were added on host osd03,
which added roughly 5TB to the existing ~17TB for a total of around 22TB
(5TB / 22TB ≈ 22.7%).  Is it expected for 46.8% of PGs to be degraded after
adding about 22% of the storage?
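In case it helps reproduce the comparison: with the two decompiled maps below
saved as, say, crush-before.txt and crush-after.txt (names made up), crushtool
can replay the placements and count how many PGs had their acting set change.
Since a single moved replica is enough to flag a whole PG, the degraded
percentage tracks PGs touched rather than bytes added, which is presumably why
it can run well past the capacity fraction.

# recompile the decompiled text maps (file names are arbitrary)
crushtool -c crush-before.txt -o crush-before.bin
crushtool -c crush-after.txt -o crush-after.bin
# replay rule 0 with 3 replicas (adjust to the pool's size) over 1024 samples
crushtool -i crush-before.bin --test --rule 0 --num-rep 3 \
	--min-x 0 --max-x 1023 --show-mappings > mappings.before
crushtool -i crush-after.bin --test --rule 0 --num-rep 3 \
	--min-x 0 --max-x 1023 --show-mappings > mappings.after
# each differing line is a sample input whose placement changed
diff mappings.before mappings.after | grep -c '^>'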

Another odd thing: the kernel RBD clients froze up after the OSDs were added,
but worked fine after a reboot (Debian kernel 3.16.7).
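If the freeze happens again, the kernel client's stuck requests can be
inspected before rebooting (assuming debugfs is mounted):

# in-flight OSD requests held by the kernel ceph/rbd client
cat /sys/kernel/debug/ceph/*/osdc
# libceph messages about socket errors or osdmap handling
dmesg | grep -i libceph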

Thanks for checking!
C.
# begin crush map (before the OSD additions)
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable straw_calc_version 1

# devices
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
device 4 osd.4
device 5 osd.5
device 6 osd.6
device 7 osd.7
device 8 osd.8
device 9 osd.9
device 10 osd.10
device 11 device11
device 12 osd.12
device 13 osd.13
device 14 osd.14
device 15 device15
device 16 osd.16
device 17 osd.17
device 18 osd.18
device 19 osd.19
device 20 osd.20
device 21 device21
device 22 device22
device 23 device23
device 24 osd.24
device 25 osd.25
device 26 osd.26
device 27 osd.27
device 28 device28
device 29 osd.29

# types
type 0 osd
type 1 host
type 2 rack
type 3 row
type 4 room
type 5 datacenter
type 6 root

# buckets
host osd05 {
	id -6		# do not change unnecessarily
	# weight 1.350
	alg straw
	hash 0	# rjenkins1
	item osd.5 weight 0.450
	item osd.13 weight 0.450
	item osd.14 weight 0.450
}
host osd06 {
	id -7		# do not change unnecessarily
	# weight 2.260
	alg straw
	hash 0	# rjenkins1
	item osd.2 weight 0.450
	item osd.6 weight 1.360
	item osd.12 weight 0.450
}
host osd07 {
	id -8		# do not change unnecessarily
	# weight 2.710
	alg straw
	hash 0	# rjenkins1
	item osd.16 weight 1.810
	item osd.18 weight 0.450
	item osd.19 weight 0.450
}
host osd08 {
	id -9		# do not change unnecessarily
	# weight 2.030
	alg straw
	hash 0	# rjenkins1
	item osd.25 weight 0.900
	item osd.26 weight 0.450
	item osd.0 weight 0.680
}
host osd09 {
	id -3		# do not change unnecessarily
	# weight 1.800
	alg straw
	hash 0	# rjenkins1
	item osd.3 weight 0.900
	item osd.4 weight 0.450
	item osd.8 weight 0.450
}
host osd02 {
	id -10		# do not change unnecessarily
	# weight 1.350
	alg straw
	hash 0	# rjenkins1
	item osd.27 weight 0.450
	item osd.29 weight 0.450
	item osd.9 weight 0.450
}
host osd10 {
	id -11		# do not change unnecessarily
	# weight 4.070
	alg straw
	hash 0	# rjenkins1
	item osd.10 weight 0.450
	item osd.7 weight 1.810
	item osd.1 weight 1.810
}
host osd01 {
	id -2		# do not change unnecessarily
	# weight 2.250
	alg straw
	hash 0	# rjenkins1
	item osd.20 weight 0.900
	item osd.17 weight 0.900
	item osd.24 weight 0.450
}
root default {
	id -1		# do not change unnecessarily
	# weight 17.820
	alg straw
	hash 0	# rjenkins1
	item osd05 weight 1.350
	item osd06 weight 2.260
	item osd07 weight 2.710
	item osd08 weight 2.030
	item osd09 weight 1.800
	item osd02 weight 1.350
	item osd10 weight 4.070
	item osd01 weight 2.250
}

# rules
rule data {
	ruleset 0
	type replicated
	min_size 1
	max_size 10
	step take default
	step chooseleaf firstn 0 type host
	step emit
}
rule metadata {
	ruleset 1
	type replicated
	min_size 1
	max_size 10
	step take default
	step chooseleaf firstn 0 type host
	step emit
}
rule rbd {
	ruleset 2
	type replicated
	min_size 1
	max_size 10
	step take default
	step chooseleaf firstn 0 type host
	step emit
}
rule tibsecpool {
	ruleset 3
	type erasure
	min_size 3
	max_size 20
	step set_chooseleaf_tries 5
	step take default
	step chooseleaf indep 0 type host
	step emit
}
rule tibs-ecpool {
	ruleset 4
	type erasure
	min_size 3
	max_size 20
	step set_chooseleaf_tries 5
	step take default
	step chooseleaf indep 0 type host
	step emit
}
rule tibs-ecpool2 {
	ruleset 5
	type erasure
	min_size 3
	max_size 20
	step set_chooseleaf_tries 5
	step take default
	step chooseleaf indep 0 type host
	step emit
}
rule tibs-ecpool-new {
	ruleset 6
	type erasure
	min_size 3
	max_size 20
	step set_chooseleaf_tries 5
	step take default
	step chooseleaf indep 0 type host
	step emit
}
rule userbackups-ecpool {
	ruleset 7
	type erasure
	min_size 3
	max_size 20
	step set_chooseleaf_tries 5
	step take default
	step chooseleaf indep 0 type host
	step emit
}
rule mirror-ecpool {
	ruleset 8
	type erasure
	min_size 3
	max_size 20
	step set_chooseleaf_tries 5
	step take default
	step chooseleaf indep 0 type host
	step emit
}

# end crush map
# begin crush map (after the OSD additions on osd03)
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable straw_calc_version 1

# devices
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
device 4 osd.4
device 5 osd.5
device 6 osd.6
device 7 osd.7
device 8 osd.8
device 9 osd.9
device 10 osd.10
device 11 osd.11
device 12 osd.12
device 13 osd.13
device 14 osd.14
device 15 osd.15
device 16 osd.16
device 17 osd.17
device 18 osd.18
device 19 osd.19
device 20 osd.20
device 21 osd.21
device 22 device22
device 23 device23
device 24 osd.24
device 25 osd.25
device 26 osd.26
device 27 osd.27
device 28 device28
device 29 osd.29

# types
type 0 osd
type 1 host
type 2 rack
type 3 row
type 4 room
type 5 datacenter
type 6 root

# buckets
host osd05 {
	id -6		# do not change unnecessarily
	# weight 1.350
	alg straw
	hash 0	# rjenkins1
	item osd.5 weight 0.450
	item osd.13 weight 0.450
	item osd.14 weight 0.450
}
host osd06 {
	id -7		# do not change unnecessarily
	# weight 2.260
	alg straw
	hash 0	# rjenkins1
	item osd.2 weight 0.450
	item osd.6 weight 1.360
	item osd.12 weight 0.450
}
host osd07 {
	id -8		# do not change unnecessarily
	# weight 2.710
	alg straw
	hash 0	# rjenkins1
	item osd.16 weight 1.810
	item osd.18 weight 0.450
	item osd.19 weight 0.450
}
host osd08 {
	id -9		# do not change unnecessarily
	# weight 2.030
	alg straw
	hash 0	# rjenkins1
	item osd.25 weight 0.900
	item osd.26 weight 0.450
	item osd.0 weight 0.680
}
host osd09 {
	id -3		# do not change unnecessarily
	# weight 1.800
	alg straw
	hash 0	# rjenkins1
	item osd.3 weight 0.900
	item osd.4 weight 0.450
	item osd.8 weight 0.450
}
host osd02 {
	id -10		# do not change unnecessarily
	# weight 1.350
	alg straw
	hash 0	# rjenkins1
	item osd.27 weight 0.450
	item osd.29 weight 0.450
	item osd.9 weight 0.450
}
host osd10 {
	id -11		# do not change unnecessarily
	# weight 4.070
	alg straw
	hash 0	# rjenkins1
	item osd.10 weight 0.450
	item osd.7 weight 1.810
	item osd.1 weight 1.810
}
host osd01 {
	id -2		# do not change unnecessarily
	# weight 2.250
	alg straw
	hash 0	# rjenkins1
	item osd.20 weight 0.900
	item osd.17 weight 0.900
	item osd.24 weight 0.450
}
host osd03 {
	id -4		# do not change unnecessarily
	# weight 4.520
	alg straw
	hash 0	# rjenkins1
	item osd.11 weight 0.900
	item osd.15 weight 1.810
	item osd.21 weight 1.810
}
root default {
	id -1		# do not change unnecessarily
	# weight 22.340
	alg straw
	hash 0	# rjenkins1
	item osd05 weight 1.350
	item osd06 weight 2.260
	item osd07 weight 2.710
	item osd08 weight 2.030
	item osd09 weight 1.800
	item osd02 weight 1.350
	item osd10 weight 4.070
	item osd01 weight 2.250
	item osd03 weight 4.520
}

# rules
rule data {
	ruleset 0
	type replicated
	min_size 1
	max_size 10
	step take default
	step chooseleaf firstn 0 type host
	step emit
}
rule metadata {
	ruleset 1
	type replicated
	min_size 1
	max_size 10
	step take default
	step chooseleaf firstn 0 type host
	step emit
}
rule rbd {
	ruleset 2
	type replicated
	min_size 1
	max_size 10
	step take default
	step chooseleaf firstn 0 type host
	step emit
}
rule tibsecpool {
	ruleset 3
	type erasure
	min_size 3
	max_size 20
	step set_chooseleaf_tries 5
	step take default
	step chooseleaf indep 0 type host
	step emit
}
rule tibs-ecpool {
	ruleset 4
	type erasure
	min_size 3
	max_size 20
	step set_chooseleaf_tries 5
	step take default
	step chooseleaf indep 0 type host
	step emit
}
rule tibs-ecpool2 {
	ruleset 5
	type erasure
	min_size 3
	max_size 20
	step set_chooseleaf_tries 5
	step take default
	step chooseleaf indep 0 type host
	step emit
}
rule tibs-ecpool-new {
	ruleset 6
	type erasure
	min_size 3
	max_size 20
	step set_chooseleaf_tries 5
	step take default
	step chooseleaf indep 0 type host
	step emit
}
rule userbackups-ecpool {
	ruleset 7
	type erasure
	min_size 3
	max_size 20
	step set_chooseleaf_tries 5
	step take default
	step chooseleaf indep 0 type host
	step emit
}
rule mirror-ecpool {
	ruleset 8
	type erasure
	min_size 3
	max_size 20
	step set_chooseleaf_tries 5
	step take default
	step chooseleaf indep 0 type host
	step emit
}

# end crush map
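
For completeness, the text maps above are the decompiled form; the usual
round-trip with stock crushtool is (file names arbitrary):

# grab the compiled crush map out of the cluster
ceph osd getcrushmap -o crush.bin
# decompile to the editable text form shown above
crushtool -d crush.bin -o crush.txt
# recompile and inject after editing
crushtool -c crush.txt -o crush.new
ceph osd setcrushmap -i crush.new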

<<attachment: before_osd_add_pg_dump.zip>>

<<attachment: after_osd_add_pg_dump.zip>>
