Hi Sebastien,
Thanks for your reply. Yes, the undersized PGs and the recovery in progress are because we added a new OSD after getting the "2 OSDs near full" warning, and the newly added OSD is rebalancing the data now.
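For reference, I am keeping an eye on the recovery with the usual status commands (nothing special, just what I am watching):

ceph -s              # overall cluster state and recovery/backfill progress
ceph health detail   # which PGs are undersized/degraded and which OSDs are near full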
[root@intcfs-osd6 ~]# ceph osd df
ID WEIGHT  REWEIGHT SIZE  USE   AVAIL %USE  VAR  PGS
 0 3.29749 1.00000  3376G 2875G  501G 85.15 1.26 165
 1 3.26869 1.00000  3347G 1923G 1423G 57.46 0.85 152
 2 3.27339 1.00000  3351G 1980G 1371G 59.08 0.88 161
 3 3.24089 1.00000  3318G 2130G 1187G 64.21 0.95 168
 4 3.24089 1.00000  3318G 2997G  320G 90.34 1.34 176
 5 3.32669 1.00000  3406G 2466G  939G 72.42 1.07 165
 6 3.27800 1.00000  3356G 1463G 1893G 43.60 0.65 166
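From the output above, osd.0 (85.15 %USE) and osd.4 (90.34 %USE) are the two near-full OSDs, while the new osd.6 is still only at 43.60 %USE. If the backfill alone does not even this out, I assume a reweight could push some PGs off the fullest OSD; the values below are only illustrative, not something I have run yet:

ceph osd reweight 4 0.90             # temporarily lower the override weight of the fullest OSD (illustrative value)
ceph osd reweight-by-utilization 120 # or let Ceph reweight OSDs above 120% of average utilization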
ceph osd crush rule dump
[
    {
        "rule_id": 0,
        "rule_name": "replicated_ruleset",
        "ruleset": 0,
        "type": 1,
        "min_size": 1,
        "max_size": 10,
        "steps": [
            {
                "op": "take",
                "item": -1,
                "item_name": "default"
            },
            {
                "op": "chooseleaf_firstn",
                "num": 0,
                "type": "host"
            },
            {
                "op": "emit"
            }
        ]
    }
]
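As far as I understand it, this rule takes the default root and places each replica on a different host (chooseleaf_firstn with type "host"), so with size 2 each PG should land on two different hosts. To double-check how the OSDs are laid out under hosts in the CRUSH map I looked at:

ceph osd tree   # shows which OSDs sit under which host bucket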
We are running ceph version 10.2.2 and ceph version 10.2.9.
ceph osd pool ls detail
pool 0 'rbd' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 1 flags hashpspool stripe_width 0
pool 3 'downloads_data' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 250 pgp_num 250 last_change 39 flags hashpspool crash_replay_interval 45 stripe_width 0
pool 4 'downloads_metadata' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 250 pgp_num 250 last_change 36 flags hashpspool stripe_width 0