[root@osd01 ~]# ceph osd pool ls detail -f json-pretty
[
    {
        "pool_name": "rbd",
        "flags": 1,
        "flags_names": "hashpspool",
        "type": 1,
        "size": 2,
        "min_size": 1,
        "crush_rule": 0,
        "object_hash": 2,
        "pg_num": 128,
        "pg_placement_num": 128,
        "crash_replay_interval": 0,
        "last_change": "300",
        "last_force_op_resend": "0",
        "last_force_op_resend_preluminous": "0",
        "auid": 0,
        "snap_mode": "selfmanaged",
        "snap_seq": 0,
        "snap_epoch": 0,
        "pool_snaps": [],
        "removed_snaps": "[]",
        "quota_max_bytes": 0,
        "quota_max_objects": 0,
        "tiers": [],
        "tier_of": -1,
        "read_tier": -1,
        "write_tier": -1,
        "cache_mode": "none",
        "target_max_bytes": 0,
        "target_max_objects": 0,
        "cache_target_dirty_ratio_micro": 400000,
        "cache_target_dirty_high_ratio_micro": 600000,
        "cache_target_full_ratio_micro": 800000,
        "cache_min_flush_age": 0,
        "cache_min_evict_age": 0,
        "erasure_code_profile": "",
        "hit_set_params": {
            "type": "none"
        },
        "hit_set_period": 0,
        "hit_set_count": 0,
        "use_gmt_hitset": true,
        "min_read_recency_for_promote": 0,
        "min_write_recency_for_promote": 0,
        "hit_set_grade_decay_rate": 0,
        "hit_set_search_last_n": 0,
        "grade_table": [],
        "stripe_width": 0,
        "expected_num_objects": 0,
        "fast_read": false,
        "options": {},
        "application_metadata": {
            "rbd": {}
        }
    }
]
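
For reference, the replication settings shown above (size 2, min_size 1, pg_num 128) can also be read back individually; a minimal sketch, assuming the pool is still named "rbd":

# query individual settings of the rbd pool from any admin node
ceph osd pool get rbd size
ceph osd pool get rbd min_size
ceph osd pool get rbd pg_num
# or dump everything the pool exposes in one go
ceph osd pool get rbd all
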
[root@osd01 ~]# ceph osd crush rule dump
[
    {
        "rule_id": 0,
        "rule_name": "replicated_rule",
        "ruleset": 0,
        "type": 1,
        "min_size": 1,
        "max_size": 10,
        "steps": [
            {
                "op": "take",
                "item": -1,
                "item_name": "default"
            },
            {
                "op": "chooseleaf_firstn",
                "num": 0,
                "type": "host"
            },
            {
                "op": "emit"
            }
        ]
    }
]
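
The rule above is the stock replicated_rule: take the "default" root and choose one leaf (OSD) per host. To confirm that CRUSH can actually place 2 replicas on distinct hosts with the current tree, one option is to test the compiled map offline; a sketch, assuming crushtool is installed on the node and /tmp is usable as scratch space:

# export the in-use CRUSH map and simulate placements for rule 0 with 2 replicas
ceph osd getcrushmap -o /tmp/crushmap.bin
crushtool -i /tmp/crushmap.bin --test --rule 0 --num-rep 2 --show-mappings | head
# any result listed here means CRUSH could not place both replicas
crushtool -i /tmp/crushmap.bin --test --rule 0 --num-rep 2 --show-bad-mappings
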
On Wed, 11 Apr 2018 at 08:50, Konstantin Shalygin <k0ste@xxxxxxxx> wrote:
On 04/11/2018 07:48 PM, Steven Vacaroaia wrote:
> Thanks for the suggestion but, unfortunately, having the same number of
> OSDs did not solve the issue.
> Here it is with 2 OSDs per server, 3 servers - identical server and OSD
> configuration.
ceph osd pool ls detail
ceph osd crush rule dump
k