Re: Ceph is rebalancing CRUSH on every osd add

I guess this is because you are always using the same root tree.
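
If that is the cause, one common workaround is to give each device type its own root, so the rules take disjoint subtrees. A minimal sketch (bucket names and ids below are illustrative, not taken from your map):

# hypothetical: a dedicated root per device type, so a weight
# change under one root cannot remap PGs placed via another
root ssd {
        id -20                          # illustrative id
        alg straw2                      # straw2 needs hammer+ clients
        hash 0                          # rjenkins1
        item ssd0 weight 3.000
        item ssd1 weight 3.000
}
rule sascha-ssd {
        ruleset 1
        type replicated
        min_size 1
        max_size 10
        step take ssd                   # take the ssd-only root
        step chooseleaf firstn 0 type ssd
        step emit
}

With disjoint roots, adding an OSD under one root changes weights, and therefore mappings, only inside that root; rules that take the other roots are untouched.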

On 23 January 2017 10:50:16 CET, Sascha Spreitzer <sascha@xxxxxxxxxxxx> wrote:
Hi all

I noticed Ceph is rebalancing the whole CRUSH map when I add OSDs
that should not affect any of my CRUSH rulesets.

Is there a way to add OSDs to the CRUSH map without having the cluster
change all the OSD mappings (rebalancing)?

Or am I doing something terribly wrong?

How does this work internally, in general? What happens when you add an OSD?

Ceph jewel, tunables optimal
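
One commonly used approach for the first question, sketched here with illustrative ids and values: let a new OSD enter the CRUSH map at weight 0, which moves no data, then raise its weight in steps.

# ceph.conf on the node that creates the OSD, so it starts at weight 0
[osd]
osd crush initial weight = 0

# place the new OSD (id and location are illustrative), then ramp it up
ceph osd crush create-or-move osd.8 0 root=default host=vm1
ceph osd crush reweight osd.8 0.25
ceph osd crush reweight osd.8 1.00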

# rules
rule replicated_ruleset {
        ruleset 0
        type replicated
        min_size 1
        max_size 10
        step take default
        step chooseleaf firstn 0 type host
        step emit
}
rule sascha-ssd {
        ruleset 1
        type replicated
        min_size 1
        max_size 10
        step take sascha
        step chooseleaf firstn 0 type ssd
        step emit
}
rule sascha-spin {
        ruleset 2
        type replicated
        min_size 1
        max_size 10
        step take sascha
        step chooseleaf firstn 0 type spin
        step emit
}
rule sascha-usb {
        ruleset 3
        type replicated
        min_size 1
        max_size 10
        step take sascha
        step chooseleaf firstn 0 type usb
        step emit
}
rule sascha-archive {
        ruleset 4
        type replicated
        min_size 1
        max_size 10
        step take sascha
        step chooseleaf firstn 1 type ssd
        step emit
        step take sascha
        step chooseleaf firstn -1 type usb
        step emit
}

# end crush map
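
The effect of a map edit can also be previewed offline with crushtool before injecting it; a sketch, assuming the current map is exported to ./crushmap (file names illustrative):

ceph osd getcrushmap -o crushmap           # export the compiled map
crushtool -d crushmap -o crushmap.txt      # decompile for editing
# ... edit crushmap.txt, e.g. add the new osd ...
crushtool -c crushmap.txt -o crushmap.new  # recompile

# map PGs with each version of the map and compare; the size of the
# diff is roughly how much data an injection would move
crushtool -i crushmap     --test --rule 1 --num-rep 2 --show-mappings > before
crushtool -i crushmap.new --test --rule 1 --num-rep 2 --show-mappings > after
diff before after | wc -l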

[root@vm1 ceph]# ceph osd tree
ID WEIGHT  TYPE NAME                  UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 6.00000 root default
-8 6.00000     region sascha
-9 6.00000         room sascha-living
-2 3.00000             host vm1
-4 3.00000                 ssd ssd0
 0 1.00000                     osd.0       up  1.00000          1.00000
 4 1.00000                     osd.4       up  1.00000          1.00000
 5 1.00000                     osd.5       up  1.00000          1.00000
-6       0                 usb usb0
-3 3.00000             host vm2
-5 3.00000                 ssd ssd1
 1 1.00000                     osd.1       up  1.00000          1.00000
 2 1.00000                     osd.2       up  1.00000          1.00000
 3 1.00000                     osd.3       up  1.00000          1.00000
-7       0                 usb usb1
 6       0 osd.6                        down        0          1.00000
 7       0 osd.7                          up  1.00000          1.00000
[root@vm1 ceph]# ceph osd crush tree
[
  {
    "id": -1,
    "name": "default",
    "type": "root",
    "type_id": 13,
    "items": [
      {
        "id": -8,
        "name": "sascha",
        "type": "region",
        "type_id": 12,
        "items": [
          {
            "id": -9,
            "name": "sascha-living",
            "type": "room",
            "type_id": 10,
            "items": [
              {
                "id": -2,
                "name": "vm1",
                "type": "host",
                "type_id": 4,
                "items": [
                  {
                    "id": -4,
                    "name": "ssd0",
                    "type": "ssd",
                    "type_id": 1,
                    "items": [
                      {
                        "id": 0,
                        "name": "osd.0",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 1.000000,
                        "depth": 5
                      },
                      {
                        "id": 4,
                        "name": "osd.4",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 1.000000,
                        "depth": 5
                      },
                      {
                        "id": 5,
                        "name": "osd.5",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 1.000000,
                        "depth": 5
                      }
                    ]
                  },
                  {
                    "id": -6,
                    "name": "usb0",
                    "type": "usb",
                    "type_id": 3,
                    "items": []
                  }
                ]
              },
              {
                "id": -3,
                "name": "vm2",
                "type": "host",
                "type_id": 4,
                "items": [
                  {
                    "id": -5,
                    "name": "ssd1",
                    "type": "ssd",
                    "type_id": 1,
                    "items": [
                      {
                        "id": 1,
                        "name": "osd.1",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 1.000000,
                        "depth": 5
                      },
                      {
                        "id": 2,
                        "name": "osd.2",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 1.000000,
                        "depth": 5
                      },
                      {
                        "id": 3,
                        "name": "osd.3",
                        "type": "osd",
                        "type_id": 0,
                        "crush_weight": 1.000000,
                        "depth": 5
                      }
                    ]
                  },
                  {
                    "id": -7,
                    "name": "usb1",
                    "type": "usb",
                    "type_id": 3,
                    "items": []
                  }
                ]
              }
            ]
          }
        ]
      }
    ]
  }
]

[root@vm1 ceph]# ceph osd pool ls detail
pool 1 'rbd' replicated size 2 min_size 1 crush_ruleset 1 object_hash rjenkins pg_num 128 pgp_num 128 last_change 2094 flags hashpspool stripe_width 0
        removed_snaps [1~1,6~4,b~1,d~5]
pool 2 'cephfs_data' replicated size 2 min_size 1 crush_ruleset 1 object_hash rjenkins pg_num 128 pgp_num 128 last_change 2095 flags hashpspool crash_replay_interval 45 stripe_width 0
pool 3 'cephfs_metadata' replicated size 2 min_size 1 crush_ruleset 1 object_hash rjenkins pg_num 128 pgp_num 128 last_change 2096 flags hashpspool stripe_width 0
pool 4 'openstack' replicated size 2 min_size 1 crush_ruleset 1 object_hash rjenkins pg_num 128 pgp_num 128 last_change 2097 flags hashpspool stripe_width 0
        removed_snaps [1~7]
pool 5 'openstack-images' replicated size 2 min_size 1 crush_ruleset 1 object_hash rjenkins pg_num 128 pgp_num 128 last_change 2098 flags hashpspool stripe_width 0
        removed_snaps [1~29,2f~1,31~5]
pool 6 'libvirt' replicated size 2 min_size 1 crush_ruleset 1 object_hash rjenkins pg_num 128 pgp_num 128 last_change 2099 flags hashpspool stripe_width 0
pool 21 'cephfs_archive' replicated size 2 min_size 1 crush_ruleset 1 object_hash rjenkins pg_num 128 pgp_num 128 last_change 4861 flags hashpspool stripe_width 0
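
All of the pools above reference crush_ruleset 1 (sascha-ssd). For reference, the ruleset a pool uses can be checked or changed per pool (pool name illustrative):

ceph osd pool get rbd crush_ruleset
ceph osd pool set rbd crush_ruleset 2   # note: changing it remaps that pool's PGs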

Thank you for your expertise
Kind regards
Sascha

_______________________________________________
ceph-users mailing list
ceph-users@xxxxxxxxxxxxxx
http://lists.ceph.com/listinfo.cgi/ceph-users-ceph.com
