Hi, I'm trying to do the same as Mark. Basically the same problem. Can’t get it to work. The --master flag doesn’t make much of a difference for me. Any other idea, maybe? Ciao, Uli On Cluster #1 ("nceph"): ------------------------ radosgw-admin realm create --rgw-realm=acme --default radosgw-admin zonegroup create --rgw-zonegroup=us --rgw-realm=acme --master --default --endpoints=http://nceph00.uli.home:8080 radosgw-admin zone create --rgw-zonegroup=us --rgw-zone=us-west-1 --master --default --endpoints=http://nceph00.uli.home:8080 radosgw-admin user create --uid="sysuser" --display-name="System User" --system --access-key=N7Y6CM8KIN45UY2J5NQA --secret=a8QvbAMGpDwPBk8E3t3jHTyTSNqMQi4PK04yN9GX radosgw-admin zone modify --rgw-zone=us-west-1 --access-key=N7Y6CM8KIN45UY2J5NQA --secret=a8QvbAMGpDwPBk8E3t3jHTyTSNqMQi4PK04yN9GX radosgw-admin period update --commit ceph orch host label add nceph00 rgw ceph orch apply rgw acme --realm=acme --zone=us-west-1 '--placement=label:rgw count-per-host:1' --port=8080 echo -n "N7Y6CM8KIN45UY2J5NQA" > ac echo -n "a8QvbAMGpDwPBk8E3t3jHTyTSNqMQi4PK04yN9GX" > sc ceph dashboard set-rgw-api-access-key -i ac ceph dashboard set-rgw-api-secret-key -i sc radosgw-admin period update --commit { "id": "5b304997-e3ba-4cc2-9f80-af88a31827c3", "epoch": 2, "predecessor_uuid": "118ecc9a-3824-4560-afdf-98f901836fb2", "sync_status": [], "period_map": { "id": "5b304997-e3ba-4cc2-9f80-af88a31827c3", "zonegroups": [ { "id": "1df9e729-8fa0-47fa-942f-b5159fad8360", "name": "us", "api_name": "us", "is_master": "true", "endpoints": [ "http://nceph00.uli.home:8080" ], "hostnames": [], "hostnames_s3website": [], "master_zone": "7ac5da6e-ea41-43ce-b6fc-f5b6e794933f", "zones": [ { "id": "7ac5da6e-ea41-43ce-b6fc-f5b6e794933f", "name": "us-west-1", "endpoints": [ "http://nceph00.uli.home:8080" ], "log_meta": "false", "log_data": "false", "bucket_index_max_shards": 11, "read_only": "false", "tier_type": "", "sync_from_all": "true", "sync_from": [], "redirect_zone": "" } 
], "placement_targets": [ { "name": "default-placement", "tags": [], "storage_classes": [ "STANDARD" ] } ], "default_placement": "default-placement", "realm_id": "657b514d-be49-45c8-a69e-7ee474276c9a", "sync_policy": { "groups": [] } } ], "short_zone_ids": [ { "key": "7ac5da6e-ea41-43ce-b6fc-f5b6e794933f", "val": 1454718312 } ] }, "master_zonegroup": "1df9e729-8fa0-47fa-942f-b5159fad8360", "master_zone": "7ac5da6e-ea41-43ce-b6fc-f5b6e794933f", "period_config": { "bucket_quota": { "enabled": false, "check_on_raw": false, "max_size": -1, "max_size_kb": 0, "max_objects": -1 }, "user_quota": { "enabled": false, "check_on_raw": false, "max_size": -1, "max_size_kb": 0, "max_objects": -1 } }, "realm_id": "657b514d-be49-45c8-a69e-7ee474276c9a", "realm_name": "acme", "realm_epoch": 2 } Dashboard works, too On cluster #2 ("ceph") ---------------------- radosgw-admin realm pull --url=http://nceph00.uli.home:8080 --access-key=N7Y6CM8KIN45UY2J5NQA --secret=a8QvbAMGpDwPBk8E3t3jHTyTSNqMQi4PK04yN9GX radosgw-admin zonegroup create --rgw-realm=acme --rgw-zonegroup=eu --endpoints=http://ceph00.uli.home:8080 radosgw-admin zone create --rgw-zone=eu-west-1 --rgw-zonegroup=eu --endpoints=http://ceph00.uli.home:8080 (With or without --default makes no difference) radosgw-admin zone modify --rgw-zone=eu-west-1 --rgw-zonegroup=eu --access-key=N7Y6CM8KIN45UY2J5NQA --secret=a8QvbAMGpDwPBk8E3t3jHTyTSNqMQi4PK04yN9GX ceph orch host label add ceph00 rgw ceph orch apply rgw acme --realm=acme --zone=eu-west-1 '--placement=label:rgw count-per-host:1' --port=8080 echo -n "N7Y6CM8KIN45UY2J5NQA" > ac echo -n "a8QvbAMGpDwPBk8E3t3jHTyTSNqMQi4PK04yN9GX" > sc ceph dashboard set-rgw-api-access-key -i ac ceph dashboard set-rgw-api-secret-key -i sc radosgw-admin period update --commit couldn't init storage provider ceph orch ps --refresh NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID ... 
rgw.acme.ceph00.qayeqr ceph00 *:8080 error 9m ago 29m - - <unknown> <unknown> <unknown> Dashboard for RGW obviously doesn’t work, either. No pools got created for zone eu-west-1. Creating them manually doesn’t make a difference. Log for rgw.acme.ceph00.qayeqr repeats every 12 seconds: 2022-04-19T11:36:24.484+0000 ffff9c3e7040 0 deferred set uid:gid to 167:167 (ceph:ceph) 2022-04-19T11:36:24.484+0000 ffff9c3e7040 0 ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable), process radosgw, pid 2 2022-04-19T11:36:24.484+0000 ffff9c3e7040 0 framework: beast 2022-04-19T11:36:24.484+0000 ffff9c3e7040 0 framework conf key: port, val: 8080 2022-04-19T11:36:24.484+0000 ffff9c3e7040 1 radosgw_Main not setting numa affinity 2022-04-19T11:36:24.588+0000 ffff9c3e7040 1 rgw main: Cannot find zone id=61bff282-5f9e-496f-b15f-716d6c8b3810 (name=eu-west-1), switching to local zonegroup configuration 2022-04-19T11:36:24.592+0000 ffff9c3e7040 -1 rgw main: Cannot find zone id=61bff282-5f9e-496f-b15f-716d6c8b3810 (name=eu-west-1) 2022-04-19T11:36:24.592+0000 ffff9c3e7040 0 rgw main: ERROR: failed to start notify service ((22) Invalid argument 2022-04-19T11:36:24.592+0000 ffff9c3e7040 0 rgw main: ERROR: failed to init services (ret=(22) Invalid argument) 2022-04-19T11:36:24.600+0000 ffff9c3e7040 -1 Couldn't init storage provider (RADOS) radosgw-admin period update { "id": "657b514d-be49-45c8-a69e-7ee474276c9a:staging", "epoch": 2, "predecessor_uuid": "5b304997-e3ba-4cc2-9f80-af88a31827c3", "sync_status": [], "period_map": { "id": "5b304997-e3ba-4cc2-9f80-af88a31827c3", "zonegroups": [ { "id": "1df9e729-8fa0-47fa-942f-b5159fad8360", "name": "us", "api_name": "us", "is_master": "true", "endpoints": [ "http://nceph00.uli.home:8080" ], "hostnames": [], "hostnames_s3website": [], "master_zone": "7ac5da6e-ea41-43ce-b6fc-f5b6e794933f", "zones": [ { "id": "7ac5da6e-ea41-43ce-b6fc-f5b6e794933f", "name": "us-west-1", "endpoints": [ "http://nceph00.uli.home:8080" ], 
"log_meta": "false", "log_data": "false", "bucket_index_max_shards": 11, "read_only": "false", "tier_type": "", "sync_from_all": "true", "sync_from": [], "redirect_zone": "" } ], "placement_targets": [ { "name": "default-placement", "tags": [], "storage_classes": [ "STANDARD" ] } ], "default_placement": "default-placement", "realm_id": "657b514d-be49-45c8-a69e-7ee474276c9a", "sync_policy": { "groups": [] } }, { "id": "5ac89654-cd94-4804-90fd-1ffd9de18189", "name": "eu", "api_name": "eu", "is_master": "false", "endpoints": [ "http://ceph00.uli.home:8080" ], "hostnames": [], "hostnames_s3website": [], "master_zone": "61bff282-5f9e-496f-b15f-716d6c8b3810", "zones": [ { "id": "61bff282-5f9e-496f-b15f-716d6c8b3810", "name": "eu-west-1", "endpoints": [ "http://ceph00.uli.home:8080" ], "log_meta": "false", "log_data": "false", "bucket_index_max_shards": 11, "read_only": "false", "tier_type": "", "sync_from_all": "true", "sync_from": [], "redirect_zone": "" } ], "placement_targets": [ { "name": "default-placement", "tags": [], "storage_classes": [ "STANDARD" ] } ], "default_placement": "default-placement", "realm_id": "657b514d-be49-45c8-a69e-7ee474276c9a", "sync_policy": { "groups": [] } } ], "short_zone_ids": [ { "key": "61bff282-5f9e-496f-b15f-716d6c8b3810", "val": 3462507950 }, { "key": "7ac5da6e-ea41-43ce-b6fc-f5b6e794933f", "val": 1454718312 } ] }, "master_zonegroup": "1df9e729-8fa0-47fa-942f-b5159fad8360", "master_zone": "7ac5da6e-ea41-43ce-b6fc-f5b6e794933f", "period_config": { "bucket_quota": { "enabled": false, "check_on_raw": false, "max_size": -1, "max_size_kb": 0, "max_objects": -1 }, "user_quota": { "enabled": false, "check_on_raw": false, "max_size": -1, "max_size_kb": 0, "max_objects": -1 } }, "realm_id": "657b514d-be49-45c8-a69e-7ee474276c9a", "realm_name": "acme", "realm_epoch": 3 } Looks good, can’t commit :( > On 19. 
04 2022, at 10:56, Eugen Block <eblock@xxxxxx> wrote: > > Hi, > > unless there are copy/paste mistakes involved I believe you shouldn't specify '--master' for the secondary zone because you did that already for the first zone which is supposed to be the master zone. You specified '--rgw-zone=us-west-1' as the master zone within your realm, but then you run this command on the second cluster: > > radosgw-admin zone create --rgw-zone=eu-west-1 \ > --rgw-zonegroup=eu \ > --default \ > --master \ > --endpoints=https://ceph2dev01.acme.com:443 > > That's the reason for this error when trying to commit on the second cluster: > > 2022-04-16T09:16:20.345-0700 7faf98ab6380 1 Cannot find zone id=9f8a06eb-5a1c-4052-b04d-359f21c95371 (name=eu-west-1), switching to local zonegroup configuration > > Because your master zone is this one: > > "master_zone": "d7ceaa4f-06c0-4c21-bcec-efe90f55ecfd" > > I'd recommend to purge the secondary zone and start over. If this is not the root cause, please update your post. > > Regards, > Eugen > > > > Zitat von Mark Selby <mselby@xxxxxxxxxx>: > >> I have been trying to build a multisite ceph rgw installation with a >> single realm, multiple zonegroups, and a single zone per zonegroup. This >> is a model around 3 locations spread over long distances. I have >> sucessfully create an installation with a single realm, a single >> zonegroup and multiple zones on that one zonegroup. >> >> I have had no luck getting my multiple zonegroup installation even >> setup. I have read the docs over and over but I still think that I am >> doing something incorrect (or possibly a bug?) >> >> I am running Pacific 16.2.7 in a containerized environment >> >> I have created a github gist of all of the commands and output show >> below as that may be easier to read for some. >> >> https://gist.github.com/tokenrain/4edf85b0060ce5004f2003aa8a66e67d >> >> Cluster 1 and Cluster 2 are separate ceph clusters. 
Cluster 1 commands >> were run on a node in cluster1 and Cluster 2 commands were run on a node >> in cluster2 >> >> All and any help is greatly appreciated. >> >> ############ >> # TOPOLOGY # >> ############ >> >> realm = acme.com >> zonegroup = us >> zone = us-west-1 >> zonegroup = eu >> zone = eu-west-1 >> zonegroup = as >> zone = as-west-1 >> >> ###################### >> # CLUSTER 1 COMMANDS # >> ###################### >> >> radosgw-admin realm create --rgw-realm=acme --default >> >> radosgw-admin zonegroup create --rgw-zonegroup=us --rgw-realm=acme --master --default --endpoints=https://ceph1dev01.acme.com:443 >> >> radosgw-admin zone create --rgw-zonegroup=us \ >> --rgw-zone=us-west-1 \ >> --master \ >> --default \ >> --endpoints=https://ceph1dev01.acme.com:443 >> >> radosgw-admin user create --uid="sync-user" --display-name="Synchronization User" --system >> >> radosgw-admin zone modify --rgw-zonegroup=us --rgw-zone=us-west-1 --access-key=<ACCESS_KEY> --secret=<SECRET> >> >> radosgw-admin period update --commit >> >> ceph orch apply -i rgw-us-west-1.yml >> >> ######################### >> # Cluster 1 -- RGW Spec # >> ######################### >> --- >> service_type: rgw >> service_id: us-west-1 >> placement: >> hosts: >> - ceph1dev01.acme.com >> - ceph1dev02.acme.com >> - ceph1dev03.acme.com >> spec: >> ssl: true >> rgw_realm: acme >> rgw_zone: us-west-1 >> rgw_frontend_port: 443 >> rgw_frontend_type: beast >> rgw_frontend_ssl_certificate: | >> >> ###################### >> # CLUSTER 2 COMMANDS # >> ###################### >> >> radosgw-admin realm pull --rgw-realm=acme --url=https://ceph1dev01.acme.com:443 --access-key=<ACCESS_KEY> --secret=<SECRET> --default >> >> radosgw-admin zonegroup create --rgw-realm=acme --rgw-zonegroup=eu --endpoints=https://ceph2dev01.acme.com:443 >> >> radosgw-admin zone create --rgw-zone=eu-west-1 \ >> --rgw-zonegroup=eu \ >> --default \ >> --master \ >> --endpoints=https://ceph2dev01.acme.com:443 >> >> radosgw-admin zone modify 
--rgw-zone=eu-west-1 --rgw-zonegroup=eu --access-key=<ACCESS_KEY> --secret=<SECRET >> >> radosgw-admin period update >> >> radosgw-admin period update --commit >> >> ceph orch apply -i rgw-eu-west1-2.yml >> >> ############################################## >> # CLUSTER 1 OUTPUT OF period update --commit # >> ############################################## >> >> { >> "id": "b153187a-1d91-4bbf-a674-d3cad9fd23da", >> "epoch": 1, >> "predecessor_uuid": "740d6999-ce83-47ff-81f5-615a3a441a96", >> "sync_status": [], >> "period_map": { >> "id": "b153187a-1d91-4bbf-a674-d3cad9fd23da", >> "zonegroups": [ >> { >> "id": "e39e0b42-43a8-47eb-b6cd-6c2524ff51d2", >> "name": "us", >> "api_name": "us", >> "is_master": "true", >> "endpoints": [ >> "https://ceph1dev01.acme.com:443" >> ], >> "hostnames": [], >> "hostnames_s3website": [], >> "master_zone": "d7ceaa4f-06c0-4c21-bcec-efe90f55ecfd", >> "zones": [ >> { >> "id": "d7ceaa4f-06c0-4c21-bcec-efe90f55ecfd", >> "name": "us-west-1", >> "endpoints": [ >> "https://ceph1dev01.acme.com:443" >> ], >> "log_meta": "false", >> "log_data": "false", >> "bucket_index_max_shards": 11, >> "read_only": "false", >> "tier_type": "", >> "sync_from_all": "true", >> "sync_from": [], >> "redirect_zone": "" >> } >> ], >> "placement_targets": [ >> { >> "name": "default-placement", >> "tags": [], >> "storage_classes": [ >> "STANDARD" >> ] >> } >> ], >> "default_placement": "default-placement", >> "realm_id": "1504a901-d969-463e-b43c-ce362782c9e2", >> "sync_policy": { >> "groups": [] >> } >> } >> ], >> "short_zone_ids": [ >> { >> "key": "d7ceaa4f-06c0-4c21-bcec-efe90f55ecfd", >> "val": 1041973059 >> } >> ] >> }, >> "master_zonegroup": "e39e0b42-43a8-47eb-b6cd-6c2524ff51d2", >> "master_zone": "d7ceaa4f-06c0-4c21-bcec-efe90f55ecfd", >> "period_config": { >> "bucket_quota": { >> "enabled": false, >> "check_on_raw": false, >> "max_size": -1, >> "max_size_kb": 0, >> "max_objects": -1 >> }, >> "user_quota": { >> "enabled": false, >> "check_on_raw": false, >> 
"max_size": -1, >> "max_size_kb": 0, >> "max_objects": -1 >> } >> }, >> "realm_id": "1504a901-d969-463e-b43c-ce362782c9e2", >> "realm_name": "acme", >> "realm_epoch": 2 >> } >> >> >> ##################################### >> # CLUSTER 2 OUTPUT OF period update # >> ##################################### >> >> { >> "id": "1504a901-d969-463e-b43c-ce362782c9e2:staging", >> "epoch": 1, >> "predecessor_uuid": "b153187a-1d91-4bbf-a674-d3cad9fd23da", >> "sync_status": [], >> "period_map": { >> "id": "b153187a-1d91-4bbf-a674-d3cad9fd23da", >> "zonegroups": [ >> { >> "id": "7e0bc1f3-a15f-449a-aeb5-ef0cb0fd6d8d", >> "name": "eu", >> "api_name": "eu", >> "is_master": "false", >> "endpoints": [ >> "https://ceph2dev01.acme.com:443" >> ], >> "hostnames": [], >> "hostnames_s3website": [], >> "master_zone": "9f8a06eb-5a1c-4052-b04d-359f21c95371", >> "zones": [ >> { >> "id": "9f8a06eb-5a1c-4052-b04d-359f21c95371", >> "name": "eu-west-1", >> "endpoints": [ >> "https://ceph2dev01.acme.com:443" >> ], >> "log_meta": "false", >> "log_data": "false", >> "bucket_index_max_shards": 11, >> "read_only": "false", >> "tier_type": "", >> "sync_from_all": "true", >> "sync_from": [], >> "redirect_zone": "" >> } >> ], >> "placement_targets": [ >> { >> "name": "default-placement", >> "tags": [], >> "storage_classes": [ >> "STANDARD" >> ] >> } >> ], >> "default_placement": "default-placement", >> "realm_id": "1504a901-d969-463e-b43c-ce362782c9e2", >> "sync_policy": { >> "groups": [] >> } >> }, >> { >> "id": "e39e0b42-43a8-47eb-b6cd-6c2524ff51d2", >> "name": "us", >> "api_name": "us", >> "is_master": "true", >> "endpoints": [ >> "https://ceph1dev01.acme.com:443" >> ], >> "hostnames": [], >> "hostnames_s3website": [], >> "master_zone": "d7ceaa4f-06c0-4c21-bcec-efe90f55ecfd", >> "zones": [ >> { >> "id": "d7ceaa4f-06c0-4c21-bcec-efe90f55ecfd", >> "name": "us-west-1", >> "endpoints": [ >> "https://ceph1dev01.acme.com:443" >> ], >> "log_meta": "false", >> "log_data": "false", >> "bucket_index_max_shards": 
11, >> "read_only": "false", >> "tier_type": "", >> "sync_from_all": "true", >> "sync_from": [], >> "redirect_zone": "" >> } >> ], >> "placement_targets": [ >> { >> "name": "default-placement", >> "tags": [], >> "storage_classes": [ >> "STANDARD" >> ] >> } >> ], >> "default_placement": "default-placement", >> "realm_id": "1504a901-d969-463e-b43c-ce362782c9e2", >> "sync_policy": { >> "groups": [] >> } >> } >> ], >> "short_zone_ids": [ >> { >> "key": "9f8a06eb-5a1c-4052-b04d-359f21c95371", >> "val": 2999707275 >> }, >> { >> "key": "d7ceaa4f-06c0-4c21-bcec-efe90f55ecfd", >> "val": 1041973059 >> } >> ] >> }, >> "master_zonegroup": "e39e0b42-43a8-47eb-b6cd-6c2524ff51d2", >> "master_zone": "d7ceaa4f-06c0-4c21-bcec-efe90f55ecfd", >> "period_config": { >> "bucket_quota": { >> "enabled": false, >> "check_on_raw": false, >> "max_size": -1, >> "max_size_kb": 0, >> "max_objects": -1 >> }, >> "user_quota": { >> "enabled": false, >> "check_on_raw": false, >> "max_size": -1, >> "max_size_kb": 0, >> "max_objects": -1 >> } >> }, >> "realm_id": "1504a901-d969-463e-b43c-ce362782c9e2", >> "realm_name": "acme", >> "realm_epoch": 3 >> } >> >> ############################################## >> # CLUSTER 2 OUTPUT OF period update --commit # >> ############################################## >> >> 2022-04-16T09:16:20.345-0700 7faf98ab6380 1 Cannot find zone id=9f8a06eb-5a1c-4052-b04d-359f21c95371 (name=eu-west-1), switching to local zonegroup configuration >> 2022-04-16T09:16:20.345-0700 7faf98ab6380 -1 Cannot find zone id=9f8a06eb-5a1c-4052-b04d-359f21c95371 (name=eu-west-1) >> 2022-04-16T09:16:20.345-0700 7faf98ab6380 0 ERROR: failed to start notify service ((22) Invalid argument >> 2022-04-16T09:16:20.345-0700 7faf98ab6380 0 ERROR: failed to init services (ret=(22) Invalid argument) >> couldn't init storage provider >> >> -- >> >> >> Mark Selby >> Sr Linux Administrator, The Voleon Group >> mselby@xxxxxxxxxx >> >> This email is subject to important conditions and disclosures that are 
listed on this web page: https://voleon.com/disclaimer/. > > > > _______________________________________________ > ceph-users mailing list -- ceph-users@xxxxxxx > To unsubscribe send an email to ceph-users-leave@xxxxxxx _______________________________________________ ceph-users mailing list -- ceph-users@xxxxxxx To unsubscribe send an email to ceph-users-leave@xxxxxxx