Could I build one region using two clusters, each cluster having one zone, so that I can sync metadata and data from one cluster to the other?
I built two Ceph clusters.
For the first cluster, I did the following steps:
1. Create pools
sudo ceph osd pool create .us-east.rgw.root 64 64
sudo ceph osd pool create .us-east.rgw.control 64 64
sudo ceph osd pool create .us-east.rgw.gc 64 64
sudo ceph osd pool create .us-east.rgw.buckets 64 64
sudo ceph osd pool create .us-east.rgw.buckets.index 64 64
sudo ceph osd pool create .us-east.rgw.buckets.extra 64 64
sudo ceph osd pool create .us-east.log 64 64
sudo ceph osd pool create .us-east.intent-log 64 64
sudo ceph osd pool create .us-east.usage 64 64
sudo ceph osd pool create .us-east.users 64 64
sudo ceph osd pool create .us-east.users.email 64 64
sudo ceph osd pool create .us-east.users.swift 64 64
sudo ceph osd pool create .us-east.users.uid 64 64
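As a sanity check, the new pools can be listed, e.g.:
sudo rados lspools | grep us-east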
2. Create a keyring
sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.radosgw.keyring
sudo chmod +r /etc/ceph/ceph.client.radosgw.keyring
sudo ceph-authtool /etc/ceph/ceph.client.radosgw.keyring -n client.radosgw.us-east-1 --gen-key
sudo ceph-authtool -n client.radosgw.us-east-1 --cap osd 'allow rwx' --cap mon 'allow rwx' /etc/ceph/ceph.client.radosgw.keyring
sudo ceph -k /etc/ceph/ceph.client.admin.keyring auth add client.radosgw.us-east-1 -i /etc/ceph/ceph.client.radosgw.keyring
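As a sanity check, the key and caps can be read back, e.g.:
sudo ceph auth get client.radosgw.us-east-1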
3. Create a region
sudo radosgw-admin region set --infile us.json --name client.radosgw.us-east-1
sudo radosgw-admin region default --rgw-region=us --name client.radosgw.us-east-1
sudo radosgw-admin regionmap update --name client.radosgw.us-east-1
the content of us.json:
cat us.json
{ "name": "us",
"api_name": "us",
"is_master": "true",
"endpoints": [
"http:\/\/WH-CEPH-TEST01.MATRIX.CTRIPCORP.COM:80\/", "http:\/\/WH-CEPH-TEST02.MATRIX.CTRIPCORP.COM:80\/"],
"master_zone": "us-east",
"zones": [
{ "name": "us-east",
"endpoints": [
"http:\/\/WH-CEPH-TEST01.MATRIX.CTRIPCORP.COM:80\/"],
"log_meta": "true",
"log_data": "true"},
{ "name": "us-west",
"endpoints": [
"http:\/\/WH-CEPH-TEST02.MATRIX.CTRIPCORP.COM:80\/"],
"log_meta": "true",
"log_data": "true"}],
"placement_targets": [
{
"name": "default-placement",
"tags": []
}
],
"default_placement": "default-placement"}
4. Create zones
sudo radosgw-admin zone set --rgw-zone=us-east --infile us-east-secert.json --name client.radosgw.us-east-1
sudo radosgw-admin regionmap update --name client.radosgw.us-east-1
cat us-east-secert.json
{ "domain_root": ".us-east.domain.rgw",
"control_pool": ".us-east.rgw.control",
"gc_pool": ".us-east.rgw.gc",
"log_pool": ".us-east.log",
"intent_log_pool": ".us-east.intent-log",
"usage_log_pool": ".us-east.usage",
"user_keys_pool": ".us-east.users",
"user_email_pool": ".us-east.users.email",
"user_swift_pool": ".us-east.users.swift",
"user_uid_pool": ".us-east.users.uid",
"system_key": { "access_key": "XNK0ST8WXTMWZGN29NF9", "secret_key": "7VJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5"},
"placement_pools": [
{ "key": "default-placement",
"val": { "index_pool": ".us-east.rgw.buckets.index",
"data_pool": ".us-east.rgw.buckets"}
}
]
}
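As a sanity check, the stored zone can be read back, e.g.:
sudo radosgw-admin zone get --rgw-zone=us-east --name client.radosgw.us-east-1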
5. Create zone system users
sudo radosgw-admin user create --uid="us-east" --display-name="Region-US Zone-East" --name client.radosgw.us-east-1 --access_key="XNK0ST8WXTMWZGN29NF9" --secret="7VJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5" --system
sudo radosgw-admin user create --uid="us-west" --display-name="Region-US Zone-West" --name client.radosgw.us-east-1 --access_key="AAK0ST8WXTMWZGN29NF9" --secret="AAJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5" --system
6. Create zone users (non-system)
sudo radosgw-admin user create --uid="us-test-east" --display-name="Region-US Zone-East-test" --name client.radosgw.us-east-1 --access_key="DDK0ST8WXTMWZGN29NF9" --secret="DDJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5"
sudo radosgw-admin user create --uid="us-test-east" --display-name="Region-US Zone-East-test" --name client.radosgw.us-east-1 --access_key="DDK0ST8WXTMWZGN29NF9" --secret="DDJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5"
7. Create subusers
sudo radosgw-admin subuser create --uid="us-test-east" --subuser="us-test-east:swift" --access=full --name client.radosgw.us-east-1 --key-type swift --secret="ffJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5"
8. Restart services
sudo /etc/init.d/ceph -a restart
sudo /etc/init.d/httpd restart
sudo /etc/init.d/ceph-radosgw restart
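Once restarted, the gateway can be smoke-tested; an anonymous request should return an XML response if radosgw is answering, e.g.:
curl http://WH-CEPH-TEST01.MATRIX.CTRIPCORP.COM:80/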
For the second cluster, I did the following steps:
1. Create pools
sudo ceph osd pool create .us-west.rgw.root 64 64
sudo ceph osd pool create .us-west.rgw.control 64 64
sudo ceph osd pool create .us-west.rgw.gc 64 64
sudo ceph osd pool create .us-west.rgw.buckets 64 64
sudo ceph osd pool create .us-west.rgw.buckets.index 64 64
sudo ceph osd pool create .us-west.rgw.buckets.extra 64 64
sudo ceph osd pool create .us-west.log 64 64
sudo ceph osd pool create .us-west.intent-log 64 64
sudo ceph osd pool create .us-west.usage 64 64
sudo ceph osd pool create .us-west.users 64 64
sudo ceph osd pool create .us-west.users.email 64 64
sudo ceph osd pool create .us-west.users.swift 64 64
sudo ceph osd pool create .us-west.users.uid 64 64
2. Create a keyring
sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.radosgw.keyring
sudo chmod +r /etc/ceph/ceph.client.radosgw.keyring
sudo ceph-authtool /etc/ceph/ceph.client.radosgw.keyring -n client.radosgw.us-west-1 --gen-key
sudo ceph -k /etc/ceph/ceph.client.admin.keyring auth del client.radosgw.us-west-1
sudo ceph -k /etc/ceph/ceph.client.admin.keyring auth add client.radosgw.us-west-1 -i /etc/ceph/ceph.client.radosgw.keyring
3. Create a region
sudo radosgw-admin region set --infile us.json --name client.radosgw.us-west-1
sudo radosgw-admin region default --rgw-region=us --name client.radosgw.us-west-1
sudo radosgw-admin regionmap update --name client.radosgw.us-west-1
the content of us.json:
cat us.json
{ "name": "us",
"api_name": "us",
"is_master": "true",
"endpoints": [
"http:\/\/WH-CEPH-TEST01.MATRIX.CTRIPCORP.COM:80\/", "http:\/\/WH-CEPH-TEST02.MATRIX.CTRIPCORP.COM:80\/"],
"master_zone": "us-east",
"zones": [
{ "name": "us-east",
"endpoints": [
"http:\/\/WH-CEPH-TEST01.MATRIX.CTRIPCORP.COM:80\/"],
"log_meta": "true",
"log_data": "true"},
{ "name": "us-west",
"endpoints": [
"http:\/\/WH-CEPH-TEST02.MATRIX.CTRIPCORP.COM:80\/"],
"log_meta": "true",
"log_data": "true"}],
"placement_targets": [
{
"name": "default-placement",
"tags": []
}
],
"default_placement": "default-placement"}
4. Create zones
sudo radosgw-admin zone set --rgw-zone=us-west --infile us-west-secert.json --name client.radosgw.us-west-1
sudo radosgw-admin regionmap update --name client.radosgw.us-west-1
the content of us-west-secert.json is:
cat us-west-secert.json
{ "domain_root": ".us-east.domain.rgw",
"control_pool": ".us-east.rgw.control",
"gc_pool": ".us-east.rgw.gc",
"log_pool": ".us-east.log",
"intent_log_pool": ".us-east.intent-log",
"usage_log_pool": ".us-east.usage",
"user_keys_pool": ".us-east.users",
"user_email_pool": ".us-east.users.email",
"user_swift_pool": ".us-east.users.swift",
"user_uid_pool": ".us-east.users.uid",
"system_key": { "access_key": "XNK0ST8WXTMWZGN29NF9", "secret_key": "7VJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5"},
"placement_pools": [
{ "key": "default-placement",
"val": { "index_pool": ".us-east.rgw.buckets.index",
"data_pool": ".us-east.rgw.buckets"}
}
]
}
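As a sanity check, the stored zone can be read back, e.g.:
sudo radosgw-admin zone get --rgw-zone=us-west --name client.radosgw.us-west-1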
5. Create zone system users
sudo radosgw-admin user create --uid="us-east" --display-name="Region-US Zone-East" --name client.radosgw.us-west-1 --access_key="XNK0ST8WXTMWZGN29NF9" --secret="7VJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5" --system
sudo radosgw-admin user create --uid="us-west" --display-name="Region-US Zone-West" --name client.radosgw.us-west-1 --access_key="AAK0ST8WXTMWZGN29NF9" --secret="AAJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5" --system
sudo radosgw-admin user create --uid="us-east" --display-name="Region-US Zone-East" --name client.radosgw.us-west-1 --access_key="XNK0ST8WXTMWZGN29NF9" --secret="7VJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5" --system
sudo radosgw-admin user create --uid="us-west" --display-name="Region-US Zone-West" --name client.radosgw.us-west-1 --access_key="AAK0ST8WXTMWZGN29NF9" --secret="AAJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5" --system
6. Restart services
sudo /etc/init.d/ceph -a restart
sudo /etc/init.d/httpd restart
sudo /etc/init.d/ceph-radosgw restart
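Once restarted, the second gateway can be smoke-tested the same way, e.g.:
curl http://WH-CEPH-TEST02.MATRIX.CTRIPCORP.COM:80/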
After all of the above, on the first cluster I did the following steps:
1. source self.env
the content of self.env is:
cat self.env
export ST_AUTH="http://10.18.5.49/auth/1.0"
export ST_KEY=ffJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5
export ST_USER=us-test-east:swift
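With these variables set, authentication can be smoke-tested before uploading, e.g.:
swift stat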
2. swift list
3. swift upload test self.env
4. swift list test
self.env
5. sudo radosgw-agent -c ./ceph-data-sync.conf
the content of ceph-data-sync.conf is:
cat ceph-data-sync.conf
src_access_key: XNK0ST8WXTMWZGN29NF9
src_secret_key: 7VJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5
destination: http://WH-CEPH-TEST02.MATRIX.CTRIPCORP.COM
dest_access_key: XNK0ST8WXTMWZGN29NF9
dest_secret_key: 7VJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5
log_file: /var/log/radosgw/radosgw-sync-us-east-west.log
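(If your radosgw-agent version supports it, the sync can also be limited to metadata only, which helps separate metadata problems from data problems, e.g.:
sudo radosgw-agent --metadata-only -c ./ceph-data-sync.conf)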
There are some errors, as below:
sudo radosgw-agent -c ./ceph-data-sync.conf
region map is: {u'us': [u'us-west', u'us-east']}
INFO:radosgw_agent.sync:Starting sync
INFO:radosgw_agent.worker:24062 is processing shard number 0
INFO:radosgw_agent.worker:finished processing shard 0
INFO:radosgw_agent.worker:24062 is processing shard number 1
INFO:radosgw_agent.sync:1/64 items processed
INFO:radosgw_agent.worker:finished processing shard 1
INFO:radosgw_agent.sync:2/64 items processed
INFO:radosgw_agent.worker:24062 is processing shard number 2
INFO:radosgw_agent.worker:finished processing shard 2
INFO:radosgw_agent.sync:3/64 items processed
INFO:radosgw_agent.worker:24062 is processing shard number 3
INFO:radosgw_agent.worker:finished processing shard 3
INFO:radosgw_agent.sync:4/64 items processed
INFO:radosgw_agent.worker:24062 is processing shard number 4
...
...
...
INFO:radosgw_agent.worker:syncing bucket "test"
ERROR:radosgw_agent.worker:failed to sync object test/self.env: state is error
INFO:radosgw_agent.worker:syncing bucket "test"
ERROR:radosgw_agent.worker:failed to sync object test/self.env: state is error
INFO:radosgw_agent.worker:finished processing shard 69
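More detail on the per-object failures should appear in the agent log configured above, e.g.:
sudo tail -n 50 /var/log/radosgw/radosgw-sync-us-east-west.log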
On the second cluster, I did the following steps:
1. source self.env
the content of self.env is:
cat self.env
export ST_AUTH="http://10.18.5.51/auth/1.0"
export ST_KEY=ffJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5
export ST_USER=us-test-east:swift
2. swift list
Auth GET failed: http://10.18.5.51/auth/1.0 403 Forbidden
3. radosgw-admin --name client.radosgw.us-west-1 user info --uid="us-test-east"
{ "user_id": "us-test-east",
"display_name": "Region-US Zone-East-test",
"email": "",
"suspended": 0,
"max_buckets": 1000,
"auid": 0,
"subusers": [
{ "id": "us-test-east:swift",
"permissions": "full-control"}],
"keys": [
{ "user": "us-test-east",
"access_key": "DDK0ST8WXTMWZGN29NF9",
"secret_key": "DDJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5"}],
"swift_keys": [
{ "user": "us-test-east:swift",
"secret_key": "ffJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5"}],
"caps": [],
"op_mask": "read, write, delete",
"default_placement": "",
"placement_tags": [],
"bucket_quota": { "enabled": false,
"max_size_kb": -1,
"max_objects": -1},
"user_quota": { "enabled": false,
"max_size_kb": -1,
"max_objects": -1},
"temp_url_keys": []}
{ "user_id": "us-test-east",
"display_name": "Region-US Zone-East-test",
"email": "",
"suspended": 0,
"max_buckets": 1000,
"auid": 0,
"subusers": [
{ "id": "us-test-east:swift",
"permissions": "full-control"}],
"keys": [
{ "user": "us-test-east",
"access_key": "DDK0ST8WXTMWZGN29NF9",
"secret_key": "DDJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5"}],
"swift_keys": [
{ "user": "us-test-east:swift",
"secret_key": "ffJm8uAp71xKQZkjoPZmHu4sACA1SY8jTjay9dP5"}],
"caps": [],
"op_mask": "read, write, delete",
"default_placement": "",
"placement_tags": [],
"bucket_quota": { "enabled": false,
"max_size_kb": -1,
"max_objects": -1},
"user_quota": { "enabled": false,
"max_size_kb": -1,
"max_objects": -1},
"temp_url_keys": []}
4. radosgw-admin --name client.radosgw.us-west-1 bucket list
[
"test"]
5. radosgw-admin --name client.radosgw.us-west-1 --bucket=test bucket list
[]
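To see whether any object data reached this cluster at all, the data pool can be listed directly (assuming the pool names created above), e.g.:
sudo rados -p .us-west.rgw.buckets ls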
It seems that metadata is replicated from the first cluster, but data is not. I don't know why.