Hello. I have a multisite RGW (14.2.16 nautilus) setup, and some of the buckets couldn't finish bucket sync due to overfilled buckets. There were different needs, and the sync was started for the purpose of migration. I made the secondary zone the master and removed the old master zone from the zonegroup. Now I still have sync errors, and sync error trim does not work. radosgw-admin --id radosgw.srv1 sync error list | grep name | wc -l 32000 That's a lot of errors. Sync error trim does nothing. When I run period update commit, I see that the sync_status field has a lot of records, as below. radosgw-admin --id radosgw.srv1 period update --commit { "id": "e5d30f8f", "epoch": 7, "predecessor_uuid": "1d0b7132", "sync_status": [ "1_1611733356.499643_1448979853.1", "1_1611225916.734727_865381974.1", "1_1611648125.876993_1659659292.1", "1_1608194415.061001_737663090.1", "1_1605880458.143435_1259922694.1", "1_1611225999.087089_1887995199.1", "1_1586035175.626619_488028.1", "", "", "1_1611057887.910246_973493243.1", "1_1612180963.822684_807349060.1", "", "", "1_1612180818.328001_807344892.1", "1_1611058156.662721_1887884194.1", "1_1611057588.159455_1887883796.1", "1_1611647015.874625_1129837262.1", "1_1586035175.602419_753756.1", "", "1_1606215091.912960_988474411.1", "", "1_1600418137.932356_1027064325.1", "1_1609926537.036681_832230841.1", "", "", "1_1611057624.857485_1658280806.1", "1_1600419671.553723_365405366.1", "", "1_1611057662.014628_859134308.1", "1_1611057665.933662_843443436.1", "1_1605879154.805811_700811071.1", "1_1602509494.904964_696294030.1", "", "1_1611057618.891024_1150752303.1", "1_1611440831.055432_1458827253.1", "1_1611451128.857514_806931659.1", "", "1_1611057597.877068_1785564634.1", "1_1611057860.565465_1785564826.1", "1_1585821684.950844_61616.1", "", "", "", "1_1601647994.988107_511440126.1", "", "1_1608194424.578834_777512349.1", "1_1605879126.845904_958578574.1", "", "1_1590061636.162223_183644368.1", "1_1609834839.884870_1076396513.1", "", "1_1612430017.546386_612493167.1", 
"1_1605879158.230856_1635059634.1", "", "1_1612420115.322098_1468865033.1", "1_1611057731.182423_817020944.1", "1_1611225026.887795_806142997.1", "1_1612188490.428048_1152864210.1", "1_1612187913.914410_861646554.1", "1_1609393942.952120_574675578.1", "1_1611733086.223927_861322773.1", "1_1605880394.928467_759903023.1", "1_1600418082.175862_556536400.1", "1_1605879150.320951_1210709666.1" ], "period_map": { "id": "e5d30f8f", "zonegroups": [ { "id": "667afef", "name": "xy", "api_name": "xy", "is_master": "true", "endpoints": [ "http://dns:80" ], "hostnames": [], "hostnames_s3website": [], "master_zone": "fe8ee939", "zones": [ { "id": "fe8ee939", "name": "prod", "endpoints": [ "http://dns:80" ], "log_meta": "false", "log_data": "false", "bucket_index_max_shards": 101, "read_only": "false", "tier_type": "", "sync_from_all": "false", "sync_from": [], "redirect_zone": "" } ], "placement_targets": [ { "name": "default-placement", "tags": [], "storage_classes": [ "STANDARD" ] } ], "default_placement": "default-placement", "realm_id": "234837df" } ], "short_zone_ids": [ { "key": "fe8ee939", "val": 2970845644 } ] }, "master_zonegroup": "667afefc", "master_zone": "fe8ee939", "period_config": { "bucket_quota": { "enabled": false, "check_on_raw": false, "max_size": -1, "max_size_kb": 0, "max_objects": -1 }, "user_quota": { "enabled": false, "check_on_raw": false, "max_size": -1, "max_size_kb": 0, "max_objects": -1 } }, "realm_id": "234837df", "realm_name": "rep", "realm_epoch": 3 } I need to clean up these errors before re-adding the secondary zone to the zonegroup. Do you have any suggestions? If I delete the old periods, what will happen? _______________________________________________ ceph-users mailing list -- ceph-users@xxxxxxx To unsubscribe send an email to ceph-users-leave@xxxxxxx