Dear all,

I have a Ceph cluster (150 OSDs, all BlueStore) with a pool named vms, size 1, 1024 PGs. Some PGs are located on osd.74, e.g. pg 10.71. I did the following steps:

1. Stopped osd.74.
2. Made a backup of the PG with:
   ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-74 --type bluestore --pgid 10.71 --op export --file 10.71.pgdata
3. The disk of osd.74 was then destroyed by mistake, so osd.74 is lost.
4. I reweighted osd.74 to 0 with:
   ceph osd crush reweight osd.74 0
5. The cluster began to recover.
6. Pg 10.71 is now in state "down".
7. ceph pg 10.71 query shows up [72] and acting [72], but peering is blocked by osd.74; the full output is below.

How can I rebuild this PG from my backup file 10.71.pgdata?

{
    "state": "down",
    "snap_trimq": "[]",
    "snap_trimq_len": 0,
    "epoch": 119206,
    "up": [ 72 ],
    "acting": [ 72 ],
    "info": {
        "pgid": "10.a9",
        "last_update": "108634'240572",
        "last_complete": "108634'240572",
        "log_tail": "86023'239072",
        "last_user_version": 0,
        "last_backfill": "MIN",
        "last_backfill_bitwise": 1,
        "purged_snaps": [
            { "start": "1", "length": "14b" }
        ],
        "history": {
            "epoch_created": 2011,
            "epoch_pool_created": 2011,
            "last_epoch_started": 119195,
            "last_interval_started": 119194,
            "last_epoch_clean": 119175,
            "last_interval_clean": 119174,
            "last_epoch_split": 75616,
            "last_epoch_marked_full": 2513,
            "same_up_since": 119189,
            "same_interval_since": 119196,
            "same_primary_since": 119196,
            "last_scrub": "108634'240572",
            "last_scrub_stamp": "2021-12-28 11:10:07.116683",
            "last_deep_scrub": "108634'240570",
            "last_deep_scrub_stamp": "2021-12-25 14:30:27.491912",
            "last_clean_scrub_stamp": "2021-12-28 11:10:07.116683"
        },
        "stats": {
            "version": "108634'240572",
            "reported_seq": "17",
            "reported_epoch": "119206",
            "state": "down",
            "last_fresh": "2022-01-07 15:41:10.709124",
            "last_change": "2022-01-07 15:36:27.626528",
            "last_active": "0.000000",
            "last_peered": "0.000000",
            "last_clean": "0.000000",
            "last_became_active": "0.000000",
            "last_became_peered": "0.000000",
            "last_unstale": "2022-01-07 15:41:10.709124",
            "last_undegraded": "2022-01-07 15:41:10.709124",
            "last_fullsized": "2022-01-07 15:41:10.709124",
            "mapping_epoch": 119196,
            "log_start": "86023'239072",
            "ondisk_log_start": "86023'239072",
            "created": 2011,
            "last_epoch_clean": 119175,
            "parent": "0.0",
            "parent_split_bits": 0,
            "last_scrub": "108634'240572",
            "last_scrub_stamp": "2021-12-28 11:10:07.116683",
            "last_deep_scrub": "108634'240570",
            "last_deep_scrub_stamp": "2021-12-25 14:30:27.491912",
            "last_clean_scrub_stamp": "2021-12-28 11:10:07.116683",
            "log_size": 1500,
            "ondisk_log_size": 1500,
            "stats_invalid": false,
            "dirty_stats_invalid": false,
            "omap_stats_invalid": false,
            "hitset_stats_invalid": false,
            "hitset_bytes_stats_invalid": false,
            "pin_stats_invalid": false,
            "snaptrimq_len": 0,
            "stat_sum": {
                "num_bytes": 0,
                "num_objects": 0,
                "num_object_clones": 0,
                "num_object_copies": 0,
                "num_objects_missing_on_primary": 0,
                "num_objects_missing": 0,
                "num_objects_degraded": 0,
                "num_objects_misplaced": 0,
                "num_objects_unfound": 0,
                "num_objects_dirty": 0,
                "num_whiteouts": 0,
                "num_read": 0,
                "num_read_kb": 0,
                "num_write": 0,
                "num_write_kb": 0,
                "num_scrub_errors": 0,
                "num_shallow_scrub_errors": 0,
                "num_deep_scrub_errors": 0,
                "num_objects_recovered": 0,
                "num_bytes_recovered": 0,
                "num_keys_recovered": 0,
                "num_objects_omap": 0,
                "num_objects_hit_set_archive": 0,
                "num_bytes_hit_set_archive": 0,
                "num_flush": 0,
                "num_flush_kb": 0,
                "num_evict": 0,
                "num_evict_kb": 0,
                "num_promote": 0,
                "num_flush_mode_high": 0,
                "num_flush_mode_low": 0,
                "num_evict_mode_some": 0,
                "num_evict_mode_full": 0,
                "num_objects_pinned": 0,
                "num_legacy_snapsets": 0,
"num_large_omap_objects": 0 }, "up": [ 72 ], "acting": [ 72 ], "blocked_by": [ 74 ], "up_primary": 72, "acting_primary": 72 }, "empty": 0, "dne": 0, "incomplete": 1, "last_epoch_started": 119195, "hit_set_history": { "current_last_update": "0'0", "history": [] } }, "peer_info": [], "recovery_state": [ { "name": "Started/Primary/Peering/Down", "enter_time": "2022-01-07 15:36:27.626517", "comment": "not enough up instances of this PG to go active" }, { "name": "Started/Primary/Peering", "enter_time": "2022-01-07 15:36:27.626466", "past_intervals": [ { "first": "119174", "last": "119195", "all_participants": [ { "osd": 72 }, { "osd": 74 } ], "intervals": [ { "first": "119189", "last": "119193", "acting": "72" }, { "first": "119194", "last": "119195", "acting": "74" } ] } ], "probing_osds": [ "72" ], "blocked": "peering is blocked due to down osds", "down_osds_we_would_probe": [ 74 ], "peering_blocked_by": [ { "osd": 74, "current_lost_at": 0, "comment": "starting or marking this osd lost may let us proceed" } ] }, { "name": "Started", "enter_time": "2022-01-07 15:36:27.626413" } ], "agent_state": {} } _______________________________________________ ceph-users mailing list -- ceph-users@xxxxxxx To unsubscribe send an email to ceph-users-leave@xxxxxxx