Hi all, We have a cluster whose fsmap and mdsmap have different values. Also, each mds has a different mdsmap epoch. The active mds has epoch 52, and the other two standby mds have 53 and 55, respectively. Why are the mdsmap epochs of each mds different? Our cluster: ceph 11.2.0, 3 nodes. Each node has a mon, mds and 4 OSDs. $ ceph mds stat --format=json { "fsmap": { "epoch": 55, "compat": { "compat": {}, "ro_compat": {}, "incompat": { "feature_1": "base v0.20", "feature_2": "client writeable ranges", "feature_3": "default file layouts on dirs", "feature_4": "dir inode in separate object", "feature_5": "mds uses versioned encoding", "feature_6": "dirfrag is stored in omap", "feature_8": "file layout v2" } }, "feature_flags": { "enable_multiple": false, "ever_enabled_multiple": false }, "standbys": [ { "gid": 414444, "name": "Host2", "rank": -1, "incarnation": 0, "state": "up:standby", "state_seq": 2, "addr": "10.4.154.141:6816/304221716", "standby_for_rank": -1, "standby_for_fscid": -1, "standby_for_name": "", "standby_replay": false, "export_targets": [], "features": 1152921504336314400, "epoch": 53 }, { "gid": 424717, "name": "Host3", "rank": -1, "incarnation": 0, "state": "up:standby", "state_seq": 2, "addr": "10.4.154.142:6816/627678162", "standby_for_rank": -1, "standby_for_fscid": -1, "standby_for_name": "", "standby_replay": false, "export_targets": [], "features": 1152921504336314400, "epoch": 55 } ], "filesystems": [ { "mdsmap": { "epoch": 52, "flags": 0, "ever_allowed_features": 0, "explicitly_allowed_features": 0, "created": "2017-06-15 11:56:32.709015", "modified": "2017-06-15 11:56:32.709015", "tableserver": 0, "root": 0, "session_timeout": 60, "session_autoclose": 300, "max_file_size": 1099511627776, "last_failure": 0, "last_failure_osd_epoch": 154, "compat": { "compat": {}, "ro_compat": {}, "incompat": { "feature_1": "base v0.20", "feature_2": "client writeable ranges", "feature_3": "default file layouts on dirs", "feature_4": "dir inode in separate object", 
"feature_5": "mds uses versioned encoding", "feature_6": "dirfrag is stored in omap", "feature_8": "file layout v2" } }, "max_mds": 1, "in": [ 0 ], "up": { "mds_0": 399818 }, "failed": [], "damaged": [], "stopped": [], "info": { "gid_399818": { "gid": 399818, "name": "Host1", "rank": 0, "incarnation": 49, "state": "up:active", "state_seq": 492, "addr": "10.4.154.140:6816/3168307953", "standby_for_rank": -1, "standby_for_fscid": -1, "standby_for_name": "", "standby_replay": false, "export_targets": [], "features": 1152921504336314400 } }, "data_pools": [ 2 ], "metadata_pool": 3, "enabled": true, "fs_name": "cephfs", "balancer": "" }, "id": 1 } ] }, "mdsmap_first_committed": 1, "mdsmap_last_committed": 55 } Thanks, Tim Lin _______________________________________________ ceph-users mailing list ceph-users@xxxxxxxxxxxxxx http://lists.ceph.com/listinfo.cgi/ceph-users-ceph.com