I have just finished upgrading a Ceph cluster from Luminous to Nautilus.
Everything seems to be running, but I keep receiving notifications (about 10 so far, involving different PGs and different OSDs) of PGs in an inconsistent state.
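For reference, the affected PGs can be listed with the usual commands (nothing Nautilus-specific, as far as I can tell; <pool-name> is a placeholder):

  ceph health detail                       # lists the PGs currently flagged as inconsistent
  rados list-inconsistent-pg <pool-name>   # same information per pool, as JSON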
rados list-inconsistent-obj <pg-id> --format=json-pretty (example output below) says that the problem is "size_too_large".
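If I understand the scrub check correctly, "size_too_large" means that an object's size exceeds the osd_max_object_size limit; note that all the objects in the output below are ~368 MiB, and Nautilus, as far as I know, lowered the default limit to 128 MiB. Assuming the mon config database is in use, the current value can be queried with:

  ceph config get osd osd_max_object_size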
"ceph pg repair" is able to "fix" the problem, but I am not able to understand what is the problem
Thanks, Massimo
{ "epoch": 1966551, "inconsistents": [ { "object": { "name": "/hbWPh36KajAKcJUlCjG9XdqLGQMzkwn3NDrrLDi_mTM/file2", "nspace": "", "locator": "", "snap": "head", "version": 368 }, "errors": [ "size_too_large" ], "union_shard_errors": [], "selected_object_info": { "oid": { "oid": "/hbWPh36KajAKcJUlCjG9XdqLGQMzkwn3NDrrLDi_mTM/file2", "key": "", "snapid": -2, "hash": 1714937604, "max": 0, "pool": 13, "namespace": "" }, "version": "243582'368", "prior_version": "243582'367", "last_reqid": "client.13143063.0:20504", "user_version": 368, "size": 385888256, "mtime": "2017-10-10 14:09:12.098334", "local_mtime": "2017-10-10 14:10:29.321446", "lost": 0, "flags": [ "dirty", "data_digest", "omap_digest" ], "truncate_seq": 0, "truncate_size": 0, "data_digest": "0x9229f11b", "omap_digest": "0xffffffff", "expected_object_size": 0, "expected_write_size": 0, "alloc_hint_flags": 0, "manifest": { "type": 0 }, "watchers": {} }, "shards": [ { "osd": 13, "primary": false, "errors": [], "size": 385888256, "omap_digest": "0xffffffff", "data_digest": "0x9229f11b" }, { "osd": 38, "primary": false, "errors": [], "size": 385888256, "omap_digest": "0xffffffff", "data_digest": "0x9229f11b" }, { "osd": 54, "primary": true, "errors": [], "size": 385888256, "omap_digest": "0xffffffff", "data_digest": "0x9229f11b" } ] }, { "object": { "name": "/hbWPh36KajAKcJUlCjG9XdqLGQMzkwn3NDrrLDi_mTM/file8", "nspace": "", "locator": "", "snap": "head", "version": 417 }, "errors": [ "size_too_large" ], "union_shard_errors": [], "selected_object_info": { "oid": { "oid": "/hbWPh36KajAKcJUlCjG9XdqLGQMzkwn3NDrrLDi_mTM/file8", "key": "", "snapid": -2, "hash": 3180021668, "max": 0, "pool": 13, "namespace": "" }, "version": "243596'417", "prior_version": "243596'416", "last_reqid": "client.13143063.0:20858", "user_version": 417, "size": 385888256, "mtime": "2017-10-10 14:16:32.814931", "local_mtime": "2017-10-10 14:17:50.248174", "lost": 0, "flags": [ "dirty", "data_digest", "omap_digest" ], "truncate_seq": 0, "truncate_size": 0, "data_digest": "0x9229f11b", "omap_digest": "0xffffffff", "expected_object_size": 0, "expected_write_size": 0, "alloc_hint_flags": 0, "manifest": { "type": 0 }, "watchers": {} }, "shards": [ { "osd": 13, "primary": false, "errors": [], "size": 385888256, "omap_digest": "0xffffffff", "data_digest": "0x9229f11b" }, { "osd": 38, "primary": false, "errors": [], "size": 385888256, "omap_digest": "0xffffffff", "data_digest": "0x9229f11b" }, { "osd": 54, "primary": true, "errors": [], "size": 385888256, "omap_digest": "0xffffffff", "data_digest": "0x9229f11b" } ] } ] }