Thanks for the tip. I did wonder about that, checked it at one point, and assumed it was OK.
root@cnx-11:~# ceph osd pool application get cephfs_data
{
    "cephfs": {
        "data": "cephfs"
    }
}
root@cnx-11:~# ceph osd pool application get cephfs_data2
{
    "cephfs": {
        "data": "cephfs"
    }
}
root@cnx-11:~# ceph osd pool application get cephfs_metadata
{
    "cephfs": {
        "metadata": "cephfs"
    }
}
root@cnx-11:~#
Is the act of setting it again likely to make a needed change elsewhere that is fixed by that git pull?
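If setting it again is the way to go, I assume the commands would be roughly the following, using the pool and filesystem names from the output above (just my sketch of the ceph osd pool application set usage, not something I've run yet):

ceph osd pool application set cephfs_data cephfs data cephfs
ceph osd pool application set cephfs_data2 cephfs data cephfs
ceph osd pool application set cephfs_metadata cephfs metadata cephfs

and then re-checking with ceph osd pool application get on each pool to confirm the tags stuck.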
On Wed, 3 Jul 2019 at 17:20, Paul Emmerich <paul.emmerich@xxxxxxxx> wrote:
Your cephfs was probably created with a buggy version that didn't set the metadata tags on the data pools correctly. IIRC there still isn't any automated migration of old broken pools.