root@fulen-w006:~# ll client.fulen.keyring
-rw-r--r-- 1 root root 69 Feb 11 15:30 client.fulen.keyring
root@fulen-w006:~# ll ceph.conf
-rw-r--r-- 1 root root 118 Feb 11 19:15 ceph.conf
root@fulen-w006:~# rbd -c ceph.conf --id fulen --keyring client.fulen.keyring map fulen-nvme-meta/test-loreg-3
rbd: sysfs write failed
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (1) Operation not permitted
where test-loreg-3 is an image in an EC pool.
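(Debugging sketch on the failing node: the kernel client usually logs the concrete reason for an EPERM, so right after the failed map I would check dmesg and also confirm the keyring file really contains the client.fulen key, roughly:)
root@fulen-w006:~# dmesg | tail -n 20
root@fulen-w006:~# cat client.fulen.keyring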
root@fulen-w006:~# rbd -c ceph.conf --id fulen --keyring client.fulen.keyring info fulen-nvme-meta/test-loreg-3
rbd image 'test-loreg-3':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: e2375bbddf414a
data_pool: fulen-hdd-data
block_name_prefix: rbd_data.36.e2375bbddf414a
format: 2
features: layering, exclusive-lock, data-pool
op_features:
flags:
create_timestamp: Thu Feb 10 18:17:42 2022
access_timestamp: Thu Feb 10 18:17:42 2022
modify_timestamp: Thu Feb 10 18:17:42 2022
Giuseppe
On 11.02.22, 14:52, "Eugen Block" <eblock@xxxxxx> wrote:
How are the permissions of the client keyring on both systems?
Zitat von Lo Re Giuseppe <giuseppe.lore@xxxxxxx>:
> Hi,
>
> It's a single ceph cluster, I'm testing from 2 different client nodes.
> The caps are below.
> I think it is unlikely that caps are the cause, as they work from one
> client node, same ceph user, and not from the other one...
>
> Cheers,
>
> Giuseppe
>
>
> [root@naret-monitor01 ~]# ceph auth get client.fulen
> exported keyring for client.fulen
> [client.fulen]
> key = xxxxxxxx
> caps mgr = "profile rbd pool=fulen-hdd, profile rbd
> pool=fulen-nvme, profile rbd pool=fulen-dcache, profile rbd
> pool=fulen-dcache-data, profile rbd pool=fulen-dcache-meta, profile
> rbd pool=fulen-hdd-data, profile rbd pool=fulen-nvme-meta"
> caps mon = "profile rbd"
> caps osd = "profile rbd pool=fulen-hdd, profile rbd
> pool=fulen-nvme, profile rbd pool=fulen-dcache, profile rbd
> pool=fulen-dcache-data, profile rbd pool=fulen-dcache-meta, profile
> rbd pool=fulen-hdd-data, profile rbd pool=fulen-nvme-meta"
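> (For reference only, not a fix: caps like these are typically applied with 'ceph auth caps'; a sketch, with the pool list abbreviated to the two pools used in this test:)
> ceph auth caps client.fulen \
>     mon 'profile rbd' \
>     mgr 'profile rbd pool=fulen-nvme-meta, profile rbd pool=fulen-hdd-data' \
>     osd 'profile rbd pool=fulen-nvme-meta, profile rbd pool=fulen-hdd-data'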
>
>
>
> On 11.02.22, 13:22, "Eugen Block" <eblock@xxxxxx> wrote:
>
> Hi,
>
> the first thing coming to mind is the user's caps. Which permissions
> do they have? Have you compared 'ceph auth get client.fulen' on both
> clusters? Please paste the output from both clusters and redact
> sensitive information.
>
>
> Zitat von Lo Re Giuseppe <giuseppe.lore@xxxxxxx>:
>
> > Hi all,
> >
> > This is my first post to this user group, I’m not a ceph expert,
> > sorry if I say/ask anything trivial.
> >
> > On a Kubernetes cluster I have an issue in creating volumes from a
> > (csi) ceph EC pool.
> >
> > I can reproduce the problem from rbd cli like this from one of the
> > k8s worker nodes:
> >
> > “””
> > root@fulen-w006:~# ceph -v
> > ceph version 15.2.14 (cd3bb7e87a2f62c1b862ff3fd8b1eec13391a5be) octopus (stable)
> >
> > root@fulen-w006:~# rbd -m 148.187.20.141:6789 --id fulen --keyfile key create test-loreg-3 --data-pool fulen-hdd-data --pool fulen-nvme-meta --size 1G
> >
> > root@fulen-w006:~# rbd -m 148.187.20.141:6789 --id fulen --keyfile key map fulen-nvme-meta/test-loreg-3
> > ...
> > rbd: sysfs write failed
> > ...
> > In some cases useful info is found in syslog - try "dmesg | tail".
> > rbd: map failed: (1) Operation not permitted
> > “””
> >
> > The same sequence of operations works on a different node (not part
> > of the k8s cluster, completely different setup):
> >
> > “””
> > root@storagesmw: # ceph -v
> > ceph version 15.2.13 (c44bc49e7a57a87d84dfff2a077a2058aa2172e2) octopus (stable)
> >
> > root@storagesmw: # rbd -m 148.187.20.141:6789 --id fulen --keyfile client.fulen.key create test-loreg-4 --data-pool fulen-hdd-data --pool fulen-nvme-meta --size 1G
> >
> > root@storagesmw: # rbd -m 148.187.20.141:6789 --id fulen --keyfile client.fulen.key info fulen-nvme-meta/test-loreg-4
> > rbd image 'test-loreg-4':
> > size 1 GiB in 256 objects
> > order 22 (4 MiB objects)
> > snapshot_count: 0
> > id: cafc436ff3573
> > data_pool: fulen-hdd-data
> > block_name_prefix: rbd_data.36.cafc436ff3573
> > format: 2
> > features: layering, exclusive-lock, object-map, fast-diff, deep-flatten, data-pool
> > op_features:
> > flags:
> > create_timestamp: Thu Feb 10 18:23:26 2022
> > access_timestamp: Thu Feb 10 18:23:26 2022
> > modify_timestamp: Thu Feb 10 18:23:26 2022
> >
> > root@storagesmw: # rbd -m 148.187.20.141:6789 --id fulen --keyfile client.fulen.key map fulen-nvme-meta/test-loreg-4
> > RBD image feature set mismatch. You can disable features unsupported
> > by the kernel with "rbd feature disable fulen-nvme-meta/test-loreg-4
> > object-map fast-diff deep-flatten".
> > In some cases useful info is found in syslog - try "dmesg | tail".
> > rbd: map failed: (6) No such device or address
> >
> > root@storagesmw: # rbd -m 148.187.20.141:6789 --id fulen --keyfile client.fulen.key feature disable fulen-nvme-meta/test-loreg-4 object-map fast-diff deep-flatten
> >
> > root@storagesmw: # rbd -m 148.187.20.141:6789 --id fulen --keyfile client.fulen.key map fulen-nvme-meta/test-loreg-4
> > /dev/rbd0
> > “””
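> >
> > (Side note, just a sketch: the feature-disable step could be avoided by creating the image with only kernel-supported features in the first place; the image name test-loreg-5 below is hypothetical, and --data-pool implies the data-pool feature automatically:)
> >
> > root@storagesmw: # rbd -m 148.187.20.141:6789 --id fulen --keyfile client.fulen.key create test-loreg-5 --image-feature layering,exclusive-lock --data-pool fulen-hdd-data --pool fulen-nvme-meta --size 1G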
> >
> > The two nodes' OS release and kernel are below.
> >
> > Does anyone have any advice on how to debug this?
> >
> > Thanks in advance,
> >
> > Giuseppe
> >
> > ==============================================
> > Fulen-w006:
> >
> > root@fulen-w006:~# cat /etc/os-release
> > NAME="Ubuntu"
> > VERSION="20.04.3 LTS (Focal Fossa)"
> > ID=ubuntu
> > ID_LIKE=debian
> > PRETTY_NAME="Ubuntu 20.04.3 LTS"
> > VERSION_ID="20.04"
> > HOME_URL="https://www.ubuntu.com/"
> > SUPPORT_URL="https://help.ubuntu.com/"
> > BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
> >
> > PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
> > VERSION_CODENAME=focal
> > UBUNTU_CODENAME=focal
> > root@fulen-w006:~# uname -a
> > Linux fulen-w006.cscs.ch 5.4.0-96-generic #109-Ubuntu SMP Wed Jan 12 16:49:16 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux
> >
> > Storagesmw:
> >
> > root@storagesmw:~/loreg/ceph_conf # cat /etc/os-release
> > NAME="Red Hat Enterprise Linux Server"
> > VERSION="7.8 (Maipo)"
> > ID="rhel"
> > ID_LIKE="fedora"
> > VARIANT="Server"
> > VARIANT_ID="server"
> > VERSION_ID="7.8"
> > PRETTY_NAME="Red Hat Enterprise Linux"
> > ANSI_COLOR="0;31"
> > CPE_NAME="cpe:/o:redhat:enterprise_linux:7.8:GA:server"
> > HOME_URL="https://www.redhat.com/"
> > BUG_REPORT_URL="https://bugzilla.redhat.com/"
> >
> > REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
> > REDHAT_BUGZILLA_PRODUCT_VERSION=7.8
> > REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
> > REDHAT_SUPPORT_PRODUCT_VERSION="7.8"
> > root@storagesmw:~/loreg/ceph_conf # uname -a
> > Linux storagesmw.cscs.ch 3.10.0-1127.19.1.el7.x86_64 #1 SMP Tue Aug 11 19:12:04 EDT 2020 x86_64 x86_64 x86_64 GNU/Linux
> >
> >
> >
> >
> >
>
>
>
> _______________________________________________
> ceph-users mailing list -- ceph-users@xxxxxxx
> To unsubscribe send an email to ceph-users-leave@xxxxxxx