Hi folks,
we deployed a new Reef cluster in our lab. All of the nodes are up and running, but we cannot allocate a LUN to the iSCSI target.
In the GUI we get the message "disk create/update failed on ceph-iscsigw0. LUN allocation failure".
The RBD images themselves were created from the GUI.
Do you have any idea what might be wrong?
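For reference, based on the upstream gwcli documentation, the CLI equivalent of what the GUI is trying to do should be roughly the following (pool/image names taken from the listings below; shown only as an illustration, we have not verified the exact syntax on this cluster):

[root@ceph-iscsigw0 /]# gwcli
/> cd /disks
/disks> create pool=1T-r3-01 image=111 size=1G    # register the existing 1 GiB RBD image as an iSCSI disk/backstore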
Thanks
root@ceph-mgr0:~# ceph -s
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
  cluster:
    id:     ad0aede2-4100-11ee-bc14-1c40244f5c21
    health: HEALTH_OK

  services:
    mon:         5 daemons, quorum ceph-mgr0,ceph-mgr1,ceph-osd5,ceph-osd7,ceph-osd6 (age 28h)
    mgr:         ceph-mgr0.sapbav(active, since 45h), standbys: ceph-mgr1.zwzyuc
    osd:         44 osds: 44 up (since 4h), 44 in (since 4h)
    tcmu-runner: 1 portal active (1 hosts)

  data:
    pools:   5 pools, 3074 pgs
    objects: 27 objects, 453 KiB
    usage:   30 GiB used, 101 TiB / 101 TiB avail
    pgs:     3074 active+clean

  io:
    client: 2.7 KiB/s rd, 2 op/s rd, 0 op/s wr
root@ceph-mgr0:~#
root@ceph-mgr0:~# rados lspools
.mgr
ace1
1T-r3-01
ace0
x
root@ceph-mgr0:~# rbd ls 1T-r3-01
111
aaaa
bb
pool2
teszt
root@ceph-mgr0:~# rbd ls x
x-a
root@ceph-mgr0:~#
root@ceph-mgr0:~# rbd info 1T-r3-01/111
rbd image '111':
    size 1 GiB in 256 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 5f927ce161de
    block_name_prefix: rbd_data.5f927ce161de
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
    op_features:
    flags:
    create_timestamp: Thu Aug 24 17:33:37 2023
    access_timestamp: Thu Aug 24 17:33:37 2023
    modify_timestamp: Thu Aug 24 17:33:37 2023
root@ceph-mgr0:~# rbd info 1T-r3-01/aaaa
rbd image 'aaaa':
    size 1 GiB in 256 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 5f926a0e299f
    block_name_prefix: rbd_data.5f926a0e299f
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
    op_features:
    flags:
    create_timestamp: Thu Aug 24 17:18:06 2023
    access_timestamp: Thu Aug 24 17:18:06 2023
    modify_timestamp: Thu Aug 24 17:18:06 2023
root@ceph-mgr0:~# rbd info x/x-a
rbd image 'x-a':
    size 1 GiB in 256 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 5f922dbdf6c6
    block_name_prefix: rbd_data.5f922dbdf6c6
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
    op_features:
    flags:
    create_timestamp: Thu Aug 24 17:48:28 2023
    access_timestamp: Thu Aug 24 17:48:28 2023
    modify_timestamp: Thu Aug 24 17:48:28 2023
root@ceph-mgr0:~#
root@ceph-mgr0:~# ceph orch ls --service_type iscsi
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
NAME        PORTS    RUNNING  REFRESHED  AGE  PLACEMENT
iscsi.gw-1  ?:5000   2/2      4m ago     6m   ceph-iscsigw0;ceph-iscsigw1
root@ceph-mgr0:~#
On the gateway node (ceph-iscsigw0):
root@ceph-iscsigw0:~# docker ps
CONTAINER ID   IMAGE                                     COMMAND                  CREATED             STATUS             PORTS   NAMES
d677a8abd2d8   quay.io/ceph/ceph                         "/usr/bin/rbd-target…"   6 seconds ago       Up 5 seconds               ceph-ad0aede2-4100-11ee-bc14-1c40244f5c21-iscsi-gw-1-ceph-iscsigw0-fmuyhi
ead503586cdd   quay.io/ceph/ceph                         "/usr/bin/tcmu-runner"   6 seconds ago       Up 5 seconds               ceph-ad0aede2-4100-11ee-bc14-1c40244f5c21-iscsi-gw-1-ceph-iscsigw0-fmuyhi-tcmu
3ae0014bcc41   quay.io/ceph/ceph                         "/usr/bin/ceph-crash…"   About an hour ago   Up About an hour           ceph-ad0aede2-4100-11ee-bc14-1c40244f5c21-crash-ceph-iscsigw0
1a7bc044ed8a   quay.io/ceph/ceph                         "/usr/bin/ceph-expor…"   About an hour ago   Up About an hour           ceph-ad0aede2-4100-11ee-bc14-1c40244f5c21-ceph-exporter-ceph-iscsigw0
c746a4da2bbb   quay.io/prometheus/node-exporter:v1.5.0   "/bin/node_exporter …"   About an hour ago   Up About an hour           ceph-ad0aede2-4100-11ee-bc14-1c40244f5c21-node-exporter-ceph-iscsigw0
root@ceph-iscsigw0:~# docker exec -it d677a8abd2d8 /bin/bash
[root@ceph-iscsigw0 /]# gwcli ls
o- / ............................................................................................ [...]
  o- cluster ............................................................................ [Clusters: 1]
  | o- ceph .............................................................................. [HEALTH_OK]
  | o- pools .............................................................................. [Pools: 5]
  | | o- .mgr ..................................... [(x3), Commit: 0.00Y/33602764M (0%), Used: 3184K]
  | | o- 1T-r3-01 .................................. [(x3), Commit: 0.00Y/5793684M (0%), Used: 108K]
  | | o- ace0 ..................................... [(2+1), Commit: 0.00Y/11587368M (0%), Used: 24K]
  | | o- ace1 ..................................... [(2+1), Commit: 0.00Y/55665220M (0%), Used: 12K]
  | | o- x ........................................... [(x3), Commit: 0.00Y/33602764M (0%), Used: 36K]
  | o- topology ................................................................... [OSDs: 44,MONs: 5]
  o- disks ......................................................................... [0.00Y, Disks: 0]
  o- iscsi-targets ................................................. [DiscoveryAuth: None, Targets: 1]
    o- iqn.2001-07.com.ceph:1692892702115 .................................. [Auth: None, Gateways: 2]
      o- disks ............................................................................ [Disks: 0]
      o- gateways .............................................................. [Up: 2/2, Portals: 2]
      | o- ceph-iscsigw0 .............................................. [10.202.5.21,10.202.4.21 (UP)]
      | o- ceph-iscsigw1 .............................................. [10.202.3.21,10.202.2.21 (UP)]
      o- host-groups .................................................................... [Groups : 0]
      o- hosts ............................................................ [Auth: ACL_ENABLED, Hosts: 0]
[root@ceph-iscsigw0 /]#
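If it helps, we can also pull the logs from the iSCSI gateway containers around the time of the failure, for example (container IDs taken from the docker ps output above):

root@ceph-iscsigw0:~# docker logs --since 30m d677a8abd2d8    # rbd-target container
root@ceph-iscsigw0:~# docker logs --since 30m ead503586cdd    # tcmu-runner container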