Hi,
thank you for your reply.
That really is all. I tried to call `cephadm ceph-volume lvm activate
--all` (see below), and this apparently crashes because of some Unicode
problem... might that be the root cause?
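For reference, this looks like plain Python logging failing on an ASCII
locale. A minimal sketch outside of Ceph (my own repro attempt, nothing
container-specific) shows the same error:

```
# Force an ASCII stream encoding, as a bare C/POSIX locale would:
$ PYTHONIOENCODING=ascii python3 -c 'print("\u2192")'
UnicodeEncodeError: 'ascii' codec can't encode character '\u2192' in
position 0: ordinal not in range(128)

# With UTF-8 the same character prints fine:
$ PYTHONIOENCODING=utf-8 python3 -c 'print("\u2192")'
→
```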
Cheers,
Manuel
[root@dmz-host-4 rocky]# cephadm ceph-volume lvm activate --all
Inferring fsid d221bc3c-8ff4-11ec-b4ba-b02628267680
Using recent ceph image quay.io/ceph/ceph@sha256:0d927ccbd8892180ee09894c2b2c26d07c938bf96a56eaee9b80fc9f26083ddb
Non-zero exit code 1 from /bin/docker run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.io/ceph/ceph@sha256:0d927ccbd8892180ee09894c2b2c26d07c938bf96a56eaee9b80fc9f26083ddb -e NODE_NAME=dmz-host-4 -e CEPH_USE_RANDOM_NONCE=1 -v /var/run/ceph/d221bc3c-8ff4-11ec-b4ba-b02628267680:/var/run/ceph:z -v /var/log/ceph/d221bc3c-8ff4-11ec-b4ba-b02628267680:/var/log/ceph:z -v /var/lib/ceph/d221bc3c-8ff4-11ec-b4ba-b02628267680/crash:/var/lib/ceph/crash:z -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /tmp/ceph-tmp1rkweygp:/etc/ceph/ceph.conf:z quay.io/ceph/ceph@sha256:0d927ccbd8892180ee09894c2b2c26d07c938bf96a56eaee9b80fc9f26083ddb lvm activate --all
/bin/docker: stderr --> Activating OSD ID 12 FSID e2ebb627-28aa-45a3-9261-d7c27bc08448
/bin/docker: stderr Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-12
/bin/docker: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-12
/bin/docker: stderr Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-2cbf3973-13a3-444a-b335-a0262cff6074/osd-block-e2ebb627-28aa-45a3-9261-d7c27bc08448 --path /var/lib/ceph/osd/ceph-12 --no-mon-config
/bin/docker: stderr Running command: /usr/bin/ln -snf /dev/ceph-2cbf3973-13a3-444a-b335-a0262cff6074/osd-block-e2ebb627-28aa-45a3-9261-d7c27bc08448 /var/lib/ceph/osd/ceph-12/block
/bin/docker: stderr Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-12/block
/bin/docker: stderr Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3
/bin/docker: stderr Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-12
/bin/docker: stderr Running command: /usr/bin/systemctl enable ceph-volume@lvm-12-e2ebb627-28aa-45a3-9261-d7c27bc08448
/bin/docker: stderr stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-12-e2ebb627-28aa-45a3-9261-d7c27bc08448.service → /usr/lib/systemd/system/ceph-volume@.service.
/bin/docker: stderr --- Logging error ---
/bin/docker: stderr Traceback (most recent call last):
/bin/docker: stderr   File "/usr/lib64/python3.6/logging/__init__.py", line 996, in emit
/bin/docker: stderr     stream.write(msg)
/bin/docker: stderr UnicodeEncodeError: 'ascii' codec can't encode character '\u2192' in position 186: ordinal not in range(128)
/bin/docker: stderr Call stack:
/bin/docker: stderr   File "/usr/sbin/ceph-volume", line 11, in <module>
/bin/docker: stderr     load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 40, in __init__
/bin/docker: stderr     self.main(self.argv)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 59, in newfunc
/bin/docker: stderr     return f(*a, **kw)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 152, in main
/bin/docker: stderr     terminal.dispatch(self.mapper, subcommand_args)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 194, in dispatch
/bin/docker: stderr     instance.main()
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/main.py", line 46, in main
/bin/docker: stderr     terminal.dispatch(self.mapper, self.argv)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 194, in dispatch
/bin/docker: stderr     instance.main()
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/activate.py", line 373, in main
/bin/docker: stderr     self.activate_all(args)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root
/bin/docker: stderr     return func(*a, **kw)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/activate.py", line 254, in activate_all
/bin/docker: stderr     self.activate(args, osd_id=osd_id, osd_fsid=osd_fsid)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root
/bin/docker: stderr     return func(*a, **kw)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/activate.py", line 299, in activate
/bin/docker: stderr     activate_bluestore(lvs, args.no_systemd)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/activate.py", line 214, in activate_bluestore
/bin/docker: stderr     systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/systemd/systemctl.py", line 82, in enable_volume
/bin/docker: stderr     return enable(volume_unit % (device_type, id_, fsid))
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/systemd/systemctl.py", line 22, in enable
/bin/docker: stderr     process.run(['systemctl', 'enable', unit])
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/process.py", line 137, in run
/bin/docker: stderr     log_descriptors(reads, process, terminal_logging)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/process.py", line 59, in log_descriptors
/bin/docker: stderr     log_output(descriptor_name, message, terminal_logging, True)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/process.py", line 34, in log_output
/bin/docker: stderr     logger.info(line)
/bin/docker: stderr Message: 'stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-12-e2ebb627-28aa-45a3-9261-d7c27bc08448.service → /usr/lib/systemd/system/ceph-volume@.service.'
/bin/docker: stderr Arguments: ()
/bin/docker: stderr Running command: /usr/bin/systemctl enable --runtime ceph-osd@12
/bin/docker: stderr stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@12.service → /usr/lib/systemd/system/ceph-osd@.service.
/bin/docker: stderr --- Logging error ---
/bin/docker: stderr Traceback (most recent call last):
/bin/docker: stderr   File "/usr/lib64/python3.6/logging/__init__.py", line 996, in emit
/bin/docker: stderr     stream.write(msg)
/bin/docker: stderr UnicodeEncodeError: 'ascii' codec can't encode character '\u2192' in position 140: ordinal not in range(128)
/bin/docker: stderr Call stack:
/bin/docker: stderr   File "/usr/sbin/ceph-volume", line 11, in <module>
/bin/docker: stderr     load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 40, in __init__
/bin/docker: stderr     self.main(self.argv)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 59, in newfunc
/bin/docker: stderr     return f(*a, **kw)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 152, in main
/bin/docker: stderr     terminal.dispatch(self.mapper, subcommand_args)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 194, in dispatch
/bin/docker: stderr     instance.main()
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/main.py", line 46, in main
/bin/docker: stderr     terminal.dispatch(self.mapper, self.argv)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 194, in dispatch
/bin/docker: stderr     instance.main()
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/activate.py", line 373, in main
/bin/docker: stderr     self.activate_all(args)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root
/bin/docker: stderr     return func(*a, **kw)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/activate.py", line 254, in activate_all
/bin/docker: stderr     self.activate(args, osd_id=osd_id, osd_fsid=osd_fsid)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root
/bin/docker: stderr     return func(*a, **kw)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/activate.py", line 299, in activate
/bin/docker: stderr     activate_bluestore(lvs, args.no_systemd)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/activate.py", line 217, in activate_bluestore
/bin/docker: stderr     systemctl.enable_osd(osd_id)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/systemd/systemctl.py", line 70, in enable_osd
/bin/docker: stderr     return enable(osd_unit % id_, runtime=True)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/systemd/systemctl.py", line 20, in enable
/bin/docker: stderr     process.run(['systemctl', 'enable', '--runtime', unit])
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/process.py", line 137, in run
/bin/docker: stderr     log_descriptors(reads, process, terminal_logging)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/process.py", line 59, in log_descriptors
/bin/docker: stderr     log_output(descriptor_name, message, terminal_logging, True)
/bin/docker: stderr   File "/usr/lib/python3.6/site-packages/ceph_volume/process.py", line 34, in log_output
/bin/docker: stderr     logger.info(line)
/bin/docker: stderr Message: 'stderr Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@12.service → /usr/lib/systemd/system/ceph-osd@.service.'
/bin/docker: stderr Arguments: ()
/bin/docker: stderr Running command: /usr/bin/systemctl start ceph-osd@12
/bin/docker: stderr stderr: Failed to connect to bus: No such file or directory
/bin/docker: stderr --> RuntimeError: command returned non-zero exit status: 1
Traceback (most recent call last):
  File "/sbin/cephadm", line 8571, in <module>
    main()
  File "/sbin/cephadm", line 8559, in main
    r = ctx.func(ctx)
  File "/sbin/cephadm", line 1737, in _infer_config
    return func(ctx)
  File "/sbin/cephadm", line 1707, in _infer_fsid
    return func(ctx)
  File "/sbin/cephadm", line 1765, in _infer_image
    return func(ctx)
  File "/sbin/cephadm", line 1665, in _validate_fsid
    return func(ctx)
  File "/sbin/cephadm", line 4822, in command_ceph_volume
    out, err, code = call_throws(ctx, c.run_cmd())
  File "/sbin/cephadm", line 1467, in call_throws
    raise RuntimeError('Failed command: %s' % ' '.join(command))
RuntimeError: Failed command: /bin/docker run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.io/ceph/ceph@sha256:0d927ccbd8892180ee09894c2b2c26d07c938bf96a56eaee9b80fc9f26083ddb -e NODE_NAME=dmz-host-4 -e CEPH_USE_RANDOM_NONCE=1 -v /var/run/ceph/d221bc3c-8ff4-11ec-b4ba-b02628267680:/var/run/ceph:z -v /var/log/ceph/d221bc3c-8ff4-11ec-b4ba-b02628267680:/var/log/ceph:z -v /var/lib/ceph/d221bc3c-8ff4-11ec-b4ba-b02628267680/crash:/var/lib/ceph/crash:z -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /tmp/ceph-tmp1rkweygp:/etc/ceph/ceph.conf:z quay.io/ceph/ceph@sha256:0d927ccbd8892180ee09894c2b2c26d07c938bf96a56eaee9b80fc9f26083ddb lvm activate --all
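Re-reading the paste: the two Unicode tracebacks are "--- Logging error
---" reports and the activation continues past them; the final
RuntimeError comes right after `systemctl start ceph-osd@12` fails with
"Failed to connect to bus". So perhaps that is the more relevant part.
This is what I would check on the host next (just a sketch; the unit
name is my assumption based on cephadm's usual ceph-<fsid>@osd.<id>
naming):

```
systemctl status ceph-d221bc3c-8ff4-11ec-b4ba-b02628267680@osd.12.service
journalctl -u ceph-d221bc3c-8ff4-11ec-b4ba-b02628267680@osd.12.service --since today
```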
On Wed, Apr 20, 2022 at 1:26 PM Eugen Block <eblock@xxxxxx> wrote:
Hi,
> and the relevant log output is here:
>
> https://privatepastebin.com/?f95c66924a7ddda9#ADEposX5DCo5fb5wGv42czrxaHscnwoHB7igc3eNQMwc
This is just the output of 'ceph-volume lvm list'. Is that really all?
I haven't had the chance to test 'ceph cephadm osd activate' myself, so
I can't really tell what to expect, but apparently the LVs and OSDs are
present; cephadm just doesn't seem to do anything with them. Do you see
any attempt in the host's logs to start containers for the OSDs?
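Something like this on the host might show such an attempt (the name
filters are only my guess at the container naming):

journalctl --since "1 hour ago" | grep -iE 'cephadm|osd'
docker ps -a --filter name=ceph    # or 'podman ps -a', depending on the host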
Quoting Manuel Holtgrewe <zyklenfrei@xxxxxxxxx>:
> Dear Eugen,
>
> thanks for the hint. The output is pasted below. I can't gather any
> useful information from that.
>
> I also followed the instructions from
>
> https://docs.ceph.com/en/latest/cephadm/operations/#watching-cephadm-log-messages
>
> ```
> ceph config set mgr mgr/cephadm/log_to_cluster_level debug
> ceph -W cephadm --watch-debug
> ```
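>
> (Side note: this is very verbose; the level can be reset afterwards
> with the generic config removal command, if I remember it right:)
>
> ```
> ceph config rm mgr mgr/cephadm/log_to_cluster_level
> ```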
>
> and the relevant log output is here:
>
> - https://privatepastebin.com/?f95c66924a7ddda9#ADEposX5DCo5fb5wGv42czrxaHscnwoHB7igc3eNQMwc
>
> Cheers,
> Manuel
>
>
> [2022-04-20 10:38:01,909][ceph_volume.main][INFO ] Running command:
> ceph-volume lvm list --format json
> [2022-04-20 10:38:01,911][ceph_volume.process][INFO ] Running command:
> /usr/sbin/lvs --noheadings --readonly --separator=";" -a --units=b
> --nosuffix -S -o lv_tags,lv_path,lv_name,vg_name,lv_uuid,lv_size
> [2022-04-20 10:38:01,975][ceph_volume.process][INFO ] stdout
>
ceph.block_device=/dev/ceph-2cbf3973-13a3-444a-b335-a0262cff6074/osd-block-e2ebb627-28aa-45a3-9261-d7c27bc08448,ceph.block_uuid=AbCFEt-JrjE-bfnc-HiHu-XlyH-G72j-0jwec7,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d221bc3c-8ff4-11ec-b4ba-b02628267680,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=e2ebb627-28aa-45a3-9261-d7c27bc08448,ceph.osd_id=12,ceph.osdspec_affinity=all,ceph.type=block,ceph.vdo=0";"/dev/ceph-2cbf3973-13a3-444a-b335-a0262cff6074/osd-block-e2ebb627-28aa-45a3-9261-d7c27bc08448";"osd-block-e2ebb627-28aa-45a3-9261-d7c27bc08448";"ceph-2cbf3973-13a3-444a-b335-a0262cff6074";"AbCFEt-JrjE-bfnc-HiHu-XlyH-G72j-0jwec7";"1920378863616
> [2022-04-20 10:38:01,975][ceph_volume.process][INFO ] stdout
>
ceph.block_device=/dev/ceph-6f69c24e-2930-48ff-a18f-278470e558e1/osd-block-3f3d61f8-6964-4922-98cb-6620aff5cb6f,ceph.block_uuid=MS5ExP-ROK9-nVRG-l6wA-eHLL-Wjic-lMsYPM,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d221bc3c-8ff4-11ec-b4ba-b02628267680,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=3f3d61f8-6964-4922-98cb-6620aff5cb6f,ceph.osd_id=25,ceph.osdspec_affinity=all,ceph.type=block,ceph.vdo=0";"/dev/ceph-6f69c24e-2930-48ff-a18f-278470e558e1/osd-block-3f3d61f8-6964-4922-98cb-6620aff5cb6f";"osd-block-3f3d61f8-6964-4922-98cb-6620aff5cb6f";"ceph-6f69c24e-2930-48ff-a18f-278470e558e1";"MS5ExP-ROK9-nVRG-l6wA-eHLL-Wjic-lMsYPM";"1920378863616
> [2022-04-20 10:38:01,975][ceph_volume.process][INFO ] stdout
>
ceph.block_device=/dev/ceph-8d9bd6eb-ce97-4940-8fed-f57f7bed3f5a/osd-block-8d0b1bad-069a-4acf-b13b-982fab58f285,ceph.block_uuid=rrJbzA-JSuc-GrSf-KsXd-VWi6-ffTW-LS5PB3,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d221bc3c-8ff4-11ec-b4ba-b02628267680,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=8d0b1bad-069a-4acf-b13b-982fab58f285,ceph.osd_id=0,ceph.osdspec_affinity=all,ceph.type=block,ceph.vdo=0";"/dev/ceph-8d9bd6eb-ce97-4940-8fed-f57f7bed3f5a/osd-block-8d0b1bad-069a-4acf-b13b-982fab58f285";"osd-block-8d0b1bad-069a-4acf-b13b-982fab58f285";"ceph-8d9bd6eb-ce97-4940-8fed-f57f7bed3f5a";"rrJbzA-JSuc-GrSf-KsXd-VWi6-ffTW-LS5PB3";"1920378863616
> [2022-04-20 10:38:01,975][ceph_volume.process][INFO ] stdout
>
ceph.block_device=/dev/ceph-a8c3f39b-ec8d-4dd5-a85a-13a5773b99fa/osd-block-ff82d0d0-6d55-49c2-85cc-bb8a0a74ae89,ceph.block_uuid=CB1rmb-Onhq-Vdrh-9Ide-XpA0-IBg3-TFrSNX,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d221bc3c-8ff4-11ec-b4ba-b02628267680,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=ff82d0d0-6d55-49c2-85cc-bb8a0a74ae89,ceph.osd_id=8,ceph.osdspec_affinity=all,ceph.type=block,ceph.vdo=0";"/dev/ceph-a8c3f39b-ec8d-4dd5-a85a-13a5773b99fa/osd-block-ff82d0d0-6d55-49c2-85cc-bb8a0a74ae89";"osd-block-ff82d0d0-6d55-49c2-85cc-bb8a0a74ae89";"ceph-a8c3f39b-ec8d-4dd5-a85a-13a5773b99fa";"CB1rmb-Onhq-Vdrh-9Ide-XpA0-IBg3-TFrSNX";"1920378863616
> [2022-04-20 10:38:01,975][ceph_volume.process][INFO ] stdout
>
ceph.block_device=/dev/ceph-bf94d5a5-eb04-42e3-867a-dee2886daa62/osd-block-313160a8-594d-4384-9640-68c4d8c1b6da,ceph.block_uuid=CgvBFd-wk3r-7a0y-XPiH-WoKS-VGmX-yntCse,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d221bc3c-8ff4-11ec-b4ba-b02628267680,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=313160a8-594d-4384-9640-68c4d8c1b6da,ceph.osd_id=4,ceph.osdspec_affinity=all,ceph.type=block,ceph.vdo=0";"/dev/ceph-bf94d5a5-eb04-42e3-867a-dee2886daa62/osd-block-313160a8-594d-4384-9640-68c4d8c1b6da";"osd-block-313160a8-594d-4384-9640-68c4d8c1b6da";"ceph-bf94d5a5-eb04-42e3-867a-dee2886daa62";"CgvBFd-wk3r-7a0y-XPiH-WoKS-VGmX-yntCse";"1920378863616
> [2022-04-20 10:38:01,975][ceph_volume.process][INFO ] stdout
>
ceph.block_device=/dev/ceph-ee2517dc-ecb6-4c14-ab22-59d9c54f0952/osd-block-f7e67343-4fde-4e45-bc70-f44c92a178bd,ceph.block_uuid=pfWtmF-6Xlc-R2LO-kzeV-2jIw-3Ki8-gCOMwZ,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d221bc3c-8ff4-11ec-b4ba-b02628267680,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=f7e67343-4fde-4e45-bc70-f44c92a178bd,ceph.osd_id=20,ceph.osdspec_affinity=all,ceph.type=block,ceph.vdo=0";"/dev/ceph-ee2517dc-ecb6-4c14-ab22-59d9c54f0952/osd-block-f7e67343-4fde-4e45-bc70-f44c92a178bd";"osd-block-f7e67343-4fde-4e45-bc70-f44c92a178bd";"ceph-ee2517dc-ecb6-4c14-ab22-59d9c54f0952";"pfWtmF-6Xlc-R2LO-kzeV-2jIw-3Ki8-gCOMwZ";"1920378863616
> [2022-04-20 10:38:01,975][ceph_volume.process][INFO ] stdout
>
ceph.block_device=/dev/ceph-efb79afa-5c60-4a48-8419-259b72a5d73e/osd-block-078ece8f-1251-4482-b664-8f7068bc1685,ceph.block_uuid=ufliE0-MMk9-nURa-GtC0-kcaP-mR8c-iHp7Wl,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d221bc3c-8ff4-11ec-b4ba-b02628267680,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=078ece8f-1251-4482-b664-8f7068bc1685,ceph.osd_id=16,ceph.osdspec_affinity=all,ceph.type=block,ceph.vdo=0";"/dev/ceph-efb79afa-5c60-4a48-8419-259b72a5d73e/osd-block-078ece8f-1251-4482-b664-8f7068bc1685";"osd-block-078ece8f-1251-4482-b664-8f7068bc1685";"ceph-efb79afa-5c60-4a48-8419-259b72a5d73e";"ufliE0-MMk9-nURa-GtC0-kcaP-mR8c-iHp7Wl";"1920378863616
> [2022-04-20 10:38:01,976][ceph_volume.process][INFO ] Running command:
> /usr/sbin/pvs --noheadings --readonly --separator=";" -S
> lv_uuid=AbCFEt-JrjE-bfnc-HiHu-XlyH-G72j-0jwec7 -o
> pv_name,pv_tags,pv_uuid,vg_name,lv_uuid
> [2022-04-20 10:38:02,025][ceph_volume.process][INFO ] stdout
>
/dev/sdd";"";"hHdDtY-umyH-78lt-K3XC-qo7s-vGfj-SQiDLp";"ceph-2cbf3973-13a3-444a-b335-a0262cff6074";"AbCFEt-JrjE-bfnc-HiHu-XlyH-G72j-0jwec7
> [2022-04-20 10:38:02,026][ceph_volume.process][INFO ] Running command:
> /usr/sbin/pvs --noheadings --readonly --separator=";" -S
> lv_uuid=MS5ExP-ROK9-nVRG-l6wA-eHLL-Wjic-lMsYPM -o
> pv_name,pv_tags,pv_uuid,vg_name,lv_uuid
> [2022-04-20 10:38:02,088][ceph_volume.process][INFO ] stdout
>
/dev/sdg";"";"J5nuJt-ylF4-wG3Q-Twnj-Rw5h-PoIh-VHnJJq";"ceph-6f69c24e-2930-48ff-a18f-278470e558e1";"MS5ExP-ROK9-nVRG-l6wA-eHLL-Wjic-lMsYPM
> [2022-04-20 10:38:02,089][ceph_volume.process][INFO ] Running command:
> /usr/sbin/pvs --noheadings --readonly --separator=";" -S
> lv_uuid=rrJbzA-JSuc-GrSf-KsXd-VWi6-ffTW-LS5PB3 -o
> pv_name,pv_tags,pv_uuid,vg_name,lv_uuid
> [2022-04-20 10:38:02,146][ceph_volume.process][INFO ] stdout
>
/dev/sda";"";"EfXlSa-jzab-toFc-sim7-wh67-TY4D-vXwQyk";"ceph-8d9bd6eb-ce97-4940-8fed-f57f7bed3f5a";"rrJbzA-JSuc-GrSf-KsXd-VWi6-ffTW-LS5PB3
> [2022-04-20 10:38:02,147][ceph_volume.process][INFO ] Running command:
> /usr/sbin/pvs --noheadings --readonly --separator=";" -S
> lv_uuid=CB1rmb-Onhq-Vdrh-9Ide-XpA0-IBg3-TFrSNX -o
> pv_name,pv_tags,pv_uuid,vg_name,lv_uuid
> [2022-04-20 10:38:02,196][ceph_volume.process][INFO ] stdout
>
/dev/sdc";"";"1KJrej-zCY8-4Qaq-Ab10-MFQe-LRln-wjQeUj";"ceph-a8c3f39b-ec8d-4dd5-a85a-13a5773b99fa";"CB1rmb-Onhq-Vdrh-9Ide-XpA0-IBg3-TFrSNX
> [2022-04-20 10:38:02,197][ceph_volume.process][INFO ] Running command:
> /usr/sbin/pvs --noheadings --readonly --separator=";" -S
> lv_uuid=CgvBFd-wk3r-7a0y-XPiH-WoKS-VGmX-yntCse -o
> pv_name,pv_tags,pv_uuid,vg_name,lv_uuid
> [2022-04-20 10:38:02,252][ceph_volume.process][INFO ] stdout
>
/dev/sdb";"";"ceOUeN-zG4y-JMhj-Gi82-yo88-2fBY-dMD8Go";"ceph-bf94d5a5-eb04-42e3-867a-dee2886daa62";"CgvBFd-wk3r-7a0y-XPiH-WoKS-VGmX-yntCse
> [2022-04-20 10:38:02,252][ceph_volume.process][INFO ] Running command:
> /usr/sbin/pvs --noheadings --readonly --separator=";" -S
> lv_uuid=pfWtmF-6Xlc-R2LO-kzeV-2jIw-3Ki8-gCOMwZ -o
> pv_name,pv_tags,pv_uuid,vg_name,lv_uuid
> [2022-04-20 10:38:02,301][ceph_volume.process][INFO ] stdout
>
/dev/sdf";"";"j2Ilk4-12ZW-qR9u-3n5Y-gn6B-80yO-vWgdAn";"ceph-ee2517dc-ecb6-4c14-ab22-59d9c54f0952";"pfWtmF-6Xlc-R2LO-kzeV-2jIw-3Ki8-gCOMwZ
> [2022-04-20 10:38:02,302][ceph_volume.process][INFO ] Running command:
> /usr/sbin/pvs --noheadings --readonly --separator=";" -S
> lv_uuid=ufliE0-MMk9-nURa-GtC0-kcaP-mR8c-iHp7Wl -o
> pv_name,pv_tags,pv_uuid,vg_name,lv_uuid
> [2022-04-20 10:38:02,355][ceph_volume.process][INFO ] stdout
>
/dev/sde";"";"Ym7N4c-IBxm-cK3l-EOdZ-oN8c-gFjz-Nj0rVT";"ceph-efb79afa-5c60-4a48-8419-259b72a5d73e";"ufliE0-MMk9-nURa-GtC0-kcaP-mR8c-iHp7Wl
>
> On Wed, Apr 20, 2022 at 12:28 PM Eugen Block <eblock@xxxxxx> wrote:
>
>> Hi,
>>
>> have you checked /var/log/ceph/cephadm.log for any hints?
>> ceph-volume.log may also give an idea of what might be going on
>> (/var/log/ceph/<FSID>/ceph-volume.log).
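>>
>> For example, as a quick first pass (just a sketch, adjust the FSID):
>>
>> grep -iE 'error|fail' /var/log/ceph/cephadm.log | tail -n 50
>> less /var/log/ceph/<FSID>/ceph-volume.log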
>>
>> Quoting Manuel Holtgrewe <zyklenfrei@xxxxxxxxx>:
>>
>> > Dear all,
>> >
>> > I now attempted this and my host is back in the cluster, but
>> > `ceph cephadm osd activate` does not work.
>> >
>> > # ceph cephadm osd activate HOST
>> > Created no osd(s) on host HOST; already created?
>> >
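>> > (I suppose the first things to verify would be that the OSDs are
>> > still in the CRUSH map and that the LVs still carry their tags, e.g.:)
>> >
>> > # ceph osd tree | grep -A8 HOST
>> > # cephadm ceph-volume lvm list
>> >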
>> > Using --verbose is not too helpful either:
>> >
>> > bestcmds_sorted:
>> > [{'flags': 8,
>> >   'help': 'Start OSD containers for existing OSDs',
>> >   'module': 'mgr',
>> >   'perm': 'rw',
>> >   'sig': [argdesc(<class 'ceph_argparse.CephPrefix'>, req=True, name=prefix, n=1, numseen=0, prefix=cephadm),
>> >           argdesc(<class 'ceph_argparse.CephPrefix'>, req=True, name=prefix, n=1, numseen=0, prefix=osd),
>> >           argdesc(<class 'ceph_argparse.CephPrefix'>, req=True, name=prefix, n=1, numseen=0, prefix=activate),
>> >           argdesc(<class 'ceph_argparse.CephString'>, req=True, name=host, n=N, numseen=0)]}]
>> > Submitting command: {'prefix': 'cephadm osd activate', 'host': ['HOST'], 'target': ('mon-mgr', '')}
>> > submit {"prefix": "cephadm osd activate", "host": ["HOST"], "target": ["mon-mgr", ""]} to mon-mgr
>> >
>> > How can I find out what goes wrong here?
>> >
>> > Thanks!
>> > Manuel
>> >
>> > On Wed, Feb 2, 2022 at 12:43 PM Robert Sander <r.sander@xxxxxxxxxxxxxxxxxxx> wrote:
>> >
>> >> On 02.02.22 12:15, Manuel Holtgrewe wrote:
>> >> >
>> >> > Would this also work when renaming hosts at the same time?
>> >> >
>> >> > - remove host from ceph orch
>> >> > - reinstall host with different name/IP
>> >> > - add back host into ceph orch
>> >> > - use ceph osd activate
>> >> >
>> >> > as above?
>> >>
>> >> That could also work as long as the OSDs are still in the CRUSH map.
>> >> Keep in mind that the last command is
>> >>
>> >> ceph cephadm osd activate $HOSTNAME
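>> >>
>> >> As orchestrator commands the whole sequence would look roughly like
>> >> this (a sketch; 'ceph orch host add' may also want the new IP and
>> >> any labels):
>> >>
>> >> ceph orch host rm $OLD_HOSTNAME
>> >> # reinstall the machine, leaving the OSD disks untouched
>> >> ceph orch host add $NEW_HOSTNAME $NEW_IP
>> >> ceph cephadm osd activate $NEW_HOSTNAME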
>> >>
>> >> Regards
>> >> --
>> >> Robert Sander
>> >> Heinlein Consulting GmbH
>> >> Schwedter Str. 8/9b, 10119 Berlin
>> >>
>> >> https://www.heinlein-support.de
>> >>
>> >> Tel: 030 / 405051-43
>> >> Fax: 030 / 405051-19
>> >>
>> >> Amtsgericht Berlin-Charlottenburg - HRB 220009 B
>> >> Geschäftsführer: Peer Heinlein - Sitz: Berlin
>>
>>
>>
>> _______________________________________________
>> ceph-users mailing list -- ceph-users@xxxxxxx
>> To unsubscribe send an email to ceph-users-leave@xxxxxxx
>>