root@cephadmin:~/mycluster# ceph-deploy osd activate cephnode1:/home/data/osd1 cephnode2:/home/data/osd2 cephnode3:/home/data/osd3
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.37): /usr/bin/ceph-deploy osd activate cephnode1:/home/data/osd1 cephnode2:/home/data/osd2 cephnode3:/home/data/osd3
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : activate
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0xbcc7a0>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0xbbc050>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  disk                          : [('cephnode1', '/home/data/osd1', None), ('cephnode2', '/home/data/osd2', None), ('cephnode3', '/home/data/osd3', None)]
[ceph_deploy.osd][DEBUG ] Activating cluster ceph disks cephnode1:/home/data/osd1: cephnode2:/home/data/osd2: cephnode3:/home/data/osd3:
[cephnode1][DEBUG ] connected to host: cephnode1
[cephnode1][DEBUG ] detect platform information from remote host
[cephnode1][DEBUG ] detect machine type
[cephnode1][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.3.1611 Core
[ceph_deploy.osd][DEBUG ] activating host cephnode1 disk /home/data/osd1
[ceph_deploy.osd][DEBUG ] will use init type: systemd
[cephnode1][DEBUG ] find the location of an executable
[cephnode1][INFO  ] Running command: /usr/sbin/ceph-disk -v activate --mark-init systemd --mount /home/data/osd1
[cephnode1][WARNIN] main_activate: path = /home/data/osd1
[cephnode1][WARNIN] activate: Cluster uuid is 228e2b14-a6f2-4a46-b99e-673e3cd6774f
[cephnode1][WARNIN] command: Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid
[cephnode1][WARNIN] activate: Cluster name is ceph
[cephnode1][WARNIN] activate: OSD uuid is 147347cb-cc6b-400d-9a72-abae8cc75207
[cephnode1][WARNIN] allocate_osd_id: Allocating OSD id...
[cephnode1][WARNIN] command: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd create --concise 147347cb-cc6b-400d-9a72-abae8cc75207
[cephnode1][WARNIN] command: Running command: /usr/sbin/restorecon -R /home/data/osd1/whoami.3203.tmp
[cephnode1][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /home/data/osd1/whoami.3203.tmp
[cephnode1][WARNIN] activate: OSD id is 0
[cephnode1][WARNIN] activate: Initializing OSD...
[cephnode1][WARNIN] command_check_call: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /home/data/osd1/activate.monmap
[cephnode1][WARNIN] got monmap epoch 1
[cephnode1][WARNIN] command: Running command: /usr/bin/timeout 300 ceph-osd --cluster ceph --mkfs --mkkey -i 0 --monmap /home/data/osd1/activate.monmap --osd-data /home/data/osd1 --osd-journal /home/data/osd1/journal --osd-uuid 147347cb-cc6b-400d-9a72-abae8cc75207 --keyring /home/data/osd1/keyring --setuser ceph --setgroup ceph
[cephnode1][WARNIN] activate: Marking with init system systemd
[cephnode1][WARNIN] command: Running command: /usr/sbin/restorecon -R /home/data/osd1/systemd
[cephnode1][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /home/data/osd1/systemd
[cephnode1][WARNIN] activate: Authorizing OSD key...
[cephnode1][WARNIN] command_check_call: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring auth add osd.0 -i /home/data/osd1/keyring osd allow * mon allow profile osd
[cephnode1][WARNIN] added key for osd.0
[cephnode1][WARNIN] command: Running command: /usr/sbin/restorecon -R /home/data/osd1/active.3203.tmp
[cephnode1][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /home/data/osd1/active.3203.tmp
[cephnode1][WARNIN] activate: ceph osd.0 data dir is ready at /home/data/osd1
[cephnode1][WARNIN] activate_dir: Creating symlink /var/lib/ceph/osd/ceph-0 -> /home/data/osd1
[cephnode1][WARNIN] start_daemon: Starting ceph osd.0...
[cephnode1][WARNIN] command_check_call: Running command: /usr/bin/systemctl enable ceph-osd@0
[cephnode1][WARNIN] Created symlink from /etc/systemd/system/ceph-osd.target.wants/ceph-osd@0.service to /usr/lib/systemd/system/ceph-osd@.service.
[cephnode1][WARNIN] command_check_call: Running command: /usr/bin/systemctl start ceph-osd@0
[cephnode1][WARNIN] Job for ceph-osd@0.service failed because the control process exited with error code. See "systemctl status ceph-osd@0.service" and "journalctl -xe" for details.
[cephnode1][WARNIN] Traceback (most recent call last):
[cephnode1][WARNIN]   File "/usr/sbin/ceph-disk", line 9, in <module>
[cephnode1][WARNIN]     load_entry_point('ceph-disk==1.0.0', 'console_scripts', 'ceph-disk')()
[cephnode1][WARNIN]   File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 5009, in run
[cephnode1][WARNIN]     main(sys.argv[1:])
[cephnode1][WARNIN]   File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 4960, in main
[cephnode1][WARNIN]     args.func(args)
[cephnode1][WARNIN]   File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 3359, in main_activate
[cephnode1][WARNIN]     osd_id=osd_id,
[cephnode1][WARNIN]   File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 2906, in start_daemon
[cephnode1][WARNIN]     raise Error('ceph osd start failed', e)
[cephnode1][WARNIN] ceph_disk.main.Error
[cephnode1][ERROR ] RuntimeError: command returned non-zero exit status: 1
[ceph_deploy][ERROR ] RuntimeError: Failed to execute command: /usr/sbin/ceph-disk -v activate --mark-init systemd --mount /home/data/osd1
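
The activation aborts when systemd cannot start the ceph-osd@0 unit on cephnode1; ceph-disk only re-raises that failure as "ceph osd start failed", so the real reason is on the node itself. A minimal diagnostic sketch, following the hints in the error message above (the unit name ceph-osd@0 and data dir /home/data/osd1 are taken from this log; the foreground ceph-osd invocation is assumed to mirror the stock ceph-osd@.service ExecStart for this release and should be checked against the local unit file first):

    # run on cephnode1
    systemctl status ceph-osd@0.service                 # why the control process exited
    journalctl -xeu ceph-osd@0.service                  # recent journal entries for the unit
    ls -ld /home/data/osd1 /var/lib/ceph/osd/ceph-0     # data dir should be ceph:ceph and symlinked
    # if the journal is not conclusive, run the daemon in the foreground to see the startup error directly
    /usr/bin/ceph-osd -f --cluster ceph --id 0 --setuser ceph --setgroup ceph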