Hi,
I am following the documentation on how to prepare and activate ceph-disk and ran into the following problem: command_check_call: Running command: /usr/bin/ceph-osd --cluster ceph --mkfs --mkkey -i 8 --monmap /var/lib/ceph/tmp/mnt.RxRUd8/activate.monmap --osd-data /var/lib/ceph/tmp/mnt.RxRUd8 --osd-journal /var/lib/ceph/tmp/mnt.RxRUd8/journal --osd-uuid 6d346826-18c0-4063-9287-d0acb6b88cc2 --keyring /var/lib/ceph/tmp/mnt.RxRUd8/keyring --setuser ceph --setgroup ceph
2016-05-06 16:44:19.432929 7f17087cc800 -1 filestore(/var/lib/ceph/tmp/mnt.RxRUd8) mkjournal error creating journal on /var/lib/ceph/tmp/mnt.RxRUd8/journal: (13) Permission denied
2016-05-06 16:44:19.432945 7f17087cc800 -1 OSD::mkfs: ObjectStore::mkfs failed with error -13
2016-05-06 16:44:19.432997 7f17087cc800 -1 [0;31m ** ERROR: error creating empty object store in /var/lib/ceph/tmp/mnt.RxRUd8: (13) Permission denied[0m
mount_activate: Failed to activate
I forced the permissions on the directory and its children, then ran the following command trying to replicate the issue, and still ran into the same problem.
[root@cinder6 ~]# ll /var/lib/ceph/tmp/mnt.RxRUd8/
total 32
-rwxrwxrwx. 1 ceph ceph 193 May 6 16:57 activate.monmap
-rwxrwxrwx. 1 ceph ceph 37 May 6 16:43 ceph_fsid
drwxrwxrwx. 3 ceph ceph 37 May 6 16:44 current
-rwxrwxrwx. 1 ceph ceph 37 May 6 16:43 fsid
lrwxrwxrwx. 1 ceph ceph 9 May 6 16:43 journal -> /dev/sda1
-rwxrwxrwx. 1 ceph ceph 37 May 6 16:43 journal_uuid
-rwxrwxrwx. 1 ceph ceph 21 May 6 16:43 magic
-rwxrwxrwx. 1 ceph ceph 4 May 6 16:44 store_version
-rwxrwxrwx. 1 ceph ceph 53 May 6 16:44 superblock
-rwxrwxrwx. 1 ceph ceph 2 May 6 16:44 whoami
[root@cinder6 ~]# /usr/bin/ceph-osd --cluster ceph --mkfs --mkkey -i 8 --monmap /var/lib/ceph/tmp/mnt.RxRUd8/activate.monmap --osd-data /var/lib/ceph/tmp/mnt.RxRUd8 --osd-journal /var/lib/ceph/tmp/mnt.RxRUd8/journal --osd-uuid 6d346826-18c0-4063-9287-d0acb6b88cc2 --keyring /var/lib/ceph/tmp/mnt.RxRUd8/keyring --setuser ceph --setgroup ceph
2016-05-06 17:02:27.198217 7f51eece0800 -1 filestore(/var/lib/ceph/tmp/mnt.RxRUd8) mkjournal error creating journal on /var/lib/ceph/tmp/mnt.RxRUd8/journal: (13) Permission denied
2016-05-06 17:02:27.198241 7f51eece0800 -1 OSD::mkfs: ObjectStore::mkfs failed with error -13
2016-05-06 17:02:27.198997 7f51eece0800 -1 ** ERROR: error creating empty object store in /var/lib/ceph/tmp/mnt.RxRUd8: (13) Permission denied
Then I tried using root as both the user and group, and ran into still more issues:
[root@cinder6 ~]# /usr/bin/ceph-osd --cluster ceph --mkfs --mkkey -i 8 --monmap /var/lib/ceph/tmp/mnt.RxRUd8/activate.monmap --osd-data /var/lib/ceph/tmp/mnt.RxRUd8 --osd-journal /var/lib/ceph/tmp/mnt.RxRUd8/journal --osd-uuid 6d346826-18c0-4063-9287-d0acb6b88cc2 --keyring /var/lib/ceph/tmp/mnt.RxRUd8/keyring --setuser root --setgroup root
SG_IO: bad/missing sense data, sb[]: 70 00 05 00 00 00 00 28 00 00 00 00 20 00 00 00 00 00 00 85 87 02 01 16 00 00 00 00 00 00 21 00
2016-05-06 17:05:23.361063 7fcb39552800 -1 journal read_header error decoding journal header
SG_IO: bad/missing sense data, sb[]: 70 00 05 00 00 00 00 28 00 00 00 00 20 00 00 00 00 00 00 85 87 02 01 16 00 00 00 00 00 00 21 00
SG_IO: bad/missing sense data, sb[]: 70 00 05 00 00 00 00 28 00 00 00 00 20 00 00 00 00 00 00 85 87 02 01 16 00 00 00 00 00 00 21 00
SG_IO: bad/missing sense data, sb[]: 70 00 05 00 00 00 00 28 00 00 00 00 20 00 00 00 00 00 00 85 87 02 01 16 00 00 00 00 00 00 21 00
2016-05-06 17:05:23.407021 7fcb39552800 -1 filestore(/var/lib/ceph/tmp/mnt.RxRUd8) could not find #-1:7b3f43c4:::osd_superblock:0# in index: (2) No such file or directory
2016-05-06 17:05:23.427628 7fcb39552800 -1 created object store /var/lib/ceph/tmp/mnt.RxRUd8 for osd.8 fsid ea1d47de-f843-4c6c-8a7c-5c5bd89b80b6
2016-05-06 17:05:23.427683 7fcb39552800 -1 auth: error reading file: /var/lib/ceph/tmp/mnt.RxRUd8/keyring: can't open /var/lib/ceph/tmp/mnt.RxRUd8/keyring: (2) No such file or directory
2016-05-06 17:05:23.427977 7fcb39552800 -1 created new key in keyring /var/lib/ceph/tmp/mnt.RxRUd8/keyring
[root@cinder6 ~]# ll /var/lib/ceph/tmp/mnt.RxRUd8/keyring
-rw-------. 1 root root 56 May 6 17:05 /var/lib/ceph/tmp/mnt.RxRUd8/keyring
[root@cinder6 ~]# ll /var/lib/ceph/tmp/mnt.RxRUd8/
total 32
-rwxrwxrwx. 1 ceph ceph 193 May 6 16:57 activate.monmap
-rwxrwxrwx. 1 ceph ceph 37 May 6 16:43 ceph_fsid
drwxrwxrwx. 3 ceph ceph 37 May 6 16:44 current
-rwxrwxrwx. 1 ceph ceph 37 May 6 16:43 fsid
lrwxrwxrwx. 1 ceph ceph 9 May 6 16:43 journal -> /dev/sda1
-rwxrwxrwx. 1 ceph ceph 37 May 6 16:43 journal_uuid
-rwxrwxrwx. 1 ceph ceph 21 May 6 16:43 magic
-rwxrwxrwx. 1 ceph ceph 4 May 6 16:44 store_version
-rwxrwxrwx. 1 ceph ceph 53 May 6 16:44 superblock
-rwxrwxrwx. 1 ceph ceph 2 May 6 16:44 whoami
[root@cinder6 ~]# /usr/bin/ceph-osd --cluster ceph --mkfs --mkkey -i 8 --monmap /var/lib/ceph/tmp/mnt.RxRUd8/activate.monmap --osd-data /var/lib/ceph/tmp/mnt.RxRUd8 --osd-journal /var/lib/ceph/tmp/mnt.RxRUd8/journal --osd-uuid 6d346826-18c0-4063-9287-d0acb6b88cc2 --keyring /var/lib/ceph/tmp/mnt.RxRUd8/keyring --setuser ceph --setgroup ceph
2016-05-06 17:02:27.198217 7f51eece0800 -1 filestore(/var/lib/ceph/tmp/mnt.RxRUd8) mkjournal error creating journal on /var/lib/ceph/tmp/mnt.RxRUd8/journal: (13) Permission denied
2016-05-06 17:02:27.198241 7f51eece0800 -1 OSD::mkfs: ObjectStore::mkfs failed with error -13
2016-05-06 17:02:27.198997 7f51eece0800 -1 ** ERROR: error creating empty object store in /var/lib/ceph/tmp/mnt.RxRUd8: (13) Permission denied
Then I tried using root as both the user and group, and ran into still more issues:
[root@cinder6 ~]# /usr/bin/ceph-osd --cluster ceph --mkfs --mkkey -i 8 --monmap /var/lib/ceph/tmp/mnt.RxRUd8/activate.monmap --osd-data /var/lib/ceph/tmp/mnt.RxRUd8 --osd-journal /var/lib/ceph/tmp/mnt.RxRUd8/journal --osd-uuid 6d346826-18c0-4063-9287-d0acb6b88cc2 --keyring /var/lib/ceph/tmp/mnt.RxRUd8/keyring --setuser root --setgroup root
SG_IO: bad/missing sense data, sb[]: 70 00 05 00 00 00 00 28 00 00 00 00 20 00 00 00 00 00 00 85 87 02 01 16 00 00 00 00 00 00 21 00
2016-05-06 17:05:23.361063 7fcb39552800 -1 journal read_header error decoding journal header
SG_IO: bad/missing sense data, sb[]: 70 00 05 00 00 00 00 28 00 00 00 00 20 00 00 00 00 00 00 85 87 02 01 16 00 00 00 00 00 00 21 00
SG_IO: bad/missing sense data, sb[]: 70 00 05 00 00 00 00 28 00 00 00 00 20 00 00 00 00 00 00 85 87 02 01 16 00 00 00 00 00 00 21 00
SG_IO: bad/missing sense data, sb[]: 70 00 05 00 00 00 00 28 00 00 00 00 20 00 00 00 00 00 00 85 87 02 01 16 00 00 00 00 00 00 21 00
2016-05-06 17:05:23.407021 7fcb39552800 -1 filestore(/var/lib/ceph/tmp/mnt.RxRUd8) could not find #-1:7b3f43c4:::osd_superblock:0# in index: (2) No such file or directory
2016-05-06 17:05:23.427628 7fcb39552800 -1 created object store /var/lib/ceph/tmp/mnt.RxRUd8 for osd.8 fsid ea1d47de-f843-4c6c-8a7c-5c5bd89b80b6
2016-05-06 17:05:23.427683 7fcb39552800 -1 auth: error reading file: /var/lib/ceph/tmp/mnt.RxRUd8/keyring: can't open /var/lib/ceph/tmp/mnt.RxRUd8/keyring: (2) No such file or directory
2016-05-06 17:05:23.427977 7fcb39552800 -1 created new key in keyring /var/lib/ceph/tmp/mnt.RxRUd8/keyring
[root@cinder6 ~]# ll /var/lib/ceph/tmp/mnt.RxRUd8/keyring
-rw-------. 1 root root 56 May 6 17:05 /var/lib/ceph/tmp/mnt.RxRUd8/keyring
I had a working Hammer installation and ran into the above issue only when I attempted to upgrade to Jewel.
I wonder if anybody can help me spot the issue.
The complete log for preparing and activating the OSD is as follows:
1. Command 1:
ceph-disk --verbose prepare --zap-disk --cluster ceph --cluster-uuid ea1d47de-f843-4c6c-8a7c-5c5bd89b80b6 --fs-type xfs /dev/sdc /dev/sda1
Output of command 1:
****************************************************************************
Caution: Found protective or hybrid MBR and corrupt GPT. Using GPT, but disk
verification and recovery are STRONGLY recommended.
****************************************************************************
GPT data structures destroyed! You may now partition the disk using fdisk or
other utilities.
command: Running command: /usr/bin/ceph-osd --check-allows-journal -i 0 --cluster ceph
command: Running command: /usr/bin/ceph-osd --check-wants-journal -i 0 --cluster ceph
command: Running command: /usr/bin/ceph-osd --check-needs-journal -i 0 --cluster ceph
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
command: Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=osd_journal_size
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_xfs
command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mkfs_options_xfs
command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
zap: Zapping partition table on /dev/sdc
command_check_call: Running command: /usr/sbin/sgdisk --zap-all -- /dev/sdc
Caution: invalid backup GPT header, but valid main header; regenerating
backup header from main header.
****************************************************************************
Caution: Found protective or hybrid MBR and corrupt GPT. Using GPT, but disk
verification and recovery are STRONGLY recommended.
****************************************************************************
GPT data structures destroyed! You may now partition the disk using fdisk or
other utilities.
command_check_call: Running command: /usr/sbin/sgdisk --clear --mbrtogpt -- /dev/sdc
Creating new GPT entries.
The operation has completed successfully.
update_partition: Calling partprobe on zapped device /dev/sdc
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
command: Running command: /usr/sbin/partprobe /dev/sdc
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
get_dm_uuid: get_dm_uuid /dev/sda1 uuid path is /sys/dev/block/8:1/dm/uuid
prepare_device: Journal /dev/sda1 is a partition
get_dm_uuid: get_dm_uuid /dev/sda1 uuid path is /sys/dev/block/8:1/dm/uuid
prepare_device: OSD will not be hot-swappable if journal is not the same device as the osd data
command: Running command: /usr/sbin/blkid -o udev -p /dev/sda1
prepare_device: Journal /dev/sda1 was not prepared with ceph-disk. Symlinking directly.
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
set_data_partition: Creating osd partition on /dev/sdc
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
ptype_tobe_for_name: name = data
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
create_partition: Creating data partition num 1 size 0 on /dev/sdc
command_check_call: Running command: /usr/sbin/sgdisk --largest-new=1 --change-name=1:ceph data --partition-guid=1:6d346826-18c0-4063-9287-d0acb6b88cc2 --typecode=1:89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be --mbrtogpt -- /dev/sdc
The operation has completed successfully.
update_partition: Calling partprobe on created device /dev/sdc
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
command: Running command: /usr/sbin/partprobe /dev/sdc
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
get_dm_uuid: get_dm_uuid /dev/sdc1 uuid path is /sys/dev/block/8:33/dm/uuid
populate_data_path_device: Creating xfs fs on /dev/sdc1
command_check_call: Running command: /usr/sbin/mkfs -t xfs -f -i size=2048 -- /dev/sdc1
meta-data="" isize=2048 agcount=4, agsize=24419397 blks
= sectsz=4096 attr=2, projid32bit=1
= crc=0 finobt=0
data = bsize=4096 blocks=97677585, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal log bsize=4096 blocks=47694, version=2
= sectsz=4096 sunit=1 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
mount: Mounting /dev/sdc1 on /var/lib/ceph/tmp/mnt.4e8Mtg with options rw,noatime,inode64
command_check_call: Running command: /usr/bin/mount -t xfs -o rw,noatime,inode64 -- /dev/sdc1 /var/lib/ceph/tmp/mnt.4e8Mtg
command: Running command: /usr/sbin/restorecon /var/lib/ceph/tmp/mnt.4e8Mtg
populate_data_path: Preparing osd data dir /var/lib/ceph/tmp/mnt.4e8Mtg
command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.4e8Mtg/ceph_fsid.25754.tmp
command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.4e8Mtg/ceph_fsid.25754.tmp
command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.4e8Mtg/fsid.25754.tmp
command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.4e8Mtg/fsid.25754.tmp
command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.4e8Mtg/magic.25754.tmp
command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.4e8Mtg/magic.25754.tmp
command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.4e8Mtg/journal_uuid.25754.tmp
command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.4e8Mtg/journal_uuid.25754.tmp
adjust_symlink: Creating symlink /var/lib/ceph/tmp/mnt.4e8Mtg/journal -> /dev/sda1
command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.4e8Mtg
command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.4e8Mtg
unmount: Unmounting /var/lib/ceph/tmp/mnt.4e8Mtg
command_check_call: Running command: /bin/umount -- /var/lib/ceph/tmp/mnt.4e8Mtg
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
command_check_call: Running command: /usr/sbin/sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/sdc
The operation has completed successfully.
update_partition: Calling partprobe on prepared device /dev/sdc
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
command: Running command: /usr/sbin/partprobe /dev/sdc
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
command_check_call: Running command: /usr/bin/udevadm trigger --action="" --sysname-match sdc1
2. Command: ceph-disk --verbose activate --mark-init systemd /dev/sdc1
Output of command 2:
main_activate: path = /dev/sdc1
get_dm_uuid: get_dm_uuid /dev/sdc1 uuid path is /sys/dev/block/8:33/dm/uuid
command: Running command: /usr/sbin/blkid -o udev -p /dev/sdc1
command: Running command: /sbin/blkid -p -s TYPE -o value -- /dev/sdc1
command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
mount: Mounting /dev/sdc1 on /var/lib/ceph/tmp/mnt.RxRUd8 with options rw,noatime,inode64
command_check_call: Running command: /usr/bin/mount -t xfs -o rw,noatime,inode64 -- /dev/sdc1 /var/lib/ceph/tmp/mnt.RxRUd8
command: Running command: /usr/sbin/restorecon /var/lib/ceph/tmp/mnt.RxRUd8
activate: Cluster uuid is ea1d47de-f843-4c6c-8a7c-5c5bd89b80b6
command: Running command:
activate: Cluster name is ceph
activate: OSD uuid is 6d346826-18c0-4063-9287-d0acb6b88cc2
allocate_osd_id: Allocating OSD id...
command: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd create --concise 6d346826-18c0-4063-9287-d0acb6b88cc2
command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.RxRUd8/whoami.25824.tmp
command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.RxRUd8/whoami.25824.tmp
activate: OSD id is 8
activate: Initializing OSD...
command_check_call: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/tmp/mnt.RxRUd8/activate.monmap
got monmap epoch 1
command_check_call: Running command: /usr/bin/ceph-osd --cluster ceph --mkfs --mkkey -i 8 --monmap /var/lib/ceph/tmp/mnt.RxRUd8/activate.monmap --osd-data /var/lib/ceph/tmp/mnt.RxRUd8 --osd-journal /var/lib/ceph/tmp/mnt.RxRUd8/journal --osd-uuid 6d346826-18c0-4063-9287-d0acb6b88cc2 --keyring /var/lib/ceph/tmp/mnt.RxRUd8/keyring --setuser ceph --setgroup ceph
2016-05-06 16:44:19.432929 7f17087cc800 -1 filestore(/var/lib/ceph/tmp/mnt.RxRUd8) mkjournal error creating journal on /var/lib/ceph/tmp/mnt.RxRUd8/journal: (13) Permission denied
2016-05-06 16:44:19.432945 7f17087cc800 -1 OSD::mkfs: ObjectStore::mkfs failed with error -13
2016-05-06 16:44:19.432997 7f17087cc800 -1 [0;31m ** ERROR: error creating empty object store in /var/lib/ceph/tmp/mnt.RxRUd8: (13) Permission denied[0m
mount_activate: Failed to activate
unmount: Unmounting /var/lib/ceph/tmp/mnt.RxRUd8
command_check_call: Running command: /bin/umount -- /var/lib/ceph/tmp/mnt.RxRUd8
Traceback (most recent call last):
File "/usr/sbin/ceph-disk", line 9, in <module>
load_entry_point('ceph-disk==1.0.0', 'console_scripts', 'ceph-disk')()
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 4964, in run
main(sys.argv[1:])
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 4915, in main
args.func(args)
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 3269, in main_activate
reactivate=args.reactivate,
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 3026, in mount_activate
(osd_id, cluster) = activate(path, activate_key_template, init)
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 3202, in activate
keyring=keyring,
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 2695, in mkfs
'--setgroup', get_ceph_group(),
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 439, in command_check_call
return subprocess.check_call(arguments)
File "/usr/lib64/python2.7/subprocess.py", line 542, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['/usr/bin/ceph-osd', '--cluster', 'ceph', '--mkfs', '--mkkey', '-i', '8', '--monmap', '/var/lib/ceph/tmp/mnt.RxRUd8/activate.monmap', '--osd-data', '/var/lib/ceph/tmp/mnt.RxRUd8', '--osd-journal', '/var/lib/ceph/tmp/mnt.RxRUd8/journal', '--osd-uuid', '6d346826-18c0-4063-9287-d0acb6b88cc2', '--keyring', '/var/lib/ceph/tmp/mnt.RxRUd8/keyring', '--setuser', 'ceph', '--setgroup', 'ceph']' returned non-zero exit status 1
1. Command 1:
ceph-disk --verbose prepare --zap-disk --cluster ceph --cluster-uuid ea1d47de-f843-4c6c-8a7c-5c5bd89b80b6 --fs-type xfs /dev/sdc /dev/sda1
Output of command 1:
****************************************************************************
Caution: Found protective or hybrid MBR and corrupt GPT. Using GPT, but disk
verification and recovery are STRONGLY recommended.
****************************************************************************
GPT data structures destroyed! You may now partition the disk using fdisk or
other utilities.
command: Running command: /usr/bin/ceph-osd --check-allows-journal -i 0 --cluster ceph
command: Running command: /usr/bin/ceph-osd --check-wants-journal -i 0 --cluster ceph
command: Running command: /usr/bin/ceph-osd --check-needs-journal -i 0 --cluster ceph
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
command: Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=osd_journal_size
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_xfs
command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mkfs_options_xfs
command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
zap: Zapping partition table on /dev/sdc
command_check_call: Running command: /usr/sbin/sgdisk --zap-all -- /dev/sdc
Caution: invalid backup GPT header, but valid main header; regenerating
backup header from main header.
****************************************************************************
Caution: Found protective or hybrid MBR and corrupt GPT. Using GPT, but disk
verification and recovery are STRONGLY recommended.
****************************************************************************
GPT data structures destroyed! You may now partition the disk using fdisk or
other utilities.
command_check_call: Running command: /usr/sbin/sgdisk --clear --mbrtogpt -- /dev/sdc
Creating new GPT entries.
The operation has completed successfully.
update_partition: Calling partprobe on zapped device /dev/sdc
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
command: Running command: /usr/sbin/partprobe /dev/sdc
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
get_dm_uuid: get_dm_uuid /dev/sda1 uuid path is /sys/dev/block/8:1/dm/uuid
prepare_device: Journal /dev/sda1 is a partition
get_dm_uuid: get_dm_uuid /dev/sda1 uuid path is /sys/dev/block/8:1/dm/uuid
prepare_device: OSD will not be hot-swappable if journal is not the same device as the osd data
command: Running command: /usr/sbin/blkid -o udev -p /dev/sda1
prepare_device: Journal /dev/sda1 was not prepared with ceph-disk. Symlinking directly.
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
set_data_partition: Creating osd partition on /dev/sdc
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
ptype_tobe_for_name: name = data
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
create_partition: Creating data partition num 1 size 0 on /dev/sdc
command_check_call: Running command: /usr/sbin/sgdisk --largest-new=1 --change-name=1:ceph data --partition-guid=1:6d346826-18c0-4063-9287-d0acb6b88cc2 --typecode=1:89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be --mbrtogpt -- /dev/sdc
The operation has completed successfully.
update_partition: Calling partprobe on created device /dev/sdc
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
command: Running command: /usr/sbin/partprobe /dev/sdc
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
get_dm_uuid: get_dm_uuid /dev/sdc1 uuid path is /sys/dev/block/8:33/dm/uuid
populate_data_path_device: Creating xfs fs on /dev/sdc1
command_check_call: Running command: /usr/sbin/mkfs -t xfs -f -i size=2048 -- /dev/sdc1
meta-data="" isize=2048 agcount=4, agsize=24419397 blks
= sectsz=4096 attr=2, projid32bit=1
= crc=0 finobt=0
data = bsize=4096 blocks=97677585, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal log bsize=4096 blocks=47694, version=2
= sectsz=4096 sunit=1 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
mount: Mounting /dev/sdc1 on /var/lib/ceph/tmp/mnt.4e8Mtg with options rw,noatime,inode64
command_check_call: Running command: /usr/bin/mount -t xfs -o rw,noatime,inode64 -- /dev/sdc1 /var/lib/ceph/tmp/mnt.4e8Mtg
command: Running command: /usr/sbin/restorecon /var/lib/ceph/tmp/mnt.4e8Mtg
populate_data_path: Preparing osd data dir /var/lib/ceph/tmp/mnt.4e8Mtg
command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.4e8Mtg/ceph_fsid.25754.tmp
command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.4e8Mtg/ceph_fsid.25754.tmp
command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.4e8Mtg/fsid.25754.tmp
command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.4e8Mtg/fsid.25754.tmp
command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.4e8Mtg/magic.25754.tmp
command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.4e8Mtg/magic.25754.tmp
command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.4e8Mtg/journal_uuid.25754.tmp
command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.4e8Mtg/journal_uuid.25754.tmp
adjust_symlink: Creating symlink /var/lib/ceph/tmp/mnt.4e8Mtg/journal -> /dev/sda1
command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.4e8Mtg
command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.4e8Mtg
unmount: Unmounting /var/lib/ceph/tmp/mnt.4e8Mtg
command_check_call: Running command: /bin/umount -- /var/lib/ceph/tmp/mnt.4e8Mtg
get_dm_uuid: get_dm_uuid /dev/sdc uuid path is /sys/dev/block/8:32/dm/uuid
command_check_call: Running command: /usr/sbin/sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/sdc
The operation has completed successfully.
update_partition: Calling partprobe on prepared device /dev/sdc
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
command: Running command: /usr/sbin/partprobe /dev/sdc
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
command_check_call: Running command: /usr/bin/udevadm trigger --action="" --sysname-match sdc1
2. Command: ceph-disk --verbose activate --mark-init systemd /dev/sdc1
Output of command 2:
main_activate: path = /dev/sdc1
get_dm_uuid: get_dm_uuid /dev/sdc1 uuid path is /sys/dev/block/8:33/dm/uuid
command: Running command: /usr/sbin/blkid -o udev -p /dev/sdc1
command: Running command: /sbin/blkid -p -s TYPE -o value -- /dev/sdc1
command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
mount: Mounting /dev/sdc1 on /var/lib/ceph/tmp/mnt.RxRUd8 with options rw,noatime,inode64
command_check_call: Running command: /usr/bin/mount -t xfs -o rw,noatime,inode64 -- /dev/sdc1 /var/lib/ceph/tmp/mnt.RxRUd8
command: Running command: /usr/sbin/restorecon /var/lib/ceph/tmp/mnt.RxRUd8
activate: Cluster uuid is ea1d47de-f843-4c6c-8a7c-5c5bd89b80b6
command: Running command:
activate: Cluster name is ceph
activate: OSD uuid is 6d346826-18c0-4063-9287-d0acb6b88cc2
allocate_osd_id: Allocating OSD id...
command: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd create --concise 6d346826-18c0-4063-9287-d0acb6b88cc2
command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.RxRUd8/whoami.25824.tmp
command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.RxRUd8/whoami.25824.tmp
activate: OSD id is 8
activate: Initializing OSD...
command_check_call: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/tmp/mnt.RxRUd8/activate.monmap
got monmap epoch 1
command_check_call: Running command: /usr/bin/ceph-osd --cluster ceph --mkfs --mkkey -i 8 --monmap /var/lib/ceph/tmp/mnt.RxRUd8/activate.monmap --osd-data /var/lib/ceph/tmp/mnt.RxRUd8 --osd-journal /var/lib/ceph/tmp/mnt.RxRUd8/journal --osd-uuid 6d346826-18c0-4063-9287-d0acb6b88cc2 --keyring /var/lib/ceph/tmp/mnt.RxRUd8/keyring --setuser ceph --setgroup ceph
2016-05-06 16:44:19.432929 7f17087cc800 -1 filestore(/var/lib/ceph/tmp/mnt.RxRUd8) mkjournal error creating journal on /var/lib/ceph/tmp/mnt.RxRUd8/journal: (13) Permission denied
2016-05-06 16:44:19.432945 7f17087cc800 -1 OSD::mkfs: ObjectStore::mkfs failed with error -13
2016-05-06 16:44:19.432997 7f17087cc800 -1 [0;31m ** ERROR: error creating empty object store in /var/lib/ceph/tmp/mnt.RxRUd8: (13) Permission denied[0m
mount_activate: Failed to activate
unmount: Unmounting /var/lib/ceph/tmp/mnt.RxRUd8
command_check_call: Running command: /bin/umount -- /var/lib/ceph/tmp/mnt.RxRUd8
Traceback (most recent call last):
File "/usr/sbin/ceph-disk", line 9, in <module>
load_entry_point('ceph-disk==1.0.0', 'console_scripts', 'ceph-disk')()
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 4964, in run
main(sys.argv[1:])
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 4915, in main
args.func(args)
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 3269, in main_activate
reactivate=args.reactivate,
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 3026, in mount_activate
(osd_id, cluster) = activate(path, activate_key_template, init)
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 3202, in activate
keyring=keyring,
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 2695, in mkfs
'--setgroup', get_ceph_group(),
File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", line 439, in command_check_call
return subprocess.check_call(arguments)
File "/usr/lib64/python2.7/subprocess.py", line 542, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['/usr/bin/ceph-osd', '--cluster', 'ceph', '--mkfs', '--mkkey', '-i', '8', '--monmap', '/var/lib/ceph/tmp/mnt.RxRUd8/activate.monmap', '--osd-data', '/var/lib/ceph/tmp/mnt.RxRUd8', '--osd-journal', '/var/lib/ceph/tmp/mnt.RxRUd8/journal', '--osd-uuid', '6d346826-18c0-4063-9287-d0acb6b88cc2', '--keyring', '/var/lib/ceph/tmp/mnt.RxRUd8/keyring', '--setuser', 'ceph', '--setgroup', 'ceph']' returned non-zero exit status 1
_______________________________________________ ceph-users mailing list ceph-users@xxxxxxxxxxxxxx http://lists.ceph.com/listinfo.cgi/ceph-users-ceph.com