[root] osci-1001.infra.cin1.corp:~/cephdeploy # ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdb2 --journal /dev/sdb1 osci-1001
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdb2 --journal /dev/sdb1 osci-1001
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ]  verbose                       : False
[ceph_deploy.cli][INFO ]  bluestore                     : None
[ceph_deploy.cli][INFO ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fde72b81c68>
[ceph_deploy.cli][INFO ]  cluster                       : ceph
[ceph_deploy.cli][INFO ]  fs_type                       : xfs
[ceph_deploy.cli][INFO ]  block_wal                     : None
[ceph_deploy.cli][INFO ]  default_release               : False
[ceph_deploy.cli][INFO ]  username                      : None
[ceph_deploy.cli][INFO ]  journal                       : /dev/sdb1
[ceph_deploy.cli][INFO ]  subcommand                    : create
[ceph_deploy.cli][INFO ]  host                          : osci-1001
[ceph_deploy.cli][INFO ]  filestore                     : True
[ceph_deploy.cli][INFO ]  func                          : <function osd at 0x7fde72db0578>
[ceph_deploy.cli][INFO ]  ceph_conf                     : None
[ceph_deploy.cli][INFO ]  zap_disk                      : False
[ceph_deploy.cli][INFO ]  data                          : /dev/sdb2
[ceph_deploy.cli][INFO ]  block_db                      : None
[ceph_deploy.cli][INFO ]  dmcrypt                       : False
[ceph_deploy.cli][INFO ]  overwrite_conf                : False
[ceph_deploy.cli][INFO ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO ]  quiet                         : False
[ceph_deploy.cli][INFO ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb2
[osci-1001][DEBUG ] connected to host: osci-1001
[osci-1001][DEBUG ] detect platform information from remote host
[osci-1001][DEBUG ] detect machine type
[osci-1001][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.5.1804 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to osci-1001
[osci-1001][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[osci-1001][DEBUG ] find the location of an executable
[osci-1001][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --filestore --data /dev/sdb2 --journal /dev/sdb1
[osci-1001][WARNIN] --> RuntimeError: command returned non-zero exit status: 1
[osci-1001][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
[osci-1001][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 81522145-e31b-4325-83fd-6cfefc1b761f
[osci-1001][DEBUG ] Running command: vgcreate --force --yes ceph-7b308a5a-a8e9-48aa-86a9-39957dcbd1eb /dev/sdb2
[osci-1001][DEBUG ]  stdout: Physical volume "/dev/sdb2" successfully created.
[osci-1001][DEBUG ]  stdout: Volume group "ceph-7b308a5a-a8e9-48aa-86a9-39957dcbd1eb" successfully created
[osci-1001][DEBUG ] Running command: lvcreate --yes -l 100%FREE -n osd-data-81522145-e31b-4325-83fd-6cfefc1b761f ceph-7b308a5a-a8e9-48aa-86a9-39957dcbd1eb
[osci-1001][DEBUG ]  stdout: Logical volume "osd-data-81522145-e31b-4325-83fd-6cfefc1b761f" created.
[osci-1001][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
[osci-1001][DEBUG ] Running command: mkfs -t xfs -f -i size=2048 /dev/ceph-7b308a5a-a8e9-48aa-86a9-39957dcbd1eb/osd-data-81522145-e31b-4325-83fd-6cfefc1b761f
[osci-1001][DEBUG ]  stdout: meta-data=/dev/ceph-7b308a5a-a8e9-48aa-86a9-39957dcbd1eb/osd-data-81522145-e31b-4325-83fd-6cfefc1b761f isize=2048 agcount=4, agsize=58239488 blks
[osci-1001][DEBUG ]          =                       sectsz=4096  attr=2, projid32bit=1
[osci-1001][DEBUG ]          =                       crc=1        finobt=0, sparse=0
[osci-1001][DEBUG ] data     =                       bsize=4096   blocks=232957952, imaxpct=25
[osci-1001][DEBUG ]          =                       sunit=0      swidth=0 blks
[osci-1001][DEBUG ] naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
[osci-1001][DEBUG ] log      =internal log           bsize=4096   blocks=113749, version=2
[osci-1001][DEBUG ]          =                       sectsz=4096  sunit=1 blks, lazy-count=1
[osci-1001][DEBUG ] realtime =none                   extsz=4096   blocks=0, rtextents=0
[osci-1001][DEBUG ] Running command: mount -t xfs -o "rw,noatime,noquota,logbsize=256k,logbufs=8,inode64,allocsize=4M,delaylog" /dev/ceph-7b308a5a-a8e9-48aa-86a9-39957dcbd1eb/osd-data-81522145-e31b-4325-83fd-6cfefc1b761f /var/lib/ceph/osd/ceph-1
[osci-1001][DEBUG ]  stderr: mount: unsupported option format: "rw,noatime,noquota,logbsize=256k,logbufs=8,inode64,allocsize=4M,delaylog"
[osci-1001][DEBUG ] --> Was unable to complete a new OSD, will rollback changes
[osci-1001][DEBUG ] --> OSD will be fully purged from the cluster, because the ID was generated
[osci-1001][DEBUG ] Running command: ceph osd purge osd.1 --yes-i-really-mean-it
[osci-1001][DEBUG ]  stderr: purged osd.1
[osci-1001][ERROR ] RuntimeError: command returned non-zero exit status: 1
[ceph_deploy.osd][ERROR ] Failed to execute command: /usr/sbin/ceph-volume --cluster ceph lvm create --filestore --data /dev/sdb2 --journal /dev/sdb1
[ceph_deploy][ERROR ] GenericError: Failed to create 1 OSDs
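
The actual failure is the mount step: everything up through mkfs succeeds, then mount rejects the option string. Note that mount's stderr echoes the options *with the double quotes included*, which suggests the quote characters are being passed to mount as literal parts of the -o argument rather than being stripped by a shell. That typically happens when the value of "osd mount options xfs" in ceph.conf is wrapped in quotes; ceph.conf values are not shell-parsed, so the quotes survive into the mount call that ceph-volume builds. Separately, "delaylog" has been the default XFS behavior for a long time and the explicit mount option was deprecated and later removed upstream, so it is worth dropping regardless. A minimal sketch of the corrected setting, assuming the options are set cluster-wide in /etc/ceph/ceph.conf (section placement here is illustrative):

    # /etc/ceph/ceph.conf -- hypothetical corrected [osd] section.
    # No quotes around the value (ceph.conf is not shell-parsed),
    # and "delaylog" removed (deprecated/removed as an explicit
    # XFS mount option on newer kernels).
    [osd]
    osd mount options xfs = rw,noatime,noquota,logbsize=256k,logbufs=8,inode64,allocsize=4M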
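One way to confirm this reading before rerunning ceph-deploy is to try the same mount by hand on the node, first with and then without the literal quotes. This assumes the LV from the failed run still exists after the rollback; if not, any scratch XFS device will do:

    # Expected to fail the same way: the double quotes become part of the -o argument
    mount -t xfs -o '"rw,noatime,noquota,logbsize=256k,logbufs=8,inode64,allocsize=4M,delaylog"' \
        /dev/ceph-7b308a5a-a8e9-48aa-86a9-39957dcbd1eb/osd-data-81522145-e31b-4325-83fd-6cfefc1b761f /mnt

    # Expected to succeed (quotes gone, delaylog dropped)
    mount -t xfs -o rw,noatime,noquota,logbsize=256k,logbufs=8,inode64,allocsize=4M \
        /dev/ceph-7b308a5a-a8e9-48aa-86a9-39957dcbd1eb/osd-data-81522145-e31b-4325-83fd-6cfefc1b761f /mnt
    umount /mnt

If the unquoted mount succeeds, fixing the ceph.conf value and rerunning the ceph-deploy command above should let ceph-volume complete the OSD.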