OpenStack RBD EC pool

Hi,

I tried to add an "archive" storage class to our OpenStack environment by introducing a second storage backend that offers RBD volumes with their data in an erasure coded pool. Since I have to specify a data pool for that, I tried it as follows:


################### keyring files:
ceph.client.cinder.keyring
ceph.client.cinder-ec.keyring

################### ceph.conf
[global]
fsid = b5e30221-a214-353c-b66b-8c37b4349123
mon host = ceph-mon.service.i.ewcs.ch
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
###################


################## ceph.ec.conf
[global]
fsid = b5e30221-a214-353c-b66b-8c37b4349123
mon host = ceph-mon.service.i.xxxx.xxxx
auth cluster required = cephx
auth service required = cephx
auth client required = cephx

[client.cinder-ec]
rbd default data pool = ewos1-prod_cinder_ec
#################
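
The idea behind "rbd default data pool" is, as far as I understand it, that the image header and metadata stay in the replicated pool you address, while the data objects land in the EC pool. A quick sanity check on the rbd level (pool names taken from the "ceph osd pool create" commands further down, image name just a throwaway) looks like:

rbd create --size 1G --data-pool cinder_ec cinder_ec_metadata/ec-test
rbd info cinder_ec_metadata/ec-test    # should show "data_pool: cinder_ec"
rbd rm cinder_ec_metadata/ec-test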

################# cinder-volume.conf
...
[ceph1-rp3-1]
volume_backend_name = ceph1-rp3-1
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = xxxxxxxcc8b-xxxxxx-ae16xxxxxx
rbd_pool = cinder
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
report_discard_supported = true
rbd_exclusive_cinder_pool = true
enable_deferred_deletion = true
deferred_deletion_delay = 259200
deferred_deletion_purge_interval = 3600

[ceph1-ec-1]
volume_backend_name = ceph1-ec-1
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_ceph_conf = /etc/ceph/ceph.ec.conf
rbd_user = cinder-ec
rbd_secret_uuid = xxxxxxcc8b-xxxxxx-ae16xxxxxx
rbd_pool = cinder_ec_metadata
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 3
rbd_store_chunk_size = 4
rados_connect_timeout = -1
report_discard_supported = true
rbd_exclusive_cinder_pool = true
enable_deferred_deletion = true
deferred_deletion_delay = 259200
deferred_deletion_purge_interval = 3600
##############################
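
Both backends are listed in enabled_backends, and the "ec1" volume type I use further down is tied to the EC backend roughly like this (the type name is simply what I chose):

# cinder.conf, [DEFAULT] section
enabled_backends = ceph1-rp3-1,ceph1-ec-1

openstack volume type create ec1
openstack volume type set --property volume_backend_name=ceph1-ec-1 ec1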


I created three pools (for Cinder) like this:
ceph osd pool create cinder 512 512 replicated rack_replicated_rule
ceph osd pool create cinder_ec_metadata 6 6 replicated rack_replicated_rule
ceph osd pool create cinder_ec 512 512 erasure ec32
ceph osd pool set cinder_ec allow_ec_overwrites true
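
For completeness, this is roughly how I initialized and checked the pools for rbd use (commands from memory, may not be verbatim):

rbd pool init cinder
rbd pool init cinder_ec_metadata
ceph osd pool application enable cinder_ec rbd
ceph osd pool get cinder_ec allow_ec_overwrites    # -> allow_ec_overwrites: true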


I am able to use the ceph1-rp3-1 backend without any errors (create, attach, delete, snapshot). I am also able to create volumes on the EC backend via:

openstack volume create --size 100 --type ec1 myvolume_ec
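
The attach itself is a plain (instance UUID omitted here):

openstack server add volume <instance-uuid> myvolume_ec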

but the attach fails for every instance I try; I get errors like:

==> libvirtd.log <==
2019-02-15 22:23:01.771+0000: 27895: error : qemuMonitorJSONCheckError:392 : internal error: unable to execute QEMU command 'device_add': Property 'scsi-hd.drive' can't find value 'drive-scsi0-0-0-3'

My instance has three disks (root, swap, and one replicated Cinder volume) and its domain XML looks like:

<domain type='kvm' id='1'>
  <name>instance-0000254e</name>
  <uuid>6d41c54b-753a-46c7-a573-bedf8822fbf5</uuid>
  <metadata>
    <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.0">
      <nova:package version="18.1.0"/>
      <nova:name>x3-1</nova:name>
      <nova:creationTime>2019-02-15 21:18:24</nova:creationTime>
      <nova:flavor name="xxxxxxxx">
        <nova:memory>16384</nova:memory>
        <nova:disk>80</nova:disk>
        <nova:swap>8192</nova:swap>
        <nova:ephemeral>0</nova:ephemeral>
        <nova:vcpus>4</nova:vcpus>
      </nova:flavor>
      <nova:owner>
...
      </nova:owner>
      <nova:root type="image" uuid="9e1e0d91-3a87-46b4-af52-ac3ebc257492"/>
    </nova:instance>
...
  <os>
    <type arch='x86_64' machine='pc-i440fx-bionic'>hvm</type>
    <boot dev='hd'/>
    <smbios mode='sysinfo'/>
...
  <devices>
    <emulator>/usr/bin/qemu-system-x86_64</emulator>
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw' cache='none' discard='unmap'/>
      <auth username='nova'>
        <secret type='ceph' uuid='xxxxxxxxxxxxxxxx'/>
      </auth>
      <source protocol='rbd' name='nova/6d41c54b-753a-46c7-a573-bedf8822fbf5_disk'>
        <host name='10.38.65.71' port='6789'/>
        <host name='10.38.66.71' port='6789'/>
        <host name='10.38.67.71' port='6789'/>
      </source>
      <target dev='sda' bus='scsi'/>
      <alias name='scsi0-0-0-0'/>
      <address type='drive' controller='0' bus='0' target='0' unit='0'/>
    </disk>
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw' cache='none' discard='unmap'/>
      <auth username='nova'>
        <secret type='ceph' uuid='xxxxxxx'/>
      </auth>
      <source protocol='rbd' name='nova/6d41c54b-753a-46c7-a573-bedf8822fbf5_disk.swap'>
        <host name='10.38.65.71' port='6789'/>
        <host name='10.38.66.71' port='6789'/>
        <host name='10.38.67.71' port='6789'/>
      </source>
      <target dev='sdb' bus='scsi'/>
      <alias name='scsi0-0-0-1'/>
      <address type='drive' controller='0' bus='0' target='0' unit='1'/>
    </disk>
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw' cache='none' discard='unmap'/>
      <auth username='cinder'>
        <secret type='ceph' uuid='055816c5-cc8b-4c74-ae16-e2cbbf50af85'/>
      </auth>
      <source protocol='rbd' name='cinder/volume-01e8cb68-1f86-4142-958c-fdd1c301833a'>
        <host name='10.38.65.71' port='6789'/>
        <host name='10.38.66.71' port='6789'/>
        <host name='10.38.67.71' port='6789'/>
      </source>
      <target dev='sdc' bus='scsi'/>
      <iotune>
        <total_bytes_sec>125829120</total_bytes_sec>
        <total_iops_sec>1000</total_iops_sec>
      </iotune>
      <serial>01e8cb68-1f86-4142-958c-fdd1c301833a</serial>
      <alias name='scsi0-0-0-2'/>
      <address type='drive' controller='0' bus='0' target='0' unit='2'/>
    </disk>
    <controller type='scsi' index='0' model='virtio-scsi'>
      <alias name='scsi0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
    </controller>
...


Any ideas?

All the best,
Florian
