Karan Singh
To: ceph-users@xxxxxxxxxxxxxx
Sent: Tuesday, 3 December, 2013 6:27:29 PM
Subject: [ceph-users] Openstack+ceph volume mounting to vm
[root@rdo /(keystone_admin)]# virsh attach-device instance-00000018 disk.xml
error: Failed to attach device from disk.xml
error: internal error unable to execute QEMU command '__com.redhat_drive_add': Device 'drive-virtio-disk5' could not be initialized
[root@rdo /(keystone_admin)]#
My setup details:
[root@rdo /(keystone_admin)]# rpm -qa | grep -i qemu
qemu-img-0.12.1.2-2.355.el6.2.cuttlefish.async.x86_64
qemu-kvm-tools-0.12.1.2-2.355.el6.2.cuttlefish.async.x86_64
qemu-guest-agent-0.12.1.2-2.355.el6.2.cuttlefish.async.x86_64
qemu-kvm-0.12.1.2-2.355.el6.2.cuttlefish.async.x86_64
gpxe-roms-qemu-0.9.7-6.10.el6.noarch
[root@rdo /(keystone_admin)]#
[root@rdo /(keystone_admin)]# uname -a
Linux rdo 3.10.18-1.el6.elrepo.x86_64 #1 SMP Mon Nov 4 19:12:54 EST 2013 x86_64 x86_64 x86_64 GNU/Linux
[root@rdo /(keystone_admin)]#
[root@rdo /(keystone_admin)]# cat /etc/redhat-release
CentOS release 6.5 (Final)
[root@rdo /(keystone_admin)]#
[root@rdo /(keystone_admin)]# cinder list
+--------------------------------------+-----------+------------------+------+--------------+----------+-------------+
| ID | Status | Display Name | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+------------------+------+--------------+----------+-------------+
| 10cc0855-652a-4a9b-baa1-80bc86dc12ac | available | ceph-vol1 | 5 | ceph-storage | false | |
| 9671edaa-62c8-4f98-a36c-d6e59612141b | available | boot_from_volume | 20 | None | false | |
+--------------------------------------+-----------+------------------+------+--------------+----------+-------------+
[root@rdo /(keystone_admin)]#
[root@rdo /(keystone_admin)]# ceph status
cluster 0ff473d9-0670-42a3-89ff-81bbfb2e676a
health HEALTH_OK
monmap e3: 3 mons at {ceph-mon1=192.168.1.38:6789/0,ceph-mon2=192.168.1.33:6789/0,ceph-mon3=192.168.1.31:6789/0}, election epoch 30, quorum 0,1,2 ceph-mon1,ceph-mon2,ceph-mon3
osdmap e157: 11 osds: 11 up, 11 in
pgmap v12102: 448 pgs: 448 active+clean; 135 GB data, 272 GB used, 5935 GB / 6207 GB avail
mdsmap e27: 1/1/1 up {0=ceph-mon1=up:active}
[root@rdo /(keystone_admin)]#
[root@rdo /(keystone_admin)]# cat disk.xml
<disk type='network'>
<driver name="qemu" type="raw"/>
<source protocol="rbd" name="ceph-volumes/volume-10cc0855-652a-4a9b-baa1-80bc86dc12ac">
<host name='192.168.1.38' port='6789'/>
<host name='192.168.1.31' port='6789'/>
<host name='192.168.1.33' port='6789'/>
</source>
<target dev="vdf" bus="virtio"/>
<auth username='admin'>
<secret type='ceph' uuid='801a42ec-aec1-3ea8-d869-823c2de56b83'/>
</auth>
</disk>
[root@rdo /(keystone_admin)]#
[root@rdo /(keystone_admin)]# service libvirtd status
libvirtd (pid 17947) is running...
[root@rdo /(keystone_admin)]#
virsh # list
Id Name State
----------------------------------------------------
2 instance-00000018 running
virsh #
Systems Specialist, Computing Environments Group
CSC - IT Center for Science Ltd.
P.O. Box 405, FI-02101 Espoo, FINLAND
http://www.csc.fi/ | +358 (0) 503 812758
_______________________________________________
ceph-users mailing list
ceph-users@xxxxxxxxxxxxxx
http://lists.ceph.com/listinfo.cgi/ceph-users-ceph.com
_______________________________________________
ceph-users mailing list
ceph-users@xxxxxxxxxxxxxx
http://lists.ceph.com/listinfo.cgi/ceph-users-ceph.com