This patch does a few things. 1) Clean up a bunch of comments which I believe are no longer valid, or just slightly wrong. 2) Enable the stopPool command for the logical backend. The comment in the code worries about a situation where you try to "stop" the logical volume that your rootfs is on, for example. However, that situation is already handled by the LVM tools; if any logical volume in the volume group you try to stop is still active, vgchange will throw an error saying that the VG has open logical volumes, and won't deactivate it. Therefore, it should be pretty safe to try to stop logical pools; the operation will fail for volume groups that are in use, but will stop those whose volumes have been properly torn down ahead of time. 3) In deletePool, remove the -f from the vgremove command. Besides the fact that we probably don't usually want to force things, the -f option doesn't exist prior to F-9, so this would fail on F-8, RHEL-5, etc. 4) In deletePool, implement pvremove of all source devices. Note that this should also be relatively safe; it will only pvremove devices that we had previously pvcreate'd in createPool, which means they were all under the control of libvirt. It will not pvremove devices in a volume group that you previously created by hand and were merely scanning with libvirt. Signed-off-by: Chris Lalancette <clalance@xxxxxxxxxx>
Index: src/storage_backend_logical.c =================================================================== RCS file: /data/cvs/libvirt/src/storage_backend_logical.c,v retrieving revision 1.14 diff -u -r1.14 storage_backend_logical.c --- a/src/storage_backend_logical.c 8 Sep 2008 08:03:54 -0000 1.14 +++ b/src/storage_backend_logical.c 19 Sep 2008 08:23:04 -0000 @@ -294,7 +294,7 @@ unsigned int flags ATTRIBUTE_UNUSED) { /* - * # sudo vgs --noheadings -o vg_name + * # vgs --noheadings -o vg_name * VolGroup00 * VolGroup01 */ @@ -350,7 +350,6 @@ memset(zeros, 0, sizeof(zeros)); - /* XXX multiple pvs */ if (VIR_ALLOC_N(vgargv, 3 + pool->def->source.ndevice) < 0) { virStorageReportError(conn, VIR_ERR_NO_MEMORY, "%s", _("command line")); return -1; @@ -466,11 +465,11 @@ return 0; } - -/* XXX should we set LVM to inactive ? Probably not - it would - * suck if this were your LVM root fs :-) +/* + * This is actually relatively safe; if you happen to try to "stop" the + * pool that your / is on, for instance, you will get failure like: + * "Can't deactivate volume group "VolGroup00" with 3 open logical volume(s)" */ -#if 0 static int virStorageBackendLogicalStopPool(virConnectPtr conn, virStoragePoolObjPtr pool) @@ -480,7 +479,6 @@ return 0; } -#endif static int virStorageBackendLogicalDeletePool(virConnectPtr conn, @@ -488,15 +486,32 @@ unsigned int flags ATTRIBUTE_UNUSED) { const char *cmdargv[] = { - VGREMOVE, "-f", pool->def->source.name, NULL + VGREMOVE, pool->def->source.name, NULL }; + const char *pvargv[3]; + int i, error; + /* first remove the volume group */ if (virRun(conn, cmdargv, NULL) < 0) return -1; - /* XXX clear the PVs too ? ie pvremove ? 
probably ought to */ + /* now remove the pv devices and clear them out */ + error = 0; + pvargv[0] = PVREMOVE; + pvargv[2] = NULL; + for (i = 0 ; i < pool->def->source.ndevice ; i++) { + pvargv[1] = pool->def->source.devices[i].path; + if (virRun(conn, pvargv, NULL) < 0) { + error = -1; + virStorageReportError(conn, VIR_ERR_INTERNAL_ERROR, + _("cannot remove PV device %s: %s"), + pool->def->source.devices[i].path, + strerror(errno)); + break; + } + } - return 0; + return error; } @@ -610,9 +625,7 @@ .startPool = virStorageBackendLogicalStartPool, .buildPool = virStorageBackendLogicalBuildPool, .refreshPool = virStorageBackendLogicalRefreshPool, -#if 0 .stopPool = virStorageBackendLogicalStopPool, -#endif .deletePool = virStorageBackendLogicalDeletePool, .createVol = virStorageBackendLogicalCreateVol, .deleteVol = virStorageBackendLogicalDeleteVol,
-- Libvir-list mailing list Libvir-list@xxxxxxxxxx https://www.redhat.com/mailman/listinfo/libvir-list