* storage/devicelibs/lvm.py (zeroLvmMetadata): zero out the LVM metadata only. This basically means from 513 byte and on. This will also ignore the first 512 bytes if we are in a partition, but that does not matter because LVM does not touch those bits anyways. * storage/devicelibs/lvm.py (vgreduce): introduces a new argument to the function. rm means use the lvm --removemissing option. * storage/devices.py (LVMVolumeGroupDevice.complete): New function to evaluate if VG is consistent. * storage/devices.py (LVMLogicalVolumeDevice.complete): Likewise for LVs. * storage/devicetree.py (_handleSysCrapyness): New function intended to catch all unwanted behavior from the system before continuing. * storage/devicetree.py (questionReinitILVM): New function intended to ask the user what to do in case we find inconsistent LVM metadata. --- storage/devicelibs/lvm.py | 33 ++++++++++++-- storage/devices.py | 29 +++++++++++- storage/devicetree.py | 108 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 162 insertions(+), 8 deletions(-) diff --git a/storage/devicelibs/lvm.py b/storage/devicelibs/lvm.py index 8b755a4..8a0e25b 100644 --- a/storage/devicelibs/lvm.py +++ b/storage/devicelibs/lvm.py @@ -99,6 +99,22 @@ def lvm_cc_addFilterRejectRegexp(regexp): _composeConfig() # End config_args handling code. +def zeroLvmMetadata(device): + """ This zeroes from byte 513 to 200MB+513 of the disk. + + We assume that lvm has nothing to do with the first 512 bytes. + We assume that 200MB is enough. My tests failed with 100MB for + some unknown reason. + """ + try: + fd = os.open(device, os.O_WRONLY) + os.lseek(fd, 513, os.SEEK_SET) + for i in range(200): + os.write(fd, "\0"*1024*1024) + os.close(fd) + except OSError: + raise LVMError("Failed to zero LVM data on %s." % device) + def getPossiblePhysicalExtents(floor=0): """Returns a list of integers representing the possible values for the physical extent of a volume group. Value is in KB.
@@ -283,11 +299,18 @@ def vgdeactivate(vg_name): if rc: raise LVMError("vgdeactivate failed for %s" % vg_name) -def vgreduce(vg_name, pv_list): - args = ["vgreduce"] + \ - config_args + \ - [vg_name] + \ - pv_list +def vgreduce(vg_name, pv_list, rm=False): + """ Reduce a VG. + + rm -> with RemoveMissing option. + Use pv_list when rm=False, otherwise ignore pv_list and call vgreduce with + the --removemissing option. + """ + args = ["vgreduce"] + if rm: + args.extend(["--removemissing", vg_name]) + else: + args.extend([vg_name] + pv_list) rc = iutil.execWithRedirect("lvm", args, stdout = "/dev/tty5", diff --git a/storage/devices.py b/storage/devices.py index 397a648..a5761a0 100644 --- a/storage/devices.py +++ b/storage/devices.py @@ -1692,9 +1692,15 @@ class LVMVolumeGroupDevice(DMDevice): self.teardown() - lvm.vgremove(self.name) - self.notifyKernel() - self.exists = False + # this sometimes fails for some reason. + try: + lvm.vgreduce(self.name, [], rm=True) + lvm.vgremove(self.name) + except lvm.LVMError: + raise DeviceError("Could not completely remove VG %s" % self.name) + finally: + self.notifyKernel() + self.exists = False def reduce(self, pv_list): """ Remove the listed PVs from the VG. """ @@ -1823,6 +1829,15 @@ class LVMVolumeGroupDevice(DMDevice): """ A list of this VG's LVs """ return self._lvs[:] # we don't want folks changing our list + def complete(self): + """Check if the vg has all its pvs in the system + Return True if complete. + """ + if len(self.pvs) == self.pvCount: + return True + else: + return False + class LVMLogicalVolumeDevice(DMDevice): """ An LVM Logical Volume """ @@ -1992,6 +2007,15 @@ class LVMLogicalVolumeDevice(DMDevice): self.format.teardown() lvm.lvresize(self.vg.name, self._name, self.size) + def complete(self): + """ Test if vg exists and if it has all pvs. """ + if not self.vg.exists: + return False + + if not self.vg.complete(): + return False + return True + class MDRaidArrayDevice(StorageDevice): """ An mdraid (Linux RAID) device.
diff --git a/storage/devicetree.py b/storage/devicetree.py index fdcac7c..342140b 100644 --- a/storage/devicetree.py +++ b/storage/devicetree.py @@ -136,6 +136,36 @@ def questionInitializeDisk(intf=None, name=None): retVal = True return retVal +def questionReinitILVM(intf=None, pv_names=None, lv_name=None, vg_name=None): + retVal = False # The less destructive default + if not intf or not pv_names or (lv_name is None and vg_name is None): + pass + else: + if vg_name is not None: + message = "%s Volume Group" % vg_name + elif lv_name is not None: + message = "%s Logical Volume" % lv_name + + + rc = intf.messageWindow(_("Warning"), + _("Error processing LVM.\n" + "It seems that there is inconsistent LVM data. " + "(%s) make(s) up %s. " + "You can reinitialize all related PVs, which will " + "erase all LVM metadata. Or ignore, which will " + "preserve contents.") + %(str(pv_names), message), + type="custom", + custom_buttons = [ _("_Ignore drive(s)"), + _("_Re-initialize drive(s)") ], + custom_icon="question") + if rc == 0: + pass + else: + retVal = True # this means clobber. + + return retVal + class DeviceTree(object): """ A quasi-tree that represents the devices in the system. @@ -1305,12 +1335,85 @@ class DeviceTree(object): size=lv_size, exists=True) self._addDevice(lv_device) + try: lv_device.setup() except DeviceError as e: log.info("setup of %s failed: %s" % (lv_device.name, e)) + def _handleSysCrapyness(self, device): + def reinitializeVG(vg): + # First we remove VG data + try: + vg.destroy() + except DeviceError: + # the pvremoves will finish the job. + log.debug("There was an error destroying the VG %s." % vg.name) + pass + + for parent in vg.parents: + try: + parent.destroy() + except: + # does lvm.zeroLvmMetadata make sense?
+ lvm.zeroLvmMetadata(parent.path) + + # Give the parent PV a default format + kwargs = {"uuid": parent.uuid, + "label": parent.diskLabel, + "device": parent.path, + "exists": parent.exists} + parent.format = formats.getFormat(*[""], **kwargs) + + # remove VG device from list. + self._removeDevice(vg) + + + if device.type == "lvmvg": + paths = [] + for parent in device.parents: + paths.append(parent.path) + + if not device.complete() and \ + questionReinitILVM(intf=self.intf, \ + vg_name=device.name, pv_names=paths): + reinitializeVG(device) + + elif not device.complete(): + # The user chose not to reinitialize. + # hopefully this will ignore the vg components too. + self.addIgnoredDisk(device.name) + return + + elif device.type == "lvmlv": + # we might have already fixed this. + if device not in self._devices: + return + + paths = [] + for parent in device.vg.parents: + paths.append(parent.path) + + if not device.complete() and \ + questionReinitILVM(intf=self.intf, \ + lv_name=device.name, pv_names=paths): + + # destroy all lvs. + for lv in device.vg.lvs: + lv.destroy() + device.vg._removeLogVol(lv) + self._removeDevice(lv) + + reinitializeVG(device.vg) + + elif not device.complete(): + # The user chose not to reinitialize. + # hopefully this will ignore the vg components too. + for lv in device.vg.lvs: + self.addIgnoredDisk(lv.name) + return + def populate(self): """ Locate all storage devices. """ # each iteration scans any devices that have appeared since the @@ -1340,6 +1443,11 @@ class DeviceTree(object): for dev in devices: self.addUdevDevice(dev) + # After having the complete tree we make sure that the system + # inconsistencies are ignored or resolved. + for leaf in self.leaves: + self._handleSysCrapyness(leaf) + self.teardownAll() def teardownAll(self): -- 1.6.0.6 _______________________________________________ Anaconda-devel-list mailing list Anaconda-devel-list@xxxxxxxxxx https://www.redhat.com/mailman/listinfo/anaconda-devel-list