Re: [PATCH] Add dmraid support.

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Sun, 2009-03-01 at 18:49 +0100, Joel Granados Moreno wrote:
> ---
>  storage/devices.py        |   69 +++++++++++++++++++++++++++++++++++++++++----
>  storage/devicetree.py     |   60 +++++++++++++++++++++++++++++++++++---
>  storage/formats/dmraid.py |   23 +++++++++++----
>  storage/udev.py           |   10 ++++++-
>  4 files changed, 144 insertions(+), 18 deletions(-)
> 
> diff --git a/storage/devices.py b/storage/devices.py
> index 3cc2599..63a0638 100644
> --- a/storage/devices.py
> +++ b/storage/devices.py
> @@ -536,7 +536,6 @@ class StorageDevice(Device):
>          #for parent in self.parents:
>          #    parent.removeChild()
>  
> -
>  class DiskDevice(StorageDevice):
>      """ A disk """
>      _type = "disk"
> @@ -575,9 +574,11 @@ class DiskDevice(StorageDevice):
>              if not self.partedDevice:
>                  raise DeviceError("cannot find parted device instance")
>              log.debug("creating parted Disk: %s" % self.path)
> -            self.partedDisk = parted.Disk(device=self.partedDevice)
> -            if not self.partedDisk:
> -                raise DeviceError("failed to create parted Disk")
> +            try:
> +                self.partedDisk = parted.Disk(device=self.partedDevice)
> +            except:
> +                # It is perfectly fine to have a device with no label.
> +                self.partedDisk = None
>  
>              self.probe()
>  
> @@ -1981,16 +1982,18 @@ class DMRaidArrayDevice(DMDevice):
>      _type = "dm-raid array"
>      _packages = ["dmraid"]
>  
> -    def __init__(self, name, format=None, size=None,
> +    def __init__(self, name, raidSet=None, level=None, format=None, size=None,
>                   exists=None, parents=None, sysfsPath=''):
>          """ Create a DMRaidArrayDevice instance.
>  
>              Arguments:
>  
> -                name -- the device name (generally a device node's basename)
> +                name -- the dmraid name also the device node's basename
>  
>              Keyword Arguments:
>  
> +                raidSet -- the RaidSet object from block
> +                level -- the type of dmraid
>                  parents -- a list of the member devices
>                  sysfsPath -- sysfs device path
>                  size -- the device's size
> @@ -2005,6 +2008,60 @@ class DMRaidArrayDevice(DMDevice):
>                            parents=parents, sysfsPath=sysfsPath,
>                            exists=exists)
>  
> +        self.formatClass = get_device_format_class("dmraidmember")
> +        if not self.formatClass:
> +            raise DeviceError("cannot find class for 'dmraidmember'")
> +
> +
> +        self._raidSet = raidSet
> +        self._level = level
> +
> +    @property
> +    def raidSet(self):
> +        return self._raidSet
> +
> +    @raidSet.setter
> +    def raidSet(self, val):
> +        # If we change self._raidSet, parents list will be invalid.
> +        # Don't allow the change.
> +        pass

If you want the raidSet attribute to be read-only, just don't define the
setter. It is better for users of the code to get an error when they
attempt something they cannot do than to have it silently ignored.

> +
> +    def _addDevice(self, device):
> +        """ Add a new member device to the array.
> +
> +            XXX This is for use when probing devices, not for modification
> +                of arrays.
> +        """
> +        log_method_call(self, self.name, device=device.name, status=self.status)
> +
> +        if not self.exists:
> +            raise DeviceError("device has not been created")
> +
> +        if not isinstance(device.format, self.formatClass):
> +            raise ValueError("invalid device format for dmraid member")
> +
> +        if device in self.members:
> +            raise ValueError("device is already a member of this array")
> +
> +        # we added it, so now set up the relations
> +        self.devices.append(device)
> +        device.addChild()
> +
> +    @property
> +    def members(self):
> +        return self.parents
> +
> +    @property
> +    def devices(self):
> +        """ Return a list of this array's member device instances. """
> +        return self.parents
> +
> +    def activate(self, mknod=True):
> +        self._raidSet.activate(mknod=mknod)
> +
> +    def deactivate(self):
> +        self._raidSet.deactivate()
> +
>      def probe(self):
>          """ Probe for any missing information about this device.
>  
> diff --git a/storage/devicetree.py b/storage/devicetree.py
> index cdd41f0..6790c66 100644
> --- a/storage/devicetree.py
> +++ b/storage/devicetree.py
> @@ -21,6 +21,7 @@
>  #
>  
>  import os
> +import block
>  
>  from errors import *
>  from devices import *
> @@ -561,6 +562,17 @@ class DeviceTree(object):
>                                         minor=udev_device_get_minor(info),
>                                         sysfsPath=sysfs_path)
>                  self._addDevice(device)
> +        elif udev_device_is_dmraid(info):
> +            # This is just temporary as I need to differentiate between the
> +            # devices that have partitions and devices that don't.
> +            log.debug("%s is part of a dmraid" % name)
> +            device = self.getDeviceByName(name)
> +            if device is None:
> +                device = StorageDevice(name,
> +                                major=udev_device_get_major(info),
> +                                minor=udev_device_get_minor(info),
> +                                sysfsPath=sysfs_path)
> +                self._addDevice(device)
>          elif udev_device_is_disk(info):
>              log.debug("%s is a disk" % name)
>              device = self.getDeviceByName(name)
> @@ -619,9 +631,8 @@ class DeviceTree(object):
>                  # mdraid
>                  kwargs["mdUuid"] = udev_device_get_md_uuid(info)
>              elif format_type == "isw_raid_member":
> -                # dmraid
> -                # TODO: collect name of containing raidset
> -                # TODO: implement dmraid member format class
> +                # We dont add any new args because we intend to use the same
> +                # block.RaidSet object for all the related devices.
>                  pass
>              elif format_type == "LVM2_member":
>                  # lvm
> @@ -703,8 +714,47 @@ class DeviceTree(object):
>                                                   parents=[device])
>                      self._addDevice(md_array)
>              elif format.type == "dmraidmember":
> -                # look up or create the dmraid array
> -                pass
> +                import pdb ; pdb.set_trace()

This will have to go away.

> +                # Have we already created the DMRaidArrayDevice?
> +                rs = block.getRaidSetFromRelatedMem(uuid=uuid, name=name, \
> +                        major = udev_device_get_major(info), \
> +                        minor = udev_device_get_minor(info))
> +                if rs is None:
> +                    # FIXME: Should handle not finding a dmraid dev better
> +                    pass

At least log this for the time being.

> +
> +                dm_array = self.getDeviceByName(rs.name)
> +                if dm_array is not None:
> +                    # Use the rs's object on the device.
> +                    device.format.raidmem = block.getMemFromRaidSet(dm_array, \
> +                            major = udev_device_get_major(info), \
> +                            minor = udev_device_get_minor(info), \
> +                            uuid=uuid, name=name)


Do the members really each need a reference to the raidSet instance? I
guess there is nothing useful in a dmraid member, like a uuid of the
array it belongs to, so can we move this block lookup into the
DMRaidArrayDevice constructor?

> +
> +                    # We add the new device.
> +                    dm_array._addDevice(device)
> +
> +                else:
> +                    # Use the rs's object on the device.
> +                    device.format.raidmem = block.getMemFromRaidSet(rs, \
> +                            major = udev_device_get_major(info), \
> +                            minor = udev_device_get_minor(info), \
> +                            uuid=uuid, name=name)
> +
> +                    # Activate the Raid set.
> +                    rs.activate(mknod=True)

See, we should be activating storage.devices.Device instances, not block
instances, even if your setup method just calls self.rs.activate.

> +
> +                    # For size.  dmraid uses sector. use parted.
> +                    dm_size = parted.getDevice(rs.bdev.path).getSize(unit="MB")

This also should move into DMRaidArrayDevice, perhaps a probe method
called in the constructor.

> +                    # Create the DMRaidArray
> +                    dm_array = DMRaidArrayDevice(rs.name,
> +                                                 raidSet=rs,
> +                                                 level=rs.level,
> +                                                 size=dm_size,
> +                                                 exists=True,
> +                                                 parents=[device])
> +
> +                    self._addDevice(dm_array)
>              elif format.type == "lvmpv":
>                  # lookup/create the VG and LVs
>                  try:
> diff --git a/storage/formats/dmraid.py b/storage/formats/dmraid.py
> index d458b45..ef80902 100644
> --- a/storage/formats/dmraid.py
> +++ b/storage/formats/dmraid.py
> @@ -20,6 +20,8 @@
>  # Red Hat Author(s): Dave Lehman <dlehman@xxxxxxxxxx>
>  #
>  
> +import block
> +
>  from iutil import log_method_call
>  #from dm import dm_node_from_name
>  from ..errors import *
> @@ -65,25 +67,34 @@ class DMRaidMember(DeviceFormat):
>  
>                  device -- path to the underlying device
>                  uuid -- this format's UUID
> -                raidSet -- the name of the raidset this member belongs to
>                  exists -- indicates whether this is an existing format
>  
> +            On initialization this format is like DeviceFormat
> +
>          """
>          log_method_call(self, *args, **kwargs)
>          DeviceFormat.__init__(self, *args, **kwargs)
> -        
> -        self.raidSet = kwargs.get("raidSet")
> +
> +        # Initialize the attribute that will hold the block object.
> +        self._raidmem = None
> +
> +    @property
> +    def raidmem(self):
> +        return self._raidmem
> +
> +    @raidmem.setter
> +    def raidmem(self, raidmem):
> +        self._raidmem = raidmem
>  
>      def create(self, *args, **kwargs):
>          log_method_call(self, device=self.device,
>                          type=self.type, status=self.status)
> -        raise DMRaidMemberError("creation of dmraid members is not supported")
> +        raise DMRaidMemberError("creation of dmraid members is non-sense")
>  
>      def destroy(self, *args, **kwargs):
>          log_method_call(self, device=self.device,
>                          type=self.type, status=self.status)
> -        raise DMRaidMemberError("destruction of dmraid members is not supported")
> -
> +        raise DMRaidMemberError("destruction of dmraid members is non-sense")
>  
>  register_device_format(DMRaidMember)
>  
> diff --git a/storage/udev.py b/storage/udev.py
> index 4f83c2a..087f811 100644
> --- a/storage/udev.py
> +++ b/storage/udev.py
> @@ -270,4 +270,12 @@ def udev_device_get_lv_sizes(info):
>  
>      return [float(s) / 1024 for s in sizes]
>  
> -
> +def udev_device_is_dmraid(info):
> +    # Note that this function does *not* identify raid sets.
> +    # Tests to see if device is part of a dmraid set.
> +    # dmraid and mdraid have the same ID_FS_USAGE string,  ID_FS_TYPE has a
> +    # string that describes the type of dmraid (isw_raid_member...),  I don't
> +    # want to maintain a list and mdraid's ID_FS_TYPE='linux_raid_member', so
> +    # dmraid will be everything that is raid and not linux_raid_member
> +    return info["ID_FS_USAGE"] == "raid" and \
> +            info["ID_FS_TYPE"] != "linux_raid_member"

Believe it or not, udev shows LVM PVs as ID_FS_USAGE="raid", so you'll
need to dig deeper. Maybe something like this:

return (info.get("ID_FS_TYPE", "").endswith("_raid_member") and
        info.get("ID_FS_TYPE", "") != "linux_raid_member")

Dave

_______________________________________________
Anaconda-devel-list mailing list
Anaconda-devel-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/anaconda-devel-list

[Index of Archives]     [Kickstart]     [Fedora Users]     [Fedora Legacy List]     [Fedora Maintainers]     [Fedora Desktop]     [Fedora SELinux]     [Big List of Linux Books]     [Yosemite News]     [Yosemite Photos]     [KDE Users]     [Fedora Tools]
  Powered by Linux