Re: [PATCH anaconda-storage] Added test case for devicelib mdraid.py.

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



looks good to me.

On Fri, Mar 13, 2009 at 11:02:03AM +0100, Martin Gracik wrote:
> Rewrote the devicelibs unittest baseclass so we can create and remove additional loop devices within the tests.
> 
> Rewrote the isRaidXY(raidlevel) functions in mdraid.py into one function isRaid(raid, raidlevel).
> Rewrote the get_raid_min_members and get_raid_max_spares functions
> to use the new isRaid function instead of long if-elif-else chains.
> 
> Changed the minimum raid members for raid10 to 2.
> ---
>  storage/devicelibs/mdraid.py          |   78 +++++++++++++------------
>  tests/storage/devicelibs/baseclass.py |   60 ++++++++++---------
>  tests/storage/devicelibs/mdraid.py    |  105 +++++++++++++++++++++++++++++++++
>  3 files changed, 178 insertions(+), 65 deletions(-)
>  create mode 100644 tests/storage/devicelibs/mdraid.py
> 
> diff --git a/storage/devicelibs/mdraid.py b/storage/devicelibs/mdraid.py
> index a0d2f6a..a1a40d2 100644
> --- a/storage/devicelibs/mdraid.py
> +++ b/storage/devicelibs/mdraid.py
> @@ -28,6 +28,13 @@ from ..errors import *
>  import gettext
>  _ = lambda x: gettext.ldgettext("anaconda", x)
>  
> +# raidlevels constants
> +RAID10 = 10
> +RAID6 = 6
> +RAID5 = 5
> +RAID1 = 1
> +RAID0 = 0
> +
>  def getRaidLevels():
>      avail = []
>      try:
> @@ -52,51 +59,46 @@ def getRaidLevels():
>  
>  raid_levels = getRaidLevels()
>  
> -# FIXME: these functions should be consolidated into one function
> -def isRaid10(raidlevel):
> -    """Return whether raidlevel is a valid descriptor of RAID10."""
> -    return raidlevel in ("RAID10", "10", 10)
> -
> -def isRaid6(raidlevel):
> -    """Return whether raidlevel is a valid descriptor of RAID6."""
> -    return raidlevel in ("RAID6", "6", 6)
> -
> -def isRaid5(raidlevel):
> -    """Return whether raidlevel is a valid descriptor of RAID5."""
> -    return raidlevel in ("RAID5", "5", 5)
> +def isRaid(raid, raidlevel):
> +    """Return whether raidlevel is a valid descriptor of raid"""
> +    raid_descriptors = {RAID10: ("RAID10", "10", 10),
> +                        RAID6: ("RAID6", "6", 6),
> +                        RAID5: ("RAID5", "5", 5),
> +                        RAID1: ("mirror", "RAID1", "1", 1),
> +                        RAID0: ("stripe", "RAID0", "0", 0)}
>  
> -def isRaid1(raidlevel):
> -    """Return whether raidlevel is a valid descriptor of RAID1."""
> -    return raidlevel in ("mirror", "RAID1", "1", 1)
> -
> -def isRaid0(raidlevel):
> -    """Return whether raidlevel is a valid descriptor of RAID0."""
> -    return raidlevel in ("stripe", "RAID0", "0", 0)
> +    if raid in raid_descriptors:
> +        return raidlevel in raid_descriptors[raid]
> +    else:
> +        raise ValueError, "invalid raid level %d" % raid
>  
>  def get_raid_min_members(raidlevel):
>      """Return the minimum number of raid members required for raid level"""
> -    if isRaid0(raidlevel):
> -        return 2
> -    elif isRaid1(raidlevel):
> -        return 2
> -    elif isRaid5(raidlevel):
> -        return 3
> -    elif isRaid6(raidlevel):
> -        return 4
> -    elif isRaid10(raidlevel):
> -        return 4
> -    else:
> -        raise ValueError, "invalid raidlevel in get_raid_min_members"
> +    raid_min_members = {RAID10: 2,
> +                        RAID6: 4,
> +                        RAID5: 3,
> +                        RAID1: 2,
> +                        RAID0: 2}
> +
> +    for raid, min_members in raid_min_members.items():
> +        if isRaid(raid, raidlevel):
> +            return min_members
> +
> +    raise ValueError, "invalid raid level %d" % raidlevel
>  
>  def get_raid_max_spares(raidlevel, nummembers):
>      """Return the maximum number of raid spares for raidlevel."""
> -    if isRaid0(raidlevel):
> -        return 0
> -    elif isRaid1(raidlevel) or isRaid5(raidlevel) or \
> -         isRaid6(raidlevel) or isRaid10(raidlevel):
> -        return max(0, nummembers - get_raid_min_members(raidlevel))
> -    else:
> -        raise ValueError, "invalid raidlevel in get_raid_max_spares"
> +    raid_max_spares = {RAID10: lambda: max(0, nummembers - get_raid_min_members(RAID10)),
> +                       RAID6: lambda: max(0, nummembers - get_raid_min_members(RAID6)),
> +                       RAID5: lambda: max(0, nummembers - get_raid_min_members(RAID5)),
> +                       RAID1: lambda: max(0, nummembers - get_raid_min_members(RAID1)),
> +                       RAID0: lambda: 0}
> +
> +    for raid, max_spares_func in raid_max_spares.items():
> +        if isRaid(raid, raidlevel):
> +            return max_spares_func()
> +
> +    raise ValueError, "invalid raid level %d" % raidlevel
>  
>  def mdcreate(device, level, disks, spares=0):
>      argv = ["--create", device, "--level", str(level)]
> diff --git a/tests/storage/devicelibs/baseclass.py b/tests/storage/devicelibs/baseclass.py
> index efc9c80..01fe6c7 100644
> --- a/tests/storage/devicelibs/baseclass.py
> +++ b/tests/storage/devicelibs/baseclass.py
> @@ -15,33 +15,39 @@ class TestDevicelibs(unittest.TestCase):
>  
>      def setUp(self):
>          for dev, file in self._LOOP_DEVICES:
> -            proc = subprocess.Popen(["dd", "if=/dev/zero", "of=%s" % file, "bs=1024", "count=102400"])
> -            while True:
> -                proc.communicate()
> -                if proc.returncode is not None:
> -                    rc = proc.returncode
> -                    break
> -            if rc:
> -                raise OSError, "dd failed creating the file %s" % file
> -            
> -            proc = subprocess.Popen(["losetup", dev, file])
> -            while True:
> -                proc.communicate()
> -                if proc.returncode is not None:
> -                    rc = proc.returncode
> -                    break
> -            if rc:
> -                raise OSError, "losetup failed setting up the loop device %s" % dev
> +            self.makeLoopDev(dev, file)
>  
>      def tearDown(self):
>          for dev, file in self._LOOP_DEVICES:
> -            proc = subprocess.Popen(["losetup", "-d", dev])
> -            while True:
> -                proc.communicate()
> -                if proc.returncode is not None:
> -                    rc = proc.returncode
> -                    break
> -            if rc:
> -                raise OSError, "losetup failed removing the loop device %s" % dev
> -
> -            os.remove(file)
> +            self.removeLoopDev(dev, file)
> +
> +    def makeLoopDev(self, device_name, file_name):
> +        proc = subprocess.Popen(["dd", "if=/dev/zero", "of=%s" % file_name, "bs=1024", "count=102400"])
> +        while True:
> +            proc.communicate()
> +            if proc.returncode is not None:
> +                rc = proc.returncode
> +                break
> +        if rc:
> +            raise OSError, "dd failed creating the file %s" % file_name
> +
> +        proc = subprocess.Popen(["losetup", device_name, file_name])
> +        while True:
> +            proc.communicate()
> +            if proc.returncode is not None:
> +                rc = proc.returncode
> +                break
> +        if rc:
> +            raise OSError, "losetup failed setting up the loop device %s" % device_name
> +
> +    def removeLoopDev(self, device_name, file_name):
> +        proc = subprocess.Popen(["losetup", "-d", device_name])
> +        while True:
> +            proc.communicate()
> +            if proc.returncode is not None:
> +                rc = proc.returncode
> +                break
> +        if rc:
> +            raise OSError, "losetup failed removing the loop device %s" % device_name
> +
> +        os.remove(file_name)
> diff --git a/tests/storage/devicelibs/mdraid.py b/tests/storage/devicelibs/mdraid.py
> new file mode 100644
> index 0000000..6e49e55
> --- /dev/null
> +++ b/tests/storage/devicelibs/mdraid.py
> @@ -0,0 +1,105 @@
> +import baseclass
> +import unittest
> +import storage.devicelibs.mdraid as mdraid
> +
> +import time
> +
> +class TestMDRaid(baseclass.TestDevicelibs):
> +
> +    def testMDRaidStuff(self):
> +        ##
> +        ## getRaidLevels
> +        ##
> +        # pass
> +        self.assertEqual(mdraid.getRaidLevels(), mdraid.getRaidLevels())
> +
> +        ##
> +        ## get_raid_min_members
> +        ##
> +        # pass
> +        self.assertEqual(mdraid.get_raid_min_members(mdraid.RAID0), 2)
> +        self.assertEqual(mdraid.get_raid_min_members(mdraid.RAID1), 2)
> +        self.assertEqual(mdraid.get_raid_min_members(mdraid.RAID5), 3)
> +        self.assertEqual(mdraid.get_raid_min_members(mdraid.RAID6), 4)
> +        self.assertEqual(mdraid.get_raid_min_members(mdraid.RAID10), 2)
> +
> +        # fail
> +        # unsupported raid
> +        self.assertRaises(ValueError, mdraid.get_raid_min_members, 4)
> +
> +        ##
> +        ## get_raid_max_spares
> +        ##
> +        # pass
> +        self.assertEqual(mdraid.get_raid_max_spares(mdraid.RAID0, 5), 0)
> +        self.assertEqual(mdraid.get_raid_max_spares(mdraid.RAID1, 5), 3)
> +        self.assertEqual(mdraid.get_raid_max_spares(mdraid.RAID5, 5), 2)
> +        self.assertEqual(mdraid.get_raid_max_spares(mdraid.RAID6, 5), 1)
> +        self.assertEqual(mdraid.get_raid_max_spares(mdraid.RAID10, 5), 3)
> +
> +        # fail
> +        # unsupported raid
> +        self.assertRaises(ValueError, mdraid.get_raid_max_spares, 4, 5)
> +
> +        ##
> +        ## mdcreate
> +        ##
> +        # pass
> +        self.assertEqual(mdraid.mdcreate("/dev/md0", 1, [self._LOOP_DEV0, self._LOOP_DEV1]), None)
> +        # wait for raid to settle
> +        time.sleep(2)
> +
> +        # fail
> +        self.assertRaises(mdraid.MDRaidError, mdraid.mdcreate, "/dev/md1", 1, ["/not/existing/dev0", "/not/existing/dev1"])
> +
> +        ##
> +        ## mddeactivate
> +        ##
> +        # pass
> +        self.assertEqual(mdraid.mddeactivate("/dev/md0"), None)
> +
> +        # fail
> +        self.assertRaises(mdraid.MDRaidError, mdraid.mddeactivate, "/not/existing/md")
> +
> +        ##
> +        ## mdadd
> +        ##
> +        # pass
> +        # TODO
> +
> +        # fail
> +        self.assertRaises(mdraid.MDRaidError, mdraid.mdadd, "/not/existing/device")
> +
> +        ##
> +        ## mdactivate
> +        ##
> +        # pass
> +        self.assertEqual(mdraid.mdactivate("/dev/md0", [self._LOOP_DEV0, self._LOOP_DEV1], super_minor=0), None)
> +        # wait for raid to settle
> +        time.sleep(2)
> +
> +        # fail
> +        self.assertRaises(mdraid.MDRaidError, mdraid.mdactivate, "/not/existing/md", super_minor=1)
> +        # requires super_minor or uuid
> +        self.assertRaises(ValueError, mdraid.mdactivate, "/dev/md1")
> +
> +        ##
> +        ## mddestroy
> +        ##
> +        # pass
> +        # deactivate first
> +        self.assertEqual(mdraid.mddeactivate("/dev/md0"), None)
> +
> +        self.assertEqual(mdraid.mddestroy(self._LOOP_DEV0), None)
> +        self.assertEqual(mdraid.mddestroy(self._LOOP_DEV1), None)
> +
> +        # fail
> +        # not a component
> +        self.assertRaises(mdraid.MDRaidError, mdraid.mddestroy, "/dev/md0")
> +        self.assertRaises(mdraid.MDRaidError, mdraid.mddestroy, "/not/existing/device")
> +
> +
> +if __name__ == "__main__":
> +    suite = unittest.TestLoader().loadTestsFromTestCase(TestMDRaid)
> +    unittest.TextTestRunner(verbosity=2).run(suite)
> +
> -- 
> 1.6.0.6
> 
> _______________________________________________
> Anaconda-devel-list mailing list
> Anaconda-devel-list@xxxxxxxxxx
> https://www.redhat.com/mailman/listinfo/anaconda-devel-list

-- 
Joel Andres Granados
Brno, Czech Republic, Red Hat.

_______________________________________________
Anaconda-devel-list mailing list
Anaconda-devel-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/anaconda-devel-list

[Index of Archives]     [Kickstart]     [Fedora Users]     [Fedora Legacy List]     [Fedora Maintainers]     [Fedora Desktop]     [Fedora SELinux]     [Big List of Linux Books]     [Yosemite News]     [Yosemite Photos]     [KDE Users]     [Fedora Tools]
  Powered by Linux