Re: Branch 'anaconda-storage-branch' - 10 commits - installclasses/fedora.py installclasses/rhel.py kickstart.py storage/devicetree.py storage/partitioning.py

On Thu, Mar 12, 2009 at 01:42:51PM +0000, Christopher Edward Lumens wrote:
>  installclasses/fedora.py |    9 
>  installclasses/rhel.py   |   10 
>  kickstart.py             |  530 +++++++++++++++++++++++++++--------------------
>  storage/devicetree.py    |    2 
>  storage/partitioning.py  |  113 +++++-----
>  5 files changed, 381 insertions(+), 283 deletions(-)
> 
> New commits:
> commit 89394bbfe2ff4eeed5bd168f6f8a935cd7fa8407
> Author: Chris Lumens <clumens@xxxxxxxxxx>
> Date:   Wed Mar 11 17:01:58 2009 -0400
> 
>     Get rid of the mappings and ksID as well.
> 
> diff --git a/kickstart.py b/kickstart.py
> index cc9999a..1000f8e 100644
> --- a/kickstart.py
> +++ b/kickstart.py
> @@ -1013,13 +1013,6 @@ class AnacondaKSHandler(superclass):
>          self.permanentSkipSteps = []
>          self.skipSteps = []
>          self.showSteps = []
> -        self.ksRaidMapping = {}
> -        self.ksUsedMembers = []
> -        self.ksPVMapping = {}
> -        self.ksVGMapping = {}
> -        # XXX hack to give us a starting point for RAID, LVM, etc unique IDs.
> -        self.ksID = 100000
> -
>          self.anaconda = anaconda
>          self.id = self.anaconda.id
>  
> 
> 
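
A nice side effect of this cleanup is that the device tree itself becomes the
registry: the old code handed every RAID/PV/VG request a synthetic integer ID
(starting at 100000) and tracked them in per-type dicts, while the new code
just looks devices up by name.  A toy before/after, with a stand-in class
rather than the real DeviceTree:

    # Before: synthetic integer IDs handed out per request.
    ksID = 100000
    ksPVMapping = {}
    ksPVMapping["pv.01"] = ksID
    ksID += 1

    # After: names are the keys; no separate bookkeeping to keep in sync.
    class DeviceTree(object):            # stand-in, not anaconda's class
        def __init__(self):
            self._devs = {}
        def add(self, name, dev):
            self._devs[name] = dev
        def getDeviceByName(self, name):
            return self._devs.get(name)

    tree = DeviceTree()
    tree.add("pv.01", object())
    assert tree.getDeviceByName("pv.01") is not None
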
> commit 85b93e8407359c49ba1364b29fe2a67a236c010d
> Author: Chris Lumens <clumens@xxxxxxxxxx>
> Date:   Wed Mar 11 16:11:01 2009 -0400
> 
>     Make sure the device has a diskType before attempting to check what it is.
> 
> diff --git a/storage/devicetree.py b/storage/devicetree.py
> index 2a40fc4..109db1a 100644
> --- a/storage/devicetree.py
> +++ b/storage/devicetree.py
> @@ -758,7 +758,7 @@ class DeviceTree(object):
>          if isinstance(dep, PartitionDevice):
>              # collect all of the logicals on the same disk
>              for part in self.getDevicesByInstance(PartitionDevice):
> -                if part.isLogical and part.disk == dep.disk:
> +                if part.partType and part.isLogical and part.disk == dep.disk:
>                      logicals.append(part)
>  
>          for device in self.devices.values():
> 
> 
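
Note the guard added is on partType even though the commit message says
diskType; presumably isLogical can only be evaluated once the underlying
partition has been probed.  In miniature (stand-in class, and the failure
mode is assumed, not taken from the real code):

    LOGICAL = 1

    class Part(object):
        def __init__(self, partType, disk):
            self.partType = partType     # None until the device is probed
            self.disk = disk

        @property
        def isLogical(self):
            if self.partType is None:    # the hypothetical crash
                raise AttributeError("no partType yet")
            return self.partType == LOGICAL

    parts = [Part(None, "sda"), Part(LOGICAL, "sda")]
    # "part.partType and ..." short-circuits, so the unprobed device never
    # reaches the property that would raise.
    logicals = [p for p in parts
                if p.partType and p.isLogical and p.disk == "sda"]
    assert len(logicals) == 1
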
> commit a8f0fa0603962ca3b4b67e1e37f7a497b2deb3e3
> Author: Chris Lumens <clumens@xxxxxxxxxx>
> Date:   Wed Mar 11 16:10:47 2009 -0400
> 
>     Update the volgroup command to work with the new storage code.
> 
> diff --git a/kickstart.py b/kickstart.py
> index fec8112..cc9999a 100644
> --- a/kickstart.py
> +++ b/kickstart.py
> @@ -864,30 +864,51 @@ class VolGroup(commands.volgroup.FC3_VolGroup):
>          vgd = commands.volgroup.FC3_VolGroup.parse(self, args)
>          pvs = []
>  
> -        # get the unique ids of each of the physical volumes
> +        storage = self.handler.id.storage
> +        devicetree = storage.devicetree
> +
> +        # Get a list of all the physical volume devices that make up this VG.
>          for pv in vgd.physvols:
> -            if pv not in self.handler.ksPVMapping.keys():
> +            dev = devicetree.getDeviceByName(pv)
> +            if not dev:
>                  raise KickstartValueError, formatErrorMsg(self.lineno, msg="Tried to use undefined partition %s in Volume Group specification" % pv)
> -            pvs.append(self.handler.ksPVMapping[pv])
> +
> +            pvs.append(dev)
>  
>          if len(pvs) == 0 and not vgd.preexist:
>              raise KickstartValueError, formatErrorMsg(self.lineno, msg="Volume group defined without any physical volumes.  Either specify physical volumes or use --useexisting.")
>  
> -        if vgd.pesize not in lvm.getPossiblePhysicalExtents(floor=1024):
> +        if vgd.pesize not in getPossiblePhysicalExtents(floor=1024):
>              raise KickstartValueError, formatErrorMsg(self.lineno, msg="Volume group specified invalid pesize")
>  
> -        # get a sort of hackish id
> -        uniqueID = self.handler.ksID
> -        self.handler.ksVGMapping[vgd.vgname] = uniqueID
> -        self.handler.ksID += 1
> -            
> -        request = partRequests.VolumeGroupRequestSpec(vgname = vgd.vgname,
> -                                                      physvols = pvs,
> -                                                      preexist = vgd.preexist,
> -                                                      format = vgd.format,
> -                                                      pesize = vgd.pesize)
> -        request.uniqueID = uniqueID
> -        addPartRequest(self.handler.anaconda, request)
> +        # If --noformat was given, there's really nothing to do.
> +        if not vgd.format:
> +            if not vgd.vgname:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="--noformat used without giving a name")
> +
> +            dev = devicetree.getDeviceByName(vgd.vgname)
> +            if not dev:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="No preexisting VG with the name \"%s\" was found." % vgd.vgname)
> +
> +            return vgd
> +
> +        # If we were given a pre-existing VG to use, we need to verify it
> +        # exists and then schedule a new format action to take place there.
> +        # Also, we only support a subset of all the options on pre-existing
> +        # VGs.
> +        if vgd.preexist:
> +            device = devicetree.getDeviceByName(vgd.vgname)
> +            if not device:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified nonexistent VG %s in volgroup command" % vgd.vgname)
> +
> +            devicetree.registerAction(ActionCreateFormat(device))
> +        else:
> +            request = storage.newVG(pvs=pvs,
> +                                    name=vgd.vgname,
> +                                    peSize=vgd.pesize/1024.0)
> +
> +            storage.createDevice(request)
> +
>          return vgd
>  
>  class XConfig(commands.xconfig.F10_XConfig):
> 
> 
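
The volgroup handler now has three distinct paths: --noformat just verifies
the VG already exists, --useexisting schedules a fresh format action on an
existing VG, and the default path builds a new VG from the named PVs.  A
condensed, runnable restatement with throwaway stand-ins (none of these
classes are anaconda's, and error handling is trimmed):

    class FakeTree(object):
        def __init__(self):
            self.devices, self.actions = {}, []
        def getDeviceByName(self, name):
            return self.devices.get(name)
        def registerAction(self, action):
            self.actions.append(action)

    class FakeStorage(object):
        def __init__(self):
            self.devicetree = FakeTree()
        def newVG(self, pvs, name, peSize):
            return {"name": name, "pvs": pvs, "peSize": peSize}
        def createDevice(self, request):
            self.devicetree.devices[request["name"]] = request

    def handle_volgroup(storage, vgname, physvols, format=True, preexist=False):
        tree = storage.devicetree
        pvs = [tree.getDeviceByName(pv) for pv in physvols]
        if not format:                    # --noformat: must already exist
            return tree.getDeviceByName(vgname)
        if preexist:                      # --useexisting: reformat in place
            device = tree.getDeviceByName(vgname)
            tree.registerAction(("create_format", device))
            return device
        request = storage.newVG(pvs=pvs, name=vgname, peSize=32.0)
        storage.createDevice(request)     # default: brand-new VG
        return request

    s = FakeStorage()
    s.devicetree.devices["pv.01"] = "pv"
    handle_volgroup(s, "VolGroup00", ["pv.01"])
    assert s.devicetree.getDeviceByName("VolGroup00")
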
> commit e8a7994647bc1ae0647b1d59f1160adbbab0f7b4
> Author: Chris Lumens <clumens@xxxxxxxxxx>
> Date:   Wed Mar 11 16:10:30 2009 -0400
> 
>     Update the raid command to work with the new storage code.
> 
> diff --git a/kickstart.py b/kickstart.py
> index 748a5e8..fec8112 100644
> --- a/kickstart.py
> +++ b/kickstart.py
> @@ -708,77 +708,106 @@ class Reboot(commands.reboot.FC6_Reboot):
>  class Raid(commands.raid.F9_Raid):
>      def parse(self, args):
>          rd = commands.raid.F9_Raid.parse(self, args)
> +        raidmems = []
>  
> -        uniqueID = None
> +        storage = self.handler.id.storage
> +        devicetree = storage.devicetree
> +        kwargs = {}
>  
>          if rd.mountpoint == "swap":
> -            filesystem = fileSystemTypeGet('swap')
> +            type = "swap"
>              rd.mountpoint = ""
>          elif rd.mountpoint.startswith("pv."):
> -            filesystem = fileSystemTypeGet("physical volume (LVM)")
> +            type = "lvmpv"
> +            kwargs["name"] = rd.mountpoint
>  
> -            if self.handler.ksPVMapping.has_key(rd.mountpoint):
> -                raise KickstartValueError, formatErrorMsg(self.lineno, msg="Defined PV partition multiple times")
> +            if devicetree.getDeviceByName(kwargs["name"]):
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="PV partition defined multiple times")
>  
> -            # get a sort of hackish id
> -            uniqueID = self.handler.ksID
> -            self.handler.ksPVMapping[rd.mountpoint] = uniqueID
> -            self.handler.ksID += 1
>              rd.mountpoint = ""
>          else:
>              if rd.fstype != "":
> -                try:
> -                    filesystem = fileSystemTypeGet(rd.fstype)
> -                except KeyError:
> -                    raise KickstartValueError, formatErrorMsg(self.lineno, msg="The \"%s\" filesystem type is not supported." % rd.fstype)
> +                type = rd.fstype
>              else:
> -                filesystem = fileSystemTypeGetDefault()
> +                type = storage.defaultFSType
>  
> -        # sanity check mountpoint
> +        # Sanity check mountpoint
>          if rd.mountpoint != "" and rd.mountpoint[0] != '/':
>              raise KickstartValueError, formatErrorMsg(self.lineno, msg="The mount point is not valid.")
>  
> -        raidmems = []
> +        # If this specifies an existing request that we should not format,
> +        # quit here after setting up enough information to mount it later.
> +        if not rd.format:
> +            if not rd.device:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="--noformat used without --device")
> +
> +            dev = devicetree.getDeviceByName(rd.device)
> +            if not dev:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="No preexisting RAID device with the name \"%s\" was found." % rd.device)
> +
> +            dev.format.mountpoint = rd.mountpoint
> +            dev.format.mountopts = rd.fsopts
> +            self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
> +            return rd
>  
> -        # get the unique ids of each of the raid members
> +        # Get a list of all the RAID members.
>          for member in rd.members:
> -            if member not in self.handler.ksRaidMapping.keys():
> +            dev = devicetree.getDeviceByName(member)
> +            if not dev:
>                  raise KickstartValueError, formatErrorMsg(self.lineno, msg="Tried to use undefined partition %s in RAID specification" % member)
> -            if member in self.handler.ksUsedMembers:
> -                raise KickstartValueError, formatErrorMsg(self.lineno, msg="Tried to use RAID member %s in two or more RAID specifications" % member)
> -                
> -            raidmems.append(self.handler.ksRaidMapping[member])
> -            self.handler.ksUsedMembers.append(member)
> -
> -        if rd.level == "" and not rd.preexist:
> -            raise KickstartValueError, formatErrorMsg(self.lineno, msg="RAID Partition defined without RAID level")
> -        if len(raidmems) == 0 and not rd.preexist:
> -            raise KickstartValueError, formatErrorMsg(self.lineno, msg="RAID Partition defined without any RAID members")
> -
> -        request = partRequests.RaidRequestSpec(filesystem,
> -                                               mountpoint = rd.mountpoint,
> -                                               raidmembers = raidmems,
> -                                               raidlevel = rd.level,
> -                                               raidspares = rd.spares,
> -                                               format = rd.format,
> -                                               raidminor = rd.device,
> -                                               preexist = rd.preexist,
> -                                               fsprofile = rd.fsprofile)
> -
> -        if uniqueID is not None:
> -            request.uniqueID = uniqueID
> -        if rd.preexist and rd.device != "":
> -            request.device = "md%s" % rd.device
> -        if rd.fsopts != "":
> -            request.fsopts = rd.fsopts
> +
> +            raidmems.append(dev)
> +
> +        if not rd.preexist:
> +            if len(raidmems) == 0:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="RAID Partition defined without any RAID members")
> +
> +            if rd.level == "":
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="RAID Partition defined without RAID level")
> +
> +        # Now get a format to hold a lot of these extra values.
> +        kwargs["format"] = getFormat(type,
> +                                     mountpoint=rd.mountpoint,
> +                                     mountopts=rd.fsopts)
> +        if not kwargs["format"]:
> +            raise KickstartValueError, formatErrorMsg(self.lineno, msg="The \"%s\" filesystem type is not supported." % type)
> +
> +        kwargs["name"] = rd.device
> +        kwargs["level"] = rd.level
> +        kwargs["parents"] = raidmems
> +        kwargs["memberDevices"] = len(raidmems)
> +        kwargs["totalDevices"] = kwargs["memberDevices"]+rd.spares
> +
> +        # If we were given a pre-existing RAID to create a filesystem on,
> +        # we need to verify it exists and then schedule a new format action
> +        # to take place there.  Also, we only support a subset of all the
> +        # options on pre-existing RAIDs.
> +        if rd.preexist:
> +            device = devicetree.getDeviceByName(rd.device)
> +            if not device:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified nonexistent RAID %s in raid command" % rd.device)
> +
> +            devicetree.registerAction(ActionCreateFormat(device, kwargs["format"]))
> +        else:
> +            request = storage.newMDArray(**kwargs)
> +
> +            # FIXME: no way to specify an fsprofile right now
> +            # if pd.fsprofile:
> +            #     request.format.fsprofile = pd.fsprofile
> +
> +            storage.createDevice(request)
>  
>          if rd.encrypted:
> -            if rd.passphrase and \
> -               not self.handler.anaconda.id.storage.encryptionPassphrase:
> -                self.handler.anaconda.id.storage.encryptionPassphrase = rd.passphrase
> -            request.encryption = cryptodev.LUKSDevice(passphrase=rd.passphrase, format=rd.format)
> +            if rd.passphrase and not storage.encryptionPassphrase:
> +               storage.encryptionPassphrase = rd.passphrase
> +
> +            luksformat = request.format
> +            request.format = getFormat("luks", passphrase=rd.passphrase, device=request.path)
> +            luksdev = LUKSDevice("luks%d" % storage.nextID,
> +                                 format=luksformat,
> +                                 parents=request)
> +            storage.createDevice(luksdev)
>  
> -        addPartRequest(self.handler.anaconda, request)
>          self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
>          return rd
>  
> 
> 
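
The rd.encrypted branch is a pattern worth calling out, since the same stanza
appears in the part and logvol handlers below: the filesystem format the user
asked for is lifted off the request, a luks format goes onto the raw device,
and the filesystem lands on a LUKSDevice stacked on top (getFormat and
LUKSDevice come from storage.formats and storage.devices per the imports in
the logvol commit).  A self-contained sketch of that stacking, with toy
classes in place of the real ones:

    class Fmt(object):
        def __init__(self, type, **kw):
            self.type = type
            self.kw = kw

    class Dev(object):
        def __init__(self, name, format=None, parents=None):
            self.name = name
            self.format = format
            self.parents = parents or []

    request = Dev("md0", format=Fmt("ext3", mountpoint="/"))

    luksformat = request.format                   # 1. remember the real fs
    request.format = Fmt("luks", passphrase="x")  # 2. raw device gets LUKS
    luksdev = Dev("luks0", format=luksformat,     # 3. fs moves onto the
                  parents=[request])              #    stacked LUKS device

    assert request.format.type == "luks"
    assert luksdev.format.type == "ext3" and luksdev.parents[0] is request
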
> commit b009de64c53f353b0e6cfe67c6d8a6524ddb636c
> Author: Chris Lumens <clumens@xxxxxxxxxx>
> Date:   Wed Mar 11 16:09:59 2009 -0400
> 
>     Update the part command to work with the new storage code.
> 
> diff --git a/kickstart.py b/kickstart.py
> index c798a1d..748a5e8 100644
> --- a/kickstart.py
> +++ b/kickstart.py
> @@ -565,11 +565,9 @@ class Partition(commands.partition.F9_Partition):
>      def parse(self, args):
>          pd = commands.partition.F9_Partition.parse(self, args)
>  
> -        uniqueID = None
> -
> -        fsopts = ""
> -        if pd.fsopts:
> -            fsopts = pd.fsopts
> +        storage = self.handler.id.storage
> +        devicetree = storage.devicetree
> +        kwargs = {}
>  
>          if pd.onbiosdisk != "":
>              pd.disk = isys.doGetBiosDisk(pd.onbiosdisk)
> @@ -578,7 +576,7 @@ class Partition(commands.partition.F9_Partition):
>                  raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified BIOS disk %s cannot be determined" % pd.onbiosdisk)
>  
>          if pd.mountpoint == "swap":
> -            filesystem = fileSystemTypeGet('swap')
> +            type = "swap"
>              pd.mountpoint = ""
>              if pd.recommended:
>                  (pd.size, pd.maxSizeMB) = iutil.swapSuggestion()
> @@ -588,102 +586,117 @@ class Partition(commands.partition.F9_Partition):
>          elif pd.mountpoint == "None":
>              pd.mountpoint = ""
>              if pd.fstype:
> -                try:
> -                    filesystem = fileSystemTypeGet(pd.fstype)
> -                except KeyError:
> -                    raise KickstartValueError, formatErrorMsg(self.lineno, msg="The \"%s\" filesystem type is not supported." % pd.fstype)
> +                type = pd.fstype
>              else:
> -                filesystem = fileSystemTypeGetDefault()
> -        elif pd.mountpoint == 'appleboot':
> -            filesystem = fileSystemTypeGet("Apple Bootstrap")
> -            pd.mountpoint = ""
> -        elif pd.mountpoint == 'prepboot':
> -            filesystem = fileSystemTypeGet("PPC PReP Boot")
> -            pd.mountpoint = ""
> +                type = storage.defaultFSType
> +#        elif pd.mountpoint == 'appleboot':
> +#            filesystem = fileSystemTypeGet("Apple Bootstrap")
> +#            pd.mountpoint = ""
> +#        elif pd.mountpoint == 'prepboot':
> +#            filesystem = fileSystemTypeGet("PPC PReP Boot")
> +#            pd.mountpoint = ""
>          elif pd.mountpoint.startswith("raid."):
> -            filesystem = fileSystemTypeGet("software RAID")
> -            
> -            if self.handler.ksRaidMapping.has_key(pd.mountpoint):
> -                raise KickstartValueError, formatErrorMsg(self.lineno, msg="Defined RAID partition multiple times")
> -            
> -            # get a sort of hackish id
> -            uniqueID = self.handler.ksID
> -            self.handler.ksRaidMapping[pd.mountpoint] = uniqueID
> -            self.handler.ksID += 1
> +            type = "mdmember"
> +            kwargs["name"] = pd.mountpoint
> +
> +            if devicetree.getDeviceByName(kwargs["name"]):
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="RAID partition defined multiple times")
> +
>              pd.mountpoint = ""
>          elif pd.mountpoint.startswith("pv."):
> -            filesystem = fileSystemTypeGet("physical volume (LVM)")
> +            type = "lvmpv"
> +            kwargs["name"] = pd.mountpoint
>  
> -            if self.handler.ksPVMapping.has_key(pd.mountpoint):
> -                raise KickstartValueError, formatErrorMsg(self.lineno, msg="Defined PV partition multiple times")
> +            if devicetree.getDeviceByName(kwargs["name"]):
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="PV partition defined multiple times")
>  
> -            # get a sort of hackish id
> -            uniqueID = self.handler.ksID
> -            self.handler.ksPVMapping[pd.mountpoint] = uniqueID
> -            self.handler.ksID += 1
>              pd.mountpoint = ""
>          elif pd.mountpoint == "/boot/efi":
> -            filesystem = fileSystemTypeGet("efi")
> -            fsopts = "defaults,uid=0,gid=0,umask=0077,shortname=winnt"
> +            type = "vfat"
> +            pd.fsopts = "defaults,uid=0,gid=0,umask=0077,shortname=winnt"
>          else:
>              if pd.fstype != "":
> -                try:
> -                    filesystem = fileSystemTypeGet(pd.fstype)
> -                except KeyError:
> -                    raise KickstartValueError, formatErrorMsg(self.lineno, msg="The \"%s\" filesystem type is not supported." % pd.fstype)
> +                type = pd.fstype
>              else:
> -                filesystem = fileSystemTypeGetDefault()
> +                type = storage.defaultFSType
>  
> -        if pd.size is None and (pd.start == 0 and pd.end == 0) and pd.onPart == "":
> +        # If this specified an existing request that we should not format,
> +        # quit here after setting up enough information to mount it later.
> +        if not pd.format:
> +            if not pd.onPart:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="--noformat used without --onpart")
> +
> +            dev = devicetree.getDeviceByName(pd.onPart)
> +            if not dev:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="No preexisting partition with the name \"%s\" was found." % pd.onPart)
> +
> +            dev.format.mountpoint = pd.mountpoint
> +            dev.format.mountopts = pd.fsopts
> +            self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
> +            return pd
> +
> +        # Size specification checks.
> +        if pd.size is None and pd.onPart == "":
>              raise KickstartValueError, formatErrorMsg(self.lineno, msg="Partition requires a size specification")
> -        if pd.start != 0 and pd.disk == "":
> -            raise KickstartValueError, formatErrorMsg(self.lineno, msg="Partition command with start cylinder requires a drive specification")
> -        hds = map(lambda x: x.name, filter(lambda x: isys.mediaPresent(x.name), self.handler.id.storage.disks))
> -        if pd.disk not in hds and pd.disk in ('mapper/'+hd for hd in hds):
> -            pd.disk = 'mapper/' + pd.disk
> -        if pd.disk not in hds:
> -            raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified nonexistent disk %s in partition command" % pd.disk)
> -
> -        request = partRequests.PartitionSpec(filesystem,
> -                                             mountpoint = pd.mountpoint,
> -                                             format = pd.format,
> -                                             fslabel = pd.label,
> -                                             fsprofile = pd.fsprofile)
> -        
> -        if pd.size is not None:
> -            request.size = pd.size
> -        if pd.start != 0:
> -            request.start = pd.start
> -        if pd.end != 0:
> -            request.end = pd.end
> -        if pd.grow:
> -            request.grow = pd.grow
> -        if pd.maxSizeMB != 0:
> -            request.maxSizeMB = pd.maxSizeMB
> -        if pd.disk != "":
> -            request.drive = [ pd.disk ]
> -        if pd.primOnly:
> -            request.primary = pd.primOnly
> -        if uniqueID:
> -            request.uniqueID = uniqueID
> -        if pd.onPart != "":
> -            request.device = pd.onPart
> -            for areq in self.handler.id.storage.autoPartitionRequests:
> -                if areq.device is not None and areq.device == pd.onPart:
> -                    raise KickstartValueError, formatErrorMsg(self.lineno, "Partition already used")
>  
> -        if fsopts != "":
> -            request.fsopts = fsopts
> +        # Now get a format to hold a lot of these extra values.
> +        kwargs["format"] = getFormat(type,
> +                                     mountpoint=pd.mountpoint,
> +                                     label=pd.label,
> +                                     mountopts=pd.fsopts)
> +        if not kwargs["format"]:
> +            raise KickstartValueError, formatErrorMsg(self.lineno, msg="The \"%s\" filesystem type is not supported." % type)
> +
> +        # If we were given a specific disk to create the partition on, verify
> +        # that it exists first.  If it doesn't exist, see if it exists with
> +        # mapper/ on the front.  If that doesn't exist either, it's an error.
> +        if pd.disk:
> +            disk = devicetree.getDeviceByName(pd.disk)
> +            if not disk:
> +                pd.disk = "mapper/" + pd.disk
> +                disk = devicetree.getDeviceByName(pd.disk)
> +
> +                if not disk:
> +                    raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified nonexistent disk %s in partition command" % pd.disk)
> +
> +            kwargs["disks"] = [disk]
> +
> +        kwargs["grow"] = pd.grow
> +        kwargs["size"] = pd.size
> +        kwargs["maxsize"] = pd.maxSizeMB
> +        kwargs["primary"] = pd.primOnly
> +
> +        # If we were given a pre-existing partition to create a filesystem on,
> +        # we need to verify it exists and then schedule a new format action to
> +        # take place there.  Also, we only support a subset of all the options
> +        # on pre-existing partitions.
> +        if pd.onPart:
> +            device = devicetree.getDeviceByName(pd.onPart)
> +            if not device:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified nonexistent partition %s in partition command" % pd.onPart)
> +
> +            devicetree.registerAction(ActionCreateFormat(device, kwargs["format"]))
> +        else:
> +            request = storage.newPartition(**kwargs)
> +
> +            # FIXME: no way to specify an fsprofile right now
> +            # if pd.fsprofile:
> +            #     request.format.fsprofile = pd.fsprofile
> +
> +            storage.createDevice(request)
>  
>          if pd.encrypted:
> -            if pd.passphrase and \
> -               not self.handler.anaconda.id.storage.encryptionPassphrase:
> -                self.handler.anaconda.id.storage.encryptionPassphrase = pd.passphrase
> -            request.encryption = cryptodev.LUKSDevice(passphrase=pd.passphrase, format=pd.format)
> +            if pd.passphrase and not storage.encryptionPassphrase:
> +               storage.encryptionPassphrase = pd.passphrase
>  
> -        addPartRequest(self.handler.anaconda, request)
> -        self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
> +            luksformat = request.format
> +            request.format = getFormat("luks", passphrase=pd.passphrase, device=request.path)
> +            luksdev = LUKSDevice("luks%d" % storage.nextID,
> +                                 format=luksformat,
> +                                 parents=request)
> +            storage.createDevice(luksdev)
>  
> +        self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
>          return pd
>  
>  class Reboot(commands.reboot.FC6_Reboot):
> 
> 
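
One small behavior change in the disk lookup: rather than filtering against a
precomputed list of disk names, the code now tries the name as given and
falls back to a mapper/ prefix before erroring out.  Sketched with a plain
dict standing in for devicetree.getDeviceByName():

    devices = {"mapper/mpatha": "<dm device>", "sda": "<scsi disk>"}

    def find_disk(name):
        disk = devices.get(name)
        if disk is None:
            # dm devices are often written without their mapper/ prefix in
            # kickstart files, so retry with it before giving up.
            name = "mapper/" + name
            disk = devices.get(name)
        if disk is None:
            raise ValueError("Specified nonexistent disk %s" % name)
        return disk

    assert find_disk("sda") == "<scsi disk>"
    assert find_disk("mpatha") == "<dm device>"
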
> commit 9c924c843125b19d90a9a3f317eb8d707c1fa108
> Author: Chris Lumens <clumens@xxxxxxxxxx>
> Date:   Wed Mar 11 16:09:08 2009 -0400
> 
>     Update the logvol command to work with the new storage code.
> 
> diff --git a/kickstart.py b/kickstart.py
> index fe03532..c798a1d 100644
> --- a/kickstart.py
> +++ b/kickstart.py
> @@ -18,6 +18,9 @@
>  # along with this program.  If not, see <http://www.gnu.org/licenses/>.
>  #
>  
> +from storage.devices import LUKSDevice
> +from storage.devicelibs.lvm import getPossiblePhysicalExtents
> +from storage.formats import getFormat
>  from storage.partitioning import clearPartitions
>  
>  from errors import *
> @@ -334,74 +337,104 @@ class LogVol(commands.logvol.F9_LogVol):
>      def parse(self, args):
>          lvd = commands.logvol.F9_LogVol.parse(self, args)
>  
> +        storage = self.handler.id.storage
> +        devicetree = storage.devicetree
> +
>          if lvd.mountpoint == "swap":
> -            filesystem = fileSystemTypeGet("swap")
> +            type = "swap"
>              lvd.mountpoint = ""
> -
>              if lvd.recommended:
>                  (lvd.size, lvd.maxSizeMB) = iutil.swapSuggestion()
>                  lvd.grow = True
>          else:
>              if lvd.fstype != "":
> -                try:
> -                    filesystem = fileSystemTypeGet(lvd.fstype)
> -                except KeyError:
> -                    raise KickstartValueError, formatErrorMsg(self.lineno, msg="The \"%s\" filesystem type is not supported." % lvd.fstype)
> +                type = lvd.fstype
>              else:
> -                filesystem = fileSystemTypeGetDefault()
> +                type = storage.defaultFSType
>  
> -        # sanity check mountpoint
> +        # Sanity check mountpoint
>          if lvd.mountpoint != "" and lvd.mountpoint[0] != '/':
>              raise KickstartValueError, formatErrorMsg(self.lineno, msg="The mount point \"%s\" is not valid." % (lvd.mountpoint,))
>  
> -        try:
> -            vgid = self.handler.ksVGMapping[lvd.vgname]
> -        except KeyError:
> -            raise KickstartValueError, formatErrorMsg(self.lineno, msg="No volume group exists with the name '%s'.  Specify volume groups before logical volumes." % lvd.vgname)
> -
> -        for areq in self.handler.id.storage.autoPartitionRequests:
> -            if areq.type == REQUEST_LV:
> -                if areq.volumeGroup == vgid and areq.logicalVolumeName == lvd.name:
> -                    raise KickstartValueError, formatErrorMsg(self.lineno, msg="Logical volume name already used in volume group %s" % lvd.vgname)
> -            elif areq.type == REQUEST_VG and areq.uniqueID == vgid:
> -                # Store a reference to the VG so we can do the PE size check.
> -                vg = areq
> -
> -        if not self.handler.ksVGMapping.has_key(lvd.vgname):
> -            raise KickstartValueError, formatErrorMsg(self.lineno, msg="Logical volume specifies a non-existent volume group" % lvd.name)
> -
> -        if lvd.percent == 0 and not lvd.preexist:
> -            if lvd.size == 0:
> -                raise KickstartValueError, formatErrorMsg(self.lineno, msg="Size required")
> -            elif not lvd.grow and lvd.size*1024 < vg.pesize:
> -                raise KickstartValueError, formatErrorMsg(self.lineno, msg="Logical volume size must be larger than the volume group physical extent size.")
> -        elif (lvd.percent <= 0 or lvd.percent > 100) and not lvd.preexist:
> -            raise KickstartValueError, formatErrorMsg(self.lineno, msg="Percentage must be between 0 and 100")
> -
> -        request = partRequests.LogicalVolumeRequestSpec(filesystem,
> -                                      format = lvd.format,
> -                                      mountpoint = lvd.mountpoint,
> -                                      size = lvd.size,
> -                                      percent = lvd.percent,
> -                                      volgroup = vgid,
> -                                      lvname = lvd.name,
> -                                      grow = lvd.grow,
> -                                      maxSizeMB = lvd.maxSizeMB,
> -                                      preexist = lvd.preexist,
> -                                      fsprofile = lvd.fsprofile)
> -
> -        if lvd.fsopts != "":
> -            request.fsopts = lvd.fsopts
> +        # Check that the VG this LV is a member of has already been specified.
> +        vg = devicetree.getDeviceByName(lvd.vgname)
> +        if not vg:
> +            raise KickstartValueError, formatErrorMsg(self.lineno, msg="No volume group exists with the name \"%s\".  Specify volume groups before logical volumes." % lvd.vgname)
> +
> +        # If this specifies an existing request that we should not format,
> +        # quit here after setting up enough information to mount it later.
> +        if not lvd.format:
> +            if not lvd.name:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="--noformat used without --name")
> +
> +            dev = devicetree.getDeviceByName("%s-%s" % (vg.name, lvd.name))
> +            if not dev:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="No preexisting logical volume with the name \"%s\" was found." % lvd.name)
> +
> +            dev.format.mountpoint = lvd.mountpoint
> +            dev.format.mountopts = lvd.fsopts
> +            self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
> +            return lvd
> +
> +        # Make sure this LV name is not already used in the requested VG.
> +        tmp = devicetree.getDeviceByName("%s-%s" % (vg.name, lvd.name))
> +        if tmp:
> +            raise KickstartValueError, formatErrorMsg(self.lineno, msg="Logical volume name already used in volume group %s" % vg.name)
> +
> +        # Size specification checks
> +        if not lvd.preexist:
> +            if lvd.percent == 0:
> +                if lvd.size == 0:
> +                    raise KickstartValueError, formatErrorMsg(self.lineno, msg="Size required")
> +                elif not lvd.grow and lvd.size*1024 < vg.peSize:
> +                    raise KickstartValueError, formatErrorMsg(self.lineno, msg="Logical volume size must be larger than the volume group physical extent size.")
> +            elif lvd.percent <= 0 or lvd.percent > 100:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="Percentage must be between 0 and 100")
> +
> +        # Now get a format to hold a lot of these extra values.
> +        format = getFormat(type,
> +                           mountpoint=lvd.mountpoint,
> +                           mountopts=lvd.fsopts)
> +        if not format:
> +            raise KickstartValueError, formatErrorMsg(self.lineno, msg="The \"%s\" filesystem type is not supported." % type)
> +
> +        # If we were given a pre-existing LV to create a filesystem on, we need
> +        # to verify it and its VG exists and then schedule a new format action
> +        # to take place there.  Also, we only support a subset of all the
> +        # options on pre-existing LVs.
> +        if lvd.preexist:
> +            device = devicetree.getDeviceByName("%s-%s" % (vg.name, lvd.name))
> +            if not device:
> +                raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified nonexistent LV %s in logvol command" % lvd.name)
> +
> +            devicetree.registerAction(ActionCreateFormat(device, format))
> +        else:
> +            request = storage.newLV(format=format,
> +                                    name=lvd.name,
> +                                    vg=vg,
> +                                    size=lvd.size,
> +                                    grow=lvd.grow,
> +                                    maxsize=lvd.maxSizeMB,
> +                                    percent=lvd.percent)
> +
> +            # FIXME: no way to specify an fsprofile right now
> +            # if lvd.fsprofile:
> +            #     request.format.fsprofile = lvd.fsprofile
> +
> +            storage.createDevice(request)
>  
>          if lvd.encrypted:
> -            if lvd.passphrase and \
> -               not self.handler.anaconda.id.storage.encryptionPassphrase:
> -                self.handler.anaconda.id.storage.encryptionPassphrase = lvd.passphrase
> -            request.encryption = cryptodev.LUKSDevice(passphrase=lvd.passphrase, format=lvd.format)
> +            if lvd.passphrase and not storage.encryptionPassphrase:
> +                storage.encryptionPassphrase = lvd.passphrase
>  
> -        addPartRequest(self.handler.anaconda, request)
> -        self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
> +            luksformat = request.format
> +            request.format = getFormat("luks", passphrase=lvd.passphrase, device=request.path)
> +            luksdev = LUKSDevice("luks%d" % storage.nextID,
> +                                 format=luksformat,
> +                                 parents=request)
> +            storage.createDevice(luksdev)
>  
> +        self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
>          return lvd
>  
>  class Logging(commands.logging.FC6_Logging):
> 
> 
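
Worth noting for logvol: LVs are registered in the device tree under the
compound "vgname-lvname" name, mirroring device-mapper naming
(/dev/mapper/VolGroup00-LogVol00), so both the duplicate check and the
--noformat/--preexisting lookups build that key instead of using lvd.name
directly.  In plain Python (dict as a stand-in for the tree):

    tree = {"VolGroup00-LogVol00": "<existing lv>"}

    vgname, lvname = "VolGroup00", "LogVol01"
    key = "%s-%s" % (vgname, lvname)

    if key in tree:
        raise ValueError("Logical volume name already used in volume "
                         "group %s" % vgname)
    tree[key] = "<new lv request>"
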
> commit beea6fd23c92a866e4cfcfd4d9525453bbc75f46
> Author: Chris Lumens <clumens@xxxxxxxxxx>
> Date:   Wed Mar 11 16:07:17 2009 -0400
> 
>     addPartRequest is no longer needed.
>     
>     The new storage code schedules actions, so we don't need to batch all the
>     requests up to be added later.
> 
> diff --git a/kickstart.py b/kickstart.py
> index b22c5ef..fe03532 100644
> --- a/kickstart.py
> +++ b/kickstart.py
> @@ -998,24 +998,10 @@ class AnacondaKSParser(KickstartParser):
>  
>          KickstartParser.handleCommand(self, lineno, args)
>  
> -# this adds a partition to the autopartition list replacing anything
> -# else with this mountpoint so that you can use autopart and override /
> -def addPartRequest(anaconda, request):
> -    if not request.mountpoint:
> -        anaconda.id.storage.autoPartitionRequests.append(request)
> -        return
> -
> -    for req in anaconda.id.storage.autoPartitionRequests:
> -        if req.mountpoint and req.mountpoint == request.mountpoint:
> -            anaconda.id.storage.autoPartitionRequests.remove(req)
> -            break
> -    anaconda.id.storage.autoPartitionRequests.append(request)
> -
>  def processKickstartFile(anaconda, file):
>      # We need to make sure storage is active before the kickstart file is read.
>      import storage
>      storage.storageInitialize(anaconda)
> -    anaconda.dispatch.skipStep("storageinit")
>  
>      # parse the %pre
>      ksparser = KickstartPreParser(AnacondaKSHandler(anaconda))
> @@ -1217,6 +1203,9 @@ def setSteps(anaconda):
>      dispatch.skipStep("installtype")
>      dispatch.skipStep("network")
>  
> +    # Storage is initialized for us right when kickstart processing starts.
> +    dispatch.skipStep("storageinit")
> +
>      # Don't show confirmation screens on non-interactive installs.
>      if not interactive:
>          dispatch.skipStep("confirminstall")
> 
> 
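
This is the crux of the series: the old flow appended request specs to
storage.autoPartitionRequests and replayed the whole batch later, while the
new flow takes effect line by line, with storage.createDevice() and
devicetree.registerAction() recording an ordered action log as each kickstart
command is parsed.  A toy contrast (made-up classes, not the real API):

    # Old style: batch requests now, apply them in a later pass.
    pending = []
    def add_part_request(request):
        pending.append(request)          # nothing happens yet

    # New style: every parsed command schedules an action immediately.
    class Tree(object):
        def __init__(self):
            self.actions = []
        def registerAction(self, action):
            self.actions.append(action)  # ordered log, executed at commit

    tree = Tree()
    tree.registerAction("create device sda1")
    tree.registerAction("create format ext3 on sda1")
    assert tree.actions[0].startswith("create device")
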
> commit 551c11375d045616789f4817cdea681fa155f228
> Author: Chris Lumens <clumens@xxxxxxxxxx>
> Date:   Wed Mar 11 16:04:55 2009 -0400
> 
>     Don't set default partitioning in every kickstart case.
>     
>     Turns out we don't want to do this because it'll result in the default autopart
>     scheme getting used regardless of what other partitioning commands are
>     specified.
> 
> diff --git a/installclasses/fedora.py b/installclasses/fedora.py
> index 087e2d3..d2ddafa 100644
> --- a/installclasses/fedora.py
> +++ b/installclasses/fedora.py
> @@ -62,10 +62,11 @@ class InstallClass(BaseInstallClass):
>      def setInstallData(self, anaconda):
>  	BaseInstallClass.setInstallData(self, anaconda)
>  
> -        BaseInstallClass.setDefaultPartitioning(self,
> -                                                anaconda.id.storage,
> -                                                anaconda.platform,
> -                                                CLEARPART_TYPE_LINUX)
> +        if not anaconda.isKickstart:
> +            BaseInstallClass.setDefaultPartitioning(self,
> +                                                    anaconda.id.storage,
> +                                                    anaconda.platform,
> +                                                    CLEARPART_TYPE_LINUX)
>  
>      def setSteps(self, anaconda):
>  	BaseInstallClass.setSteps(self, anaconda);
> diff --git a/installclasses/rhel.py b/installclasses/rhel.py
> index 5217928..da25946 100644
> --- a/installclasses/rhel.py
> +++ b/installclasses/rhel.py
> @@ -87,10 +87,12 @@ class InstallClass(BaseInstallClass):
>  
>      def setInstallData(self, anaconda):
>  	BaseInstallClass.setInstallData(self, anaconda)
> -        BaseInstallClass.setDefaultPartitioning(self, 
> -                                                anaconda.id.storage,
> -                                                anaconda.platform,
> -                                                CLEARPART_TYPE_LINUX)
> +
> +        if not anaconda.isKickstart:
> +            BaseInstallClass.setDefaultPartitioning(self, 
> +                                                    anaconda.id.storage,
> +                                                    anaconda.platform,
> +                                                    CLEARPART_TYPE_LINUX)
>  
>      def setSteps(self, anaconda):
>          dispatch = anaconda.dispatch
> 
> 
> commit d933e3929cf26a9db3317008d100fca2bc245c22
> Author: Chris Lumens <clumens@xxxxxxxxxx>
> Date:   Wed Mar 11 11:18:32 2009 -0400
> 
>     Clear partitions before scheduling requests.
>     
>     Otherwise, our scheduling requests will get purged by the action loop
>     detector.
> 
> diff --git a/kickstart.py b/kickstart.py
> index 9c106a3..b22c5ef 100644
> --- a/kickstart.py
> +++ b/kickstart.py
> @@ -18,6 +18,8 @@
>  # along with this program.  If not, see <http://www.gnu.org/licenses/>.
>  #
>  
> +from storage.partitioning import clearPartitions
> +
>  from errors import *
>  import iutil
>  import isys
> @@ -243,6 +245,8 @@ class ClearPart(commands.clearpart.FC3_ClearPart):
>          if self.initAll:
>              self.handler.id.storage.reinitializeDisks = self.initAll
>  
> +        clearPartitions(self.handler.id.storage)
> +
>          return retval
>  
>  class Firewall(commands.firewall.F10_Firewall):
> 
> 
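
The ordering argument makes sense if the action list is pruned for no-ops: a
destroy scheduled after a create of the same device cancels both out, so
clearpart has to run before the part/raid/logvol commands schedule their
creates.  Roughly, with a hypothetical pruner invented just to show the
cancellation rule (the real loop detector may work differently):

    def prune(actions):
        # A ("create", dev) immediately followed by ("destroy", dev) is a
        # no-op pair and both actions are dropped.
        out = []
        for act in actions:
            if out and act[0] == "destroy" and out[-1] == ("create", act[1]):
                out.pop()
                continue
            out.append(act)
        return out

    late = [("create", "sda1"), ("destroy", "sda1")]   # clearpart ran late
    early = [("destroy", "sda1"), ("create", "sda1")]  # clearpart ran first
    assert prune(late) == []        # the scheduled create got purged
    assert prune(early) == early    # both actions survive
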
> commit 1c479aff318c1a9249135a21095170f7d5d61aa4
> Author: Chris Lumens <clumens@xxxxxxxxxx>
> Date:   Tue Mar 10 14:01:50 2009 -0400
> 
>     Always go through doAutoPart.
>     
>     We need to do this in order to make sure doPartitioning gets called, which
>     does most of the magic of performing partitioning.  We want to break out
>     the autopart stuff into separate functions and only call it in the autopart
>     case.
> 
> diff --git a/storage/partitioning.py b/storage/partitioning.py
> index c104401..66c0c29 100644
> --- a/storage/partitioning.py
> +++ b/storage/partitioning.py
> @@ -40,25 +40,7 @@ _ = lambda x: gettext.ldgettext("anaconda", x)
>  import logging
>  log = logging.getLogger("storage")
>  
> -def doAutoPartition(anaconda):
> -    log.debug("doAutoPartition(%s)" % anaconda)
> -    log.debug("doAutoPart: %s" % anaconda.id.storage.doAutoPart)
> -    log.debug("clearPartType: %s" % anaconda.id.storage.clearPartType)
> -    log.debug("clearPartDisks: %s" % anaconda.id.storage.clearPartDisks)
> -    log.debug("autoPartitionRequests: %s" % anaconda.id.storage.autoPartitionRequests)
> -    log.debug("storage.disks: %s" % anaconda.id.storage.disks)
> -    log.debug("all names: %s" % [d.name for d in anaconda.id.storage.devicetree.devices.values()])
> -    if anaconda.dir == DISPATCH_BACK:
> -        anaconda.id.storage.reset()
> -        return
> -
> -    if anaconda.id.storage.doAutoPart or anaconda.isKickstart:
> -        # kickstart uses clearPartitions even without autopart
> -        clearPartitions(anaconda.id.storage)
> -
> -    if not anaconda.id.storage.doAutoPart:
> -        return
> -
> +def _createFreeSpacePartitions(anaconda):
>      # get a list of disks that have at least one free space region of at
>      # least 100MB
>      disks = []
> @@ -90,6 +72,9 @@ def doAutoPartition(anaconda):
>          anaconda.id.storage.createDevice(part)
>          devs.append(part)
>  
> +    return (disks, devs)
> +
> +def _schedulePartitions(anaconda, disks):
>      #
>      # Convert storage.autoPartitionRequests into Device instances and
>      # schedule them for creation
> @@ -115,38 +100,9 @@ def doAutoPartition(anaconda):
>          anaconda.id.storage.createDevice(dev)
>  
>      # make sure preexisting broken lvm/raid configs get out of the way
> +    return
>  
> -    # sanity check the individual devices
> -    log.warning("not sanity checking devices because I don't know how yet")
> -
> -    # run the autopart function to allocate and grow partitions
> -    try:
> -        doPartitioning(anaconda.id.storage,
> -                       exclusiveDisks=anaconda.id.storage.clearPartDisks)
> -    except PartitioningWarning as msg:
> -        if not anaconda.isKickstart:
> -            anaconda.intf.messageWindow(_("Warnings During Automatic "
> -                                          "Partitioning"),
> -                           _("Following warnings occurred during automatic "
> -                           "partitioning:\n\n%s") % (msg,),
> -                           custom_icon='warning')
> -        else:
> -            log.warning(msg)
> -    except PartitioningError as msg:
> -        # restore drives to original state
> -        anaconda.id.storage.reset()
> -        if not anaconda.isKickstart:
> -            extra = ""
> -            anaconda.dispatch.skipStep("partition", skip = 0)
> -        else:
> -            extra = _("\n\nPress 'OK' to exit the installer.")
> -        anaconda.intf.messageWindow(_("Error Partitioning"),
> -               _("Could not allocate requested partitions: \n\n"
> -                 "%s.%s") % (msg, extra), custom_icon='error')
> -
> -        if anaconda.isKickstart:
> -            sys.exit(0)
> -
> +def _scheduleLVs(anaconda, devs):
>      if anaconda.id.storage.encryptedAutoPart:
>          pvs = []
>          for dev in devs:
> @@ -190,6 +146,63 @@ def doAutoPartition(anaconda):
>      # grow the new VG and its LVs
>      growLVM(anaconda.id.storage)
>  
> +def doAutoPartition(anaconda):
> +    log.debug("doAutoPartition(%s)" % anaconda)
> +    log.debug("doAutoPart: %s" % anaconda.id.storage.doAutoPart)
> +    log.debug("clearPartType: %s" % anaconda.id.storage.clearPartType)
> +    log.debug("clearPartDisks: %s" % anaconda.id.storage.clearPartDisks)
> +    log.debug("autoPartitionRequests: %s" % anaconda.id.storage.autoPartitionRequests)
> +    log.debug("storage.disks: %s" % anaconda.id.storage.disks)
> +    log.debug("all names: %s" % [d.name for d in anaconda.id.storage.devicetree.devices.values()])
> +    if anaconda.dir == DISPATCH_BACK:
> +        anaconda.id.storage.reset()
> +        return
> +
> +    disks = []
> +    devs = []
> +
> +    if anaconda.id.storage.doAutoPart or anaconda.isKickstart:
> +        # kickstart uses clearPartitions even without autopart
> +        clearPartitions(anaconda.id.storage)
> +
> +    if anaconda.id.storage.doAutoPart:
> +        (disks, devs) = _createFreeSpacePartitions(anaconda)
> +        _schedulePartitions(anaconda, disks)
> +
> +    # sanity check the individual devices
> +    log.warning("not sanity checking devices because I don't know how yet")
> +
> +    # run the autopart function to allocate and grow partitions
> +    try:
> +        doPartitioning(anaconda.id.storage,
> +                       exclusiveDisks=anaconda.id.storage.clearPartDisks)
> +    except PartitioningWarning as msg:
> +        if not anaconda.isKickstart:
> +            anaconda.intf.messageWindow(_("Warnings During Automatic "
> +                                          "Partitioning"),
> +                           _("Following warnings occurred during automatic "
> +                           "partitioning:\n\n%s") % (msg,),
> +                           custom_icon='warning')
> +        else:
> +            log.warning(msg)
> +    except PartitioningError as msg:
> +        # restore drives to original state
> +        anaconda.id.storage.reset()
> +        if not anaconda.isKickstart:
> +            extra = ""
> +            anaconda.dispatch.skipStep("partition", skip = 0)
> +        else:
> +            extra = _("\n\nPress 'OK' to exit the installer.")
> +        anaconda.intf.messageWindow(_("Error Partitioning"),
> +               _("Could not allocate requested partitions: \n\n"
> +                 "%s.%s") % (msg, extra), custom_icon='error')
> +
> +        if anaconda.isKickstart:
> +            sys.exit(0)
> +
> +    if anaconda.id.storage.doAutoPart:
> +        _scheduleLVs(anaconda, dev)

Should this be devs?  "dev" isn't defined in this scope; _createFreeSpacePartitions() returns (disks, devs), so as written this will raise a NameError.

> +
>      # sanity check the collection of devices
>      log.warning("not sanity checking storage config because I don't know how yet")
>      # now do a full check of the requests
> 
> 
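
With the helpers split out, doAutoPartition reads as a fixed pipeline where
only the autopart-specific stages are conditional.  The shape in outline,
with no-op stand-ins so it runs (the real bodies are in the diff above):

    def _createFreeSpacePartitions(storage):
        return (["sda"], ["sda3"])   # (disks with free space, new PV devs)

    def _schedulePartitions(storage, disks):
        pass                         # convert autopart requests to devices

    def _scheduleLVs(storage, devs):
        pass                         # VG + LV requests on top of the PVs

    def doPartitioning(storage):
        pass                         # allocate and grow everything

    def do_auto_partition(storage, do_auto_part, is_kickstart):
        disks, devs = [], []
        if do_auto_part or is_kickstart:
            pass                     # clearPartitions() happens here
        if do_auto_part:
            disks, devs = _createFreeSpacePartitions(storage)
            _schedulePartitions(storage, disks)
        doPartitioning(storage)      # now always runs, kickstart or not
        if do_auto_part:
            _scheduleLVs(storage, devs)   # n.b. the diff passes "dev" here

    do_auto_partition(None, True, True)
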
> _______________________________________________
> anaconda-commits-list mailing list
> anaconda-commits-list@xxxxxxxxxx
> http://post-office.corp.redhat.com/mailman/listinfo/anaconda-commits-list

-- 
Joel Andres Granados
Brno, Czech Republic, Red Hat.

_______________________________________________
Anaconda-devel-list mailing list
Anaconda-devel-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/anaconda-devel-list
