[PATCH] sepolgen: add support for python3

Hello everyone,

This is my first mail to this mailing list, so I would like to
introduce myself a bit.

I am Robert Kuska, a member of the Python SIG in Fedora and co-owner
of the "Python 3 as default" change. In Fedora, many packages depend
on policycoreutils, which ships the Python modules from selinux. The
problem (at least for us, as we want the default installation to be
free of Python 2) is that not all of them work with Python 3. I have
therefore decided to prepare patches for all of the modules (except
for the GUI) to make them work with Python 3.

I picked sepolgen first, as it seemed it would be the hardest.
Please note that I have no previous experience with coding for the
selinux project.

You can find my patch as an attachment to this mail. It is a rather
big one (I didn't split it, because it mostly fixes the same kinds of
syntax errors throughout).

I have tested the patch with Python 2.7 and Python 3.4. I had no
issues with Python 2.7, except for test_module.TestModuleCompiler.test,
which was failing even before my patch.

The following failures (the number varies, from 1 to 6) were seen
with Python 3.4:

https://paste.fedoraproject.org/243817/36795382/

The origin of these failures lies in the change to the hashing of
sets and dicts: in Python 3, hashing uses a random seed, and the
tests rely on a fixed iteration order, which is IMHO bad. I didn't
fix those tests yet, as I first wanted to know whether this is just
an issue with the tests and such a fixed order is not actually
expected.
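
To illustrate (a minimal sketch, not taken from the test suite):
under hash randomization the iteration order of a str set can differ
between interpreter runs, so order-sensitive assertions are fragile;
comparing sorted lists sidesteps this:

    # Run this twice: the printed order may differ between runs,
    # because str hashing is seeded randomly (see PYTHONHASHSEED).
    perms = {"read", "write", "getattr", "open"}
    print(list(perms))

    # An order-insensitive check that stays stable across runs:
    expected = ["getattr", "open", "read", "write"]
    assert sorted(perms) == expected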

--
Robert Kuska
{rkuska}

From 103d5863af9199d12a15a7d69bc895708153f939 Mon Sep 17 00:00:00 2001
From: Robert Kuska <rkuska@xxxxxxxxxx>
Date: Mon, 13 Jul 2015 15:11:04 +0200
Subject: [PATCH] Sepolgen: Add Python3 support for module

This patch adds support for Python3 in the sepolgen module.
Apart from the obvious fixes reported by 2to3, the following
issues were fixed:
* use rich comparisons instead of __cmp__
* when sorting use key function instead of cmp
* encode text before hashing
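
To illustrate the hashing change (a minimal sketch with made-up
input, not the exact code from this patch): hashlib digests in
Python3 accept only bytes, so text must be encoded first:

    import hashlib

    signature = "grammar rule signature"   # hypothetical input text
    digest = hashlib.md5(signature.encode("utf-8")).hexdigest()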
---
 sepolgen/src/sepolgen/access.py      |  57 ++++++----
 sepolgen/src/sepolgen/audit.py       |   6 +-
 sepolgen/src/sepolgen/classperms.py  |  10 +-
 sepolgen/src/sepolgen/defaults.py    |   4 +-
 sepolgen/src/sepolgen/interfaces.py  |  12 +--
 sepolgen/src/sepolgen/lex.py         | 158 +++++++++++++--------------
 sepolgen/src/sepolgen/matching.py    |  51 +++++----
 sepolgen/src/sepolgen/module.py      |  16 +--
 sepolgen/src/sepolgen/objectmodel.py |   2 +-
 sepolgen/src/sepolgen/output.py      |   4 +-
 sepolgen/src/sepolgen/policygen.py   |  17 +--
 sepolgen/src/sepolgen/refparser.py   |  26 ++---
 sepolgen/src/sepolgen/refpolicy.py   |  38 +++----
 sepolgen/src/sepolgen/util.py        |  12 +++
 sepolgen/src/sepolgen/yacc.py        | 203 +++++++++++++++++++----------------
 sepolgen/tests/test_access.py        |  41 ++++---
 sepolgen/tests/test_audit.py         | 108 +++++++++----------
 sepolgen/tests/test_interfaces.py    |  28 ++---
 sepolgen/tests/test_matching.py      |   8 +-
 sepolgen/tests/test_refpolicy.py     |  50 ++++-----
 20 files changed, 457 insertions(+), 394 deletions(-)

diff --git a/sepolgen/src/sepolgen/access.py b/sepolgen/src/sepolgen/access.py
index cf13210..03a54a4 100644
--- a/sepolgen/src/sepolgen/access.py
+++ b/sepolgen/src/sepolgen/access.py
@@ -31,7 +31,8 @@ and sets of that access (AccessVectorSet). These objects are used in Madison
 in a variety of ways, but they are the fundamental representation of access.
 """
 
-import refpolicy
+from . import refpolicy
+
 from selinux import audit2why
 
 def is_idparam(id):
@@ -88,6 +89,7 @@ class AccessVector:
             self.audit_msgs = []
             self.type = audit2why.TERULE
             self.data = []
+        self.__hash__ = None
 
         # The direction of the information flow represented by this
         # access vector - used for matching
@@ -133,23 +135,36 @@ class AccessVector:
         return "allow %s %s:%s %s;" % (self.src_type, self.tgt_type,
                                         self.obj_class, self.perms.to_space_str())
 
-    def __cmp__(self, other):
-        if self.src_type != other.src_type:
-            return cmp(self.src_type, other.src_type)
-        if self.tgt_type != other.tgt_type:
-            return cmp(self.tgt_type, other.tgt_type)
-        if self.obj_class != self.obj_class:
-            return cmp(self.obj_class, other.obj_class)
-        if len(self.perms) != len(other.perms):
-            return cmp(len(self.perms), len(other.perms))
-        x = list(self.perms)
-        x.sort()
-        y = list(other.perms)
-        y.sort()
-        for pa, pb in zip(x, y):
-            if pa != pb:
-                return cmp(pa, pb)
-        return 0
+    def _compare(self, other, method):
+        try:
+            x = list(self.perms)
+            x.sort()
+            a = (self.src_type, self.tgt_type, self.obj_class, x)
+            y = list(other.perms)
+            y.sort()
+            b = (other.src_type, other.tgt_type, other.obj_class, y)
+            return method(a, b)
+        except (AttributeError, TypeError):
+            # trying to compare to foreign type
+            return NotImplemented
+
+    def __eq__(self, other):
+        return self._compare(other, lambda a, b: a == b)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda a, b: a < b)
+
+    def __le__(self, other):
+        return self._compare(other, lambda a, b: a <= b)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda a, b: a >= b)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda a, b: a > b)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda a, b: a != b)
 
 def avrule_to_access_vectors(avrule):
     """Convert an avrule into a list of access vectors.
@@ -262,7 +277,7 @@ class AccessVectorSet:
         tgt = self.src.setdefault(src_type, { })
         cls = tgt.setdefault(tgt_type, { })
         
-        if cls.has_key((obj_class, avc_type)):
+        if (obj_class, avc_type) in cls:
             access = cls[obj_class, avc_type]
         else:
             access = AccessVector()
@@ -293,7 +308,7 @@ def avs_extract_types(avs):
 def avs_extract_obj_perms(avs):
     perms = { }
     for av in avs:
-        if perms.has_key(av.obj_class):
+        if av.obj_class in perms:
             s = perms[av.obj_class]
         else:
             s = refpolicy.IdSet()
@@ -321,7 +336,7 @@ class RoleTypeSet:
         return len(self.role_types.keys())
 
     def add(self, role, type):
-        if self.role_types.has_key(role):
+        if role in self.role_types:
             role_type = self.role_types[role]
         else:
             role_type = refpolicy.RoleType()
diff --git a/sepolgen/src/sepolgen/audit.py b/sepolgen/src/sepolgen/audit.py
index 56919be..83efac3 100644
--- a/sepolgen/src/sepolgen/audit.py
+++ b/sepolgen/src/sepolgen/audit.py
@@ -17,11 +17,11 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 #
 
-import refpolicy
-import access
 import re
 import sys
 
+from . import refpolicy
+from . import access
 # Convenience functions
 
 def get_audit_boot_msgs():
@@ -430,7 +430,7 @@ class AuditParser:
 
         # Group by audit header
         if msg.header != "":
-            if self.by_header.has_key(msg.header):
+            if msg.header in self.by_header:
                 self.by_header[msg.header].append(msg)
             else:
                 self.by_header[msg.header] = [msg]
diff --git a/sepolgen/src/sepolgen/classperms.py b/sepolgen/src/sepolgen/classperms.py
index c925dee..f4fd899 100644
--- a/sepolgen/src/sepolgen/classperms.py
+++ b/sepolgen/src/sepolgen/classperms.py
@@ -49,10 +49,10 @@ def t_NAME(t):
     return t
 
 def t_error(t):
-    print "Illegal character '%s'" % t.value[0]
+    print("Illegal character '%s'" % t.value[0])
     t.skip(1)
 
-import lex
+from . import lex
 lex.lex()
 
 def p_statements(p):
@@ -90,9 +90,9 @@ def p_names(p):
         p[0] = [p[1]] + p[2]
 
 def p_error(p):
-    print "Syntax error on line %d %s [type=%s]" % (p.lineno, p.value, p.type)
+    print("Syntax error on line %d %s [type=%s]" % (p.lineno, p.value, p.type))
     
-import yacc
+from . import yacc
 yacc.yacc()
 
 
@@ -112,5 +112,5 @@ test2 = """define(`all_filesystem_perms',`{ mount remount unmount getattr relabe
 define(`all_security_perms',`{ compute_av compute_create compute_member check_context load_policy compute_relabel compute_user setenforce setbool setsecparam setcheckreqprot }')
 """
 result = yacc.parse(txt)
-print result
+print(result)
     
diff --git a/sepolgen/src/sepolgen/defaults.py b/sepolgen/src/sepolgen/defaults.py
index 218bc7c..9591063 100644
--- a/sepolgen/src/sepolgen/defaults.py
+++ b/sepolgen/src/sepolgen/defaults.py
@@ -36,14 +36,14 @@ class PathChoooser(object):
             if ignore.match(line): continue
             mo = consider.match(line)
             if not mo:
-                raise ValueError, "%s:%d: line is not in key = value format" % (pathname, lineno+1)
+                raise ValueError("%s:%d: line is not in key = value format" % (pathname, lineno+1))
             self.config[mo.group(1)] = mo.group(2)
 
     # We're only exporting one useful function, so why not be a function
     def __call__(self, testfilename, pathset="SELINUX_DEVEL_PATH"):
         paths = self.config.get(pathset, None)
         if paths is None:
-            raise ValueError, "%s was not in %s" % (pathset, self.config_pathname)
+            raise ValueError("%s was not in %s" % (pathset, self.config_pathname))
         paths = paths.split(":")
         for p in paths:
             target = os.path.join(p, testfilename)
diff --git a/sepolgen/src/sepolgen/interfaces.py b/sepolgen/src/sepolgen/interfaces.py
index 88a6dc3..d34f562 100644
--- a/sepolgen/src/sepolgen/interfaces.py
+++ b/sepolgen/src/sepolgen/interfaces.py
@@ -21,13 +21,13 @@
 Classes for representing and manipulating interfaces.
 """
 
-import access
-import refpolicy
 import itertools
-import objectmodel
-import matching
 
-from sepolgeni18n import _
+from . import access
+from . import refpolicy
+from . import objectmodel
+from . import matching
+from .sepolgeni18n import _
 
 import copy
 
@@ -276,7 +276,7 @@ class InterfaceVector:
         if attributes:
             for typeattribute in interface.typeattributes():
                 for attr in typeattribute.attributes:
-                    if not attributes.attributes.has_key(attr):
+                    if attr not in attributes.attributes:
                         # print "missing attribute " + attr
                         continue
                     attr_vec = attributes.attributes[attr]
diff --git a/sepolgen/src/sepolgen/lex.py b/sepolgen/src/sepolgen/lex.py
index c149366..b3dddd0 100644
--- a/sepolgen/src/sepolgen/lex.py
+++ b/sepolgen/src/sepolgen/lex.py
@@ -24,20 +24,22 @@
 
 __version__ = "2.2"
 
+import collections
 import re, sys, types
 
+from . import util
+
 # Regular expression used to match valid token names
 _is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
 
 # Available instance types.  This is used when lexers are defined by a class.
-# It's a little funky because I want to preserve backwards compatibility
-# with Python 2.0 where types.ObjectType is undefined.
+# use try except in case that we are using Python interpreter without object
+# being defined as builtin
 
 try:
    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
 except AttributeError:
-   _INSTANCETYPE = types.InstanceType
-   class object: pass       # Note: needed if no new-style classes present
+   _INSTANCETYPE = (object)
 
 # Exception thrown when invalid token encountered and no default error
 # handler is defined.
@@ -172,7 +174,7 @@ class Lexer:
     # readtab() - Read lexer information from a tab file
     # ------------------------------------------------------------
     def readtab(self,tabfile,fdict):
-        exec "import %s as lextab" % tabfile
+        exec("import %s as lextab" % tabfile)
         self.lextokens      = lextab._lextokens
         self.lexreflags     = lextab._lexreflags
         self.lexliterals    = lextab._lexliterals
@@ -197,8 +199,8 @@ class Lexer:
     # input() - Push a new string into the lexer
     # ------------------------------------------------------------
     def input(self,s):
-        if not (isinstance(s,types.StringType) or isinstance(s,types.UnicodeType)):
-            raise ValueError, "Expected a string"
+        if not (isinstance(s,util.bytes_type) or isinstance(s,util.string_type)):
+            raise ValueError("Expected a string")
         self.lexdata = s
         self.lexpos = 0
         self.lexlen = len(s)
@@ -207,8 +209,8 @@ class Lexer:
     # begin() - Changes the lexing state
     # ------------------------------------------------------------
     def begin(self,state):
-        if not self.lexstatere.has_key(state):
-            raise ValueError, "Undefined state"
+        if state not in self.lexstatere:
+            raise ValueError("Undefined state")
         self.lexre = self.lexstatere[state]
         self.lexretext = self.lexstateretext[state]
         self.lexignore = self.lexstateignore.get(state,"")
@@ -286,7 +288,7 @@ class Lexer:
                    break
 
                 # if func not callable, it means it's an ignored token                
-                if not callable(func):
+                if not isinstance(func, collections.Callable):
                    break 
 
                 # If token is processed by a function, call it
@@ -299,9 +301,9 @@ class Lexer:
                 
                 # Verify type of the token.  If not in the token map, raise an error
                 if not self.lexoptimize:
-                    if not self.lextokens.has_key(newtok.type):
-                        raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
-                            func.func_code.co_filename, func.func_code.co_firstlineno,
+                    if newtok.type not in self.lextokens:
+                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
+                            func.__code__.co_filename, func.__code__.co_firstlineno,
                             func.__name__, newtok.type),lexdata[lexpos:])
 
                 return newtok
@@ -329,17 +331,17 @@ class Lexer:
                     newtok = self.lexerrorf(tok)
                     if lexpos == self.lexpos:
                         # Error method didn't change text position at all. This is an error.
-                        raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
+                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                     lexpos = self.lexpos
                     if not newtok: continue
                     return newtok
 
                 self.lexpos = lexpos
-                raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
+                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
 
         self.lexpos = lexpos + 1
         if self.lexdata is None:
-             raise RuntimeError, "No input string given with input()"
+             raise RuntimeError("No input string given with input()")
         return None
         
 # -----------------------------------------------------------------------------
@@ -377,7 +379,7 @@ def _validate_file(filename):
             if not prev:
                 counthash[name] = linen
             else:
-                print "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
+                print("%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev))
                 noerror = 0
         linen += 1
     return noerror
@@ -439,12 +441,12 @@ def _form_master_re(relist,reflags,ldict):
                 # callback function to carry out the action
                 if f.find("ignore_") > 0:
                     lexindexfunc[i] = (None,None)
-                    print "IGNORE", f
+                    print("IGNORE", f)
                 else:
                     lexindexfunc[i] = (None, f[2:])
          
         return [(lexre,lexindexfunc)],[regex]
-    except Exception,e:
+    except Exception as e:
         m = int(len(relist)/2)
         if m == 0: m = 1
         llist, lre = _form_master_re(relist[:m],reflags,ldict)
@@ -464,7 +466,7 @@ def _statetoken(s,names):
     nonstate = 1
     parts = s.split("_")
     for i in range(1,len(parts)):
-         if not names.has_key(parts[i]) and parts[i] != 'ANY': break
+         if parts[i] not in names and parts[i] != 'ANY': break
     if i > 1:
        states = tuple(parts[1:i])
     else:
@@ -507,7 +509,7 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
             for (i,v) in _items:
                 ldict[i] = v
         else:
-            raise ValueError,"Expected a module or instance"
+            raise ValueError("Expected a module or instance")
         lexobj.lexmodule = module
         
     else:
@@ -542,61 +544,61 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
         literals = ldict.get("literals","")
         
     if not tokens:
-        raise SyntaxError,"lex: module does not define 'tokens'"
-    if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
-        raise SyntaxError,"lex: tokens must be a list or tuple."
+        raise SyntaxError("lex: module does not define 'tokens'")
+    if not (isinstance(tokens, list) or isinstance(tokens, tuple)):
+        raise SyntaxError("lex: tokens must be a list or tuple.")
 
     # Build a dictionary of valid token names
     lexobj.lextokens = { }
     if not optimize:
         for n in tokens:
             if not _is_identifier.match(n):
-                print "lex: Bad token name '%s'" % n
+                print("lex: Bad token name '%s'" % n)
                 error = 1
-            if warn and lexobj.lextokens.has_key(n):
-                print "lex: Warning. Token '%s' multiply defined." % n
+            if warn and n in lexobj.lextokens:
+                print("lex: Warning. Token '%s' multiply defined." % n)
             lexobj.lextokens[n] = None
     else:
         for n in tokens: lexobj.lextokens[n] = None
 
     if debug:
-        print "lex: tokens = '%s'" % lexobj.lextokens.keys()
+        print("lex: tokens = '%s'" % lexobj.lextokens.keys())
 
     try:
          for c in literals:
-               if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
-                    print "lex: Invalid literal %s. Must be a single character" % repr(c)
+               if not (isinstance(c,util.bytes_type) or isinstance(c,util.string_type)) or len(c) > 1:
+                    print("lex: Invalid literal %s. Must be a single character" % repr(c))
                     error = 1
                     continue
 
     except TypeError:
-         print "lex: Invalid literals specification. literals must be a sequence of characters."
+         print("lex: Invalid literals specification. literals must be a sequence of characters.")
          error = 1
 
     lexobj.lexliterals = literals
 
     # Build statemap
     if states:
-         if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
-              print "lex: states must be defined as a tuple or list."
+         if not (isinstance(states,tuple) or isinstance(states,list)):
+              print("lex: states must be defined as a tuple or list.")
               error = 1
          else:
               for s in states:
-                    if not isinstance(s,types.TupleType) or len(s) != 2:
-                           print "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
+                    if not isinstance(s,tuple) or len(s) != 2:
+                           print("lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s))
                            error = 1
                            continue
                     name, statetype = s
-                    if not isinstance(name,types.StringType):
-                           print "lex: state name %s must be a string" % repr(name)
+                    if not isinstance(name,util.bytes_type):
+                           print("lex: state name %s must be a string" % repr(name))
                            error = 1
                            continue
                     if not (statetype == 'inclusive' or statetype == 'exclusive'):
-                           print "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
+                           print("lex: state type for state %s must be 'inclusive' or 'exclusive'" % name)
                            error = 1
                            continue
-                    if stateinfo.has_key(name):
-                           print "lex: state '%s' already defined." % name
+                    if name in stateinfo:
+                           print("lex: state '%s' already defined." % name)
                            error = 1
                            continue
                     stateinfo[name] = statetype
@@ -618,28 +620,28 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
     errorf   = { }        # Error functions by state
 
     if len(tsymbols) == 0:
-        raise SyntaxError,"lex: no rules of the form t_rulename are defined."
+        raise SyntaxError("lex: no rules of the form t_rulename are defined.")
 
     for f in tsymbols:
         t = ldict[f]
         states, tokname = _statetoken(f,stateinfo)
         toknames[f] = tokname
 
-        if callable(t):
+        if isinstance(t, collections.Callable):
             for s in states: funcsym[s].append((f,t))
-        elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
+        elif (isinstance(t, util.bytes_type) or isinstance(t, util.string_type)):
             for s in states: strsym[s].append((f,t))
         else:
-            print "lex: %s not defined as a function or string" % f
+            print("lex: %s not defined as a function or string" % f)
             error = 1
 
     # Sort the functions by line number
     for f in funcsym.values():
-        f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))
+        f.sort(key=lambda x: x[1].__code__.co_firstlineno)
 
     # Sort the strings by regular expression length
     for s in strsym.values():
-        s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
+        s.sort(key=lambda x: len(x[1]))
 
     regexs = { }
 
@@ -649,31 +651,31 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
 
         # Add rules defined by functions first
         for fname, f in funcsym[state]:
-            line = f.func_code.co_firstlineno
-            file = f.func_code.co_filename
+            line = f.__code__.co_firstlineno
+            file = f.__code__.co_filename
             files[file] = None
             tokname = toknames[fname]
 
             ismethod = isinstance(f, types.MethodType)
 
             if not optimize:
-                nargs = f.func_code.co_argcount
+                nargs = f.__code__.co_argcount
                 if ismethod:
                     reqargs = 2
                 else:
                     reqargs = 1
                 if nargs > reqargs:
-                    print "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
+                    print("%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__))
                     error = 1
                     continue
 
                 if nargs < reqargs:
-                    print "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
+                    print("%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__))
                     error = 1
                     continue
 
                 if tokname == 'ignore':
-                    print "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
+                    print("%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__))
                     error = 1
                     continue
         
@@ -686,25 +688,25 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
                     try:
                         c = re.compile("(?P<%s>%s)" % (f.__name__,f.__doc__), re.VERBOSE | reflags)
                         if c.match(""):
-                             print "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
+                             print("%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__))
                              error = 1
                              continue
-                    except re.error,e:
-                        print "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
+                    except re.error as e:
+                        print("%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e))
                         if '#' in f.__doc__:
-                             print "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)                 
+                             print("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__))
                         error = 1
                         continue
 
                     if debug:
-                        print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)
+                        print("lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state))
 
                 # Okay. The regular expression seemed okay.  Let's append it to the master regular
                 # expression we're building
   
                 regex_list.append("(?P<%s>%s)" % (f.__name__,f.__doc__))
             else:
-                print "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
+                print("%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__))
 
         # Now add all of the simple rules
         for name,r in strsym[state]:
@@ -716,34 +718,34 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
 
             if not optimize:
                 if tokname == 'error':
-                    raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
+                    raise SyntaxError("lex: Rule '%s' must be defined as a function" % name)
                     error = 1
                     continue
         
-                if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
-                    print "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
+                if tokname not in lexobj.lextokens and tokname.find("ignore_") < 0:
+                    print("lex: Rule '%s' defined for an unspecified token %s." % (name,tokname))
                     error = 1
                     continue
                 try:
                     c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
                     if (c.match("")):
-                         print "lex: Regular expression for rule '%s' matches empty string." % name
+                         print("lex: Regular expression for rule '%s' matches empty string." % name)
                          error = 1
                          continue
-                except re.error,e:
-                    print "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
+                except re.error as e:
+                    print("lex: Invalid regular expression for rule '%s'. %s" % (name,e))
                     if '#' in r:
-                         print "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
+                         print("lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name)
 
                     error = 1
                     continue
                 if debug:
-                    print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)
+                    print("lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state))
                 
             regex_list.append("(?P<%s>%s)" % (name,r))
 
         if not regex_list:
-             print "lex: No rules defined for state '%s'" % state
+             print("lex: No rules defined for state '%s'" % state)
              error = 1
 
         regexs[state] = regex_list
@@ -755,7 +757,7 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
                 error = 1
 
     if error:
-        raise SyntaxError,"lex: Unable to build lexer."
+        raise SyntaxError("lex: Unable to build lexer.")
 
     # From this point forward, we're reasonably confident that we can build the lexer.
     # No more errors will be generated, but there might be some warning messages.
@@ -768,7 +770,7 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
         lexobj.lexstateretext[state] = re_text
         if debug:
             for i in range(len(re_text)):
-                 print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])
+                 print("lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i]))
 
     # For inclusive states, we need to add the INITIAL state
     for state,type in stateinfo.items():
@@ -788,19 +790,19 @@ def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,now
     lexobj.lexstateerrorf = errorf
     lexobj.lexerrorf = errorf.get("INITIAL",None)
     if warn and not lexobj.lexerrorf:
-        print "lex: Warning. no t_error rule is defined."
+        print("lex: Warning. no t_error rule is defined.")
 
     # Check state information for ignore and error rules
     for s,stype in stateinfo.items():
         if stype == 'exclusive':
-              if warn and not errorf.has_key(s):
-                   print "lex: Warning. no error rule is defined for exclusive state '%s'" % s
-              if warn and not ignore.has_key(s) and lexobj.lexignore:
-                   print "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
+              if warn and s not in errorf:
+                   print("lex: Warning. no error rule is defined for exclusive state '%s'" % s)
+              if warn and s not in ignore and lexobj.lexignore:
+                   print("lex: Warning. no ignore rule is defined for exclusive state '%s'" % s)
         elif stype == 'inclusive':
-              if not errorf.has_key(s):
+              if s not in errorf:
                    errorf[s] = errorf.get("INITIAL",None)
-              if not ignore.has_key(s):
+              if s not in ignore:
                    ignore[s] = ignore.get("INITIAL","")
    
 
@@ -829,7 +831,7 @@ def runmain(lexer=None,data=None):
             data = f.read()
             f.close()
         except IndexError:
-            print "Reading from standard input (type EOF to end):"
+            print("Reading from standard input (type EOF to end):")
             data = sys.stdin.read()
 
     if lexer:
@@ -845,7 +847,7 @@ def runmain(lexer=None,data=None):
     while 1:
         tok = _token()
         if not tok: break
-        print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
+        print("(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos))
         
 
 # -----------------------------------------------------------------------------
diff --git a/sepolgen/src/sepolgen/matching.py b/sepolgen/src/sepolgen/matching.py
index d56dd92..533bb9a 100644
--- a/sepolgen/src/sepolgen/matching.py
+++ b/sepolgen/src/sepolgen/matching.py
@@ -21,33 +21,46 @@
 Classes and algorithms for matching requested access to access vectors.
 """
 
-import access
-import objectmodel
 import itertools
 
+from . import access
+from . import objectmodel
+
+
 class Match:
     def __init__(self, interface=None, dist=0):
         self.interface = interface
         self.dist = dist
         self.info_dir_change = False
+        self.__hash__ = None
+
+    def _compare(self, other, method):
+        try:
+            a = (self.dist, self.info_dir_change)
+            b = (other.dist, other.info_dir_change)
+            return method(a, b)
+        except (AttributeError, TypeError):
+            # trying to compare to foreign type
+            return NotImplemented
+
+    def __eq__(self, other):
+        return self._compare(other, lambda a, b: a == b)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda a, b: a < b)
+
+    def __le__(self, other):
+        return self._compare(other, lambda a, b: a <= b)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda a, b: a >= b)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda a, b: a > b)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda a, b: a != b)
 
-    def __cmp__(self, other):
-        if self.dist == other.dist:
-            if self.info_dir_change:
-                if other.info_dir_change:
-                    return 0
-                else:
-                    return 1
-            else:
-                if other.info_dir_change:
-                    return -1
-                else:
-                    return 0
-        else:
-            if self.dist < other.dist:
-                return -1
-            else:
-                return 1
 
 class MatchList:
     DEFAULT_THRESHOLD = 150
diff --git a/sepolgen/src/sepolgen/module.py b/sepolgen/src/sepolgen/module.py
index 7fc9443..d5b3f75 100644
--- a/sepolgen/src/sepolgen/module.py
+++ b/sepolgen/src/sepolgen/module.py
@@ -22,18 +22,22 @@ Utilities for dealing with the compilation of modules and creation
 of module tress.
 """
 
-import defaults
-
-import selinux
-
 import re
 import tempfile
-import commands
+try:
+    from subprocess import getstatusoutput
+except ImportError:
+    from commands import getstatusoutput
 import os
 import os.path
 import subprocess
 import shutil
 
+import selinux
+
+from . import defaults
+
+
 def is_valid_name(modname):
     """Check that a module name is valid.
     """
@@ -130,7 +134,7 @@ class ModuleCompiler:
 
     def run(self, command):
         self.o(command)
-        rc, output = commands.getstatusoutput(command)
+        rc, output = getstatusoutput(command)
         self.o(output)
         
         return rc
diff --git a/sepolgen/src/sepolgen/objectmodel.py b/sepolgen/src/sepolgen/objectmodel.py
index 88c8a1f..d05d721 100644
--- a/sepolgen/src/sepolgen/objectmodel.py
+++ b/sepolgen/src/sepolgen/objectmodel.py
@@ -118,7 +118,7 @@ class PermMappings:
                 continue
             if fields[0] == "class":
                 c = fields[1]
-                if self.classes.has_key(c):
+                if c in self.classes:
                     raise ValueError("duplicate class in perm map")
                 self.classes[c] = { }
                 cur = self.classes[c]
diff --git a/sepolgen/src/sepolgen/output.py b/sepolgen/src/sepolgen/output.py
index 739452d..4244a74 100644
--- a/sepolgen/src/sepolgen/output.py
+++ b/sepolgen/src/sepolgen/output.py
@@ -27,8 +27,8 @@ generating policy. This keeps the semantic / syntactic issues
 cleanly separated from the formatting issues.
 """
 
-import refpolicy
-import util
+from . import refpolicy
+from . import util
 
 class ModuleWriter:
     def __init__(self):
diff --git a/sepolgen/src/sepolgen/policygen.py b/sepolgen/src/sepolgen/policygen.py
index 5f38577..9203f6d 100644
--- a/sepolgen/src/sepolgen/policygen.py
+++ b/sepolgen/src/sepolgen/policygen.py
@@ -24,17 +24,18 @@ classes and algorithms for the generation of SELinux policy.
 import itertools
 import textwrap
 
-import refpolicy
-import objectmodel
-import access
-import interfaces
-import matching
 import selinux.audit2why as audit2why
 try:
     from setools import *
 except:
     pass
 
+from . import refpolicy
+from . import objectmodel
+from . import access
+from . import interfaces
+from . import matching
+
 # Constants for the level of explanation from the generation
 # routines
 NO_EXPLANATION    = 0
@@ -167,7 +168,7 @@ class PolicyGenerator:
 
             if av.type == audit2why.BOOLEAN:
                 if len(av.data) > 1:
-                    rule.comment += "\n#!!!! This avc can be allowed using one of the these booleans:\n#     %s" % ", ".join(map(lambda x: x[0], av.data))
+                    rule.comment += "\n#!!!! This avc can be allowed using one of the these booleans:\n#     %s" % ", ".join([x[0] for x in av.data])
                 else:
                     rule.comment += "\n#!!!! This avc can be allowed using the boolean '%s'" % av.data[0][0]
 
@@ -186,7 +187,7 @@ class PolicyGenerator:
                         self.domains = seinfo(ATTRIBUTE, name="domain")[0]["types"]
                     types=[]
 
-                    for i in map(lambda x: x[TCONTEXT], sesearch([ALLOW], {SCONTEXT: av.src_type, CLASS: av.obj_class, PERMS: av.perms})):
+                    for i in [x[TCONTEXT] for x in sesearch([ALLOW], {SCONTEXT: av.src_type, CLASS: av.obj_class, PERMS: av.perms})]:
                         if i not in self.domains:
                             types.append(i)
                     if len(types) == 1:
@@ -296,7 +297,7 @@ def call_interface(interface, av):
         elif params[i].type == refpolicy.OBJ_CLASS:
             ifcall.args.append(av.obj_class)
         else:
-            print params[i].type
+            print(params[i].type)
             assert(0)
 
     assert(len(ifcall.args) > 0)
diff --git a/sepolgen/src/sepolgen/refparser.py b/sepolgen/src/sepolgen/refparser.py
index 83542d3..7589e49 100644
--- a/sepolgen/src/sepolgen/refparser.py
+++ b/sepolgen/src/sepolgen/refparser.py
@@ -34,12 +34,12 @@ import os
 import re
 import traceback
 
-import refpolicy
-import access
-import defaults
+from . import refpolicy
+from . import access
+from . import defaults
 
-import lex
-import yacc
+from . import lex
+from . import yacc
 
 # :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
 #
@@ -269,7 +269,7 @@ def t_comment(t):
     t.lexer.lineno += 1
 
 def t_error(t):
-    print "Illegal character '%s'" % t.value[0]
+    print("Illegal character '%s'" % t.value[0])
     t.skip(1)
 
 def t_newline(t):
@@ -311,7 +311,7 @@ def collect(stmts, parent, val=None):
 
 def expand(ids, s):
     for id in ids:
-        if spt.has_key(id):
+        if id in spt:
             s.update(spt.by_name(id))
         else:
             s.add(id)
@@ -971,7 +971,7 @@ def p_optional_semi(p):
 def p_error(tok):
     global error, parse_file, success, parser
     error = "%s: Syntax error on line %d %s [type=%s]" % (parse_file, tok.lineno, tok.value, tok.type)
-    print error
+    print(error)
     success = False
 
 def prep_spt(spt):
@@ -1008,7 +1008,7 @@ def parse(text, module=None, support=None, debug=False):
 
     try:
         parser.parse(text, debug=debug, lexer=lexer)
-    except Exception, e:
+    except Exception as e:
         parser = None
         lexer = None
         error = "internal parser error: %s" % str(e) + "\n" + traceback.format_exc()
@@ -1041,7 +1041,7 @@ def list_headers(root):
 
 
 def parse_headers(root, output=None, expand=True, debug=False):
-    import util
+    from . import util
 
     headers = refpolicy.Headers()
 
@@ -1075,9 +1075,9 @@ def parse_headers(root, output=None, expand=True, debug=False):
             fd.close()
             parse_file = f
             parse(txt, module, spt, debug)
-        except IOError, e:
+        except IOError as e:
             return
-        except ValueError, e:
+        except ValueError as e:
             raise ValueError("error parsing file %s: %s" % (f, str(e)))
 
     spt = None
@@ -1113,7 +1113,7 @@ def parse_headers(root, output=None, expand=True, debug=False):
                 parse_file(x[1], m, spt)
             else:
                 parse_file(x[1], m)
-        except ValueError, e:
+        except ValueError as e:
             o(str(e) + "\n")
             failures.append(x[1])
             continue
diff --git a/sepolgen/src/sepolgen/refpolicy.py b/sepolgen/src/sepolgen/refpolicy.py
index b8ed5c1..4bdba13 100644
--- a/sepolgen/src/sepolgen/refpolicy.py
+++ b/sepolgen/src/sepolgen/refpolicy.py
@@ -85,53 +85,53 @@ class Node(PolicyBase):
     # Top level nodes
 
     def nodes(self):
-        return itertools.ifilter(lambda x: isinstance(x, Node), walktree(self))
+        return filter(lambda x: isinstance(x, Node), walktree(self))
 
     def modules(self):
-        return itertools.ifilter(lambda x: isinstance(x, Module), walktree(self))
+        return filter(lambda x: isinstance(x, Module), walktree(self))
 
     def interfaces(self):
-        return itertools.ifilter(lambda x: isinstance(x, Interface), walktree(self))
+        return filter(lambda x: isinstance(x, Interface), walktree(self))
 
     def templates(self):
-        return itertools.ifilter(lambda x: isinstance(x, Template), walktree(self))
+        return filter(lambda x: isinstance(x, Template), walktree(self))
 
     def support_macros(self):
-        return itertools.ifilter(lambda x: isinstance(x, SupportMacros), walktree(self))
+        return filter(lambda x: isinstance(x, SupportMacros), walktree(self))
 
     # Common policy statements
 
     def module_declarations(self):
-        return itertools.ifilter(lambda x: isinstance(x, ModuleDeclaration), walktree(self))
+        return filter(lambda x: isinstance(x, ModuleDeclaration), walktree(self))
 
     def interface_calls(self):
-        return itertools.ifilter(lambda x: isinstance(x, InterfaceCall), walktree(self))
+        return filter(lambda x: isinstance(x, InterfaceCall), walktree(self))
 
     def avrules(self):
-        return itertools.ifilter(lambda x: isinstance(x, AVRule), walktree(self))
+        return filter(lambda x: isinstance(x, AVRule), walktree(self))
 
     def typerules(self):
-        return itertools.ifilter(lambda x: isinstance(x, TypeRule), walktree(self))
+        return filter(lambda x: isinstance(x, TypeRule), walktree(self))
 
     def typeattributes(self):
         """Iterate over all of the TypeAttribute children of this Interface."""
-        return itertools.ifilter(lambda x: isinstance(x, TypeAttribute), walktree(self))
+        return filter(lambda x: isinstance(x, TypeAttribute), walktree(self))
 
     def roleattributes(self):
         """Iterate over all of the RoleAttribute children of this Interface."""
-        return itertools.ifilter(lambda x: isinstance(x, RoleAttribute), walktree(self))
+        return filter(lambda x: isinstance(x, RoleAttribute), walktree(self))
 
     def requires(self):
-        return itertools.ifilter(lambda x: isinstance(x, Require), walktree(self))
+        return filter(lambda x: isinstance(x, Require), walktree(self))
 
     def roles(self):
-        return itertools.ifilter(lambda x: isinstance(x, Role), walktree(self))
+        return filter(lambda x: isinstance(x, Role), walktree(self))
 
     def role_allows(self):
-        return itertools.ifilter(lambda x: isinstance(x, RoleAllow), walktree(self))
+        return filter(lambda x: isinstance(x, RoleAllow), walktree(self))
 
     def role_types(self):
-        return itertools.ifilter(lambda x: isinstance(x, RoleType), walktree(self))
+        return filter(lambda x: isinstance(x, RoleType), walktree(self))
 
     def __str__(self):
         if self.comment:
@@ -291,7 +291,7 @@ class SecurityContext(Leaf):
         self.type = fields[2]
         if len(fields) > 3:
             # FUTURE - normalize level fields to allow more comparisons to succeed.
-            self.level = string.join(fields[3:], ':')
+            self.level = ':'.join(fields[3:])
         else:
             self.level = None
 
@@ -703,7 +703,7 @@ def print_tree(head):
         s = ""
         for i in range(depth):
             s = s + "\t"
-        print s + str(node)
+        print(s + str(node))
 
 
 class Headers(Node):
@@ -810,7 +810,7 @@ class SupportMacros(Node):
         # are ordered correctly so that no macro is used before
         # it is defined
         s = set()
-        if self.map.has_key(perm):
+        if perm in self.map:
             for p in self.by_name(perm):
                 s.update(self.__expand_perm(p))
         else:
@@ -833,7 +833,7 @@ class SupportMacros(Node):
     def has_key(self, name):
         if not self.map:
             self.__gen_map()
-        return self.map.has_key(name)
+        return name in self.map
 
 class Require(Leaf):
     def __init__(self, parent=None):
diff --git a/sepolgen/src/sepolgen/util.py b/sepolgen/src/sepolgen/util.py
index 74a11f5..0ab3e3f 100644
--- a/sepolgen/src/sepolgen/util.py
+++ b/sepolgen/src/sepolgen/util.py
@@ -16,6 +16,18 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 #
+import sys
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+if PY2:
+    bytes_type=str
+    string_type=unicode
+else:
+    bytes_type=bytes
+    string_type=str
 
 class ConsoleProgressBar:
     def __init__(self, out, steps=100, indicator='#'):
diff --git a/sepolgen/src/sepolgen/yacc.py b/sepolgen/src/sepolgen/yacc.py
index bc4536d..61d1fe1 100644
--- a/sepolgen/src/sepolgen/yacc.py
+++ b/sepolgen/src/sepolgen/yacc.py
@@ -67,7 +67,12 @@ default_lr  = 'LALR'           # Default LR table generation method
 
 error_count = 3                # Number of symbols that must be shifted to leave recovery mode
 
-import re, types, sys, cStringIO, hashlib, os.path
+import re, types, sys, hashlib, os.path
+try:
+    import cStringIO
+except ImportError:
+    import io as cStringIO
+import locale
 
 # Exception raised for yacc-related errors
 class YaccError(Exception):   pass
@@ -109,7 +114,7 @@ class YaccProduction:
         self.stack = stack
 
     def __getitem__(self,n):
-        if type(n) == types.IntType:
+        if type(n) == int:
              if n >= 0: return self.slice[n].value
              else: return self.stack[n].value
         else:
@@ -139,9 +144,9 @@ class YaccProduction:
 
     def pushback(self,n):
         if n <= 0:
-            raise ValueError, "Expected a positive value"
+            raise ValueError("Expected a positive value")
         if n > (len(self.slice)-1):
-            raise ValueError, "Can't push %d tokens. Only %d are available." % (n,len(self.slice)-1)
+            raise ValueError("Can't push %d tokens. Only %d are available." % (n,len(self.slice)-1))
         for i in range(0,n):
             self.pbstack.append(self.slice[-i-1])
 
@@ -157,7 +162,7 @@ class Parser:
         # object directly.
 
         if magic != "xyzzy":
-            raise YaccError, "Can't instantiate Parser. Use yacc() instead."
+            raise YaccError("Can't instantiate Parser. Use yacc() instead.")
 
         # Reset internal state
         self.productions = None          # List of productions
@@ -190,7 +195,7 @@ class Parser:
 
         # If no lexer was given, we will try to use the lex module
         if not lexer:
-            import lex
+            from . import lex
             lexer = lex.lexer
 
         pslice.lexer = lexer
@@ -221,7 +226,7 @@ class Parser:
             # is already set, we just use that. Otherwise, we'll pull
             # the next token off of the lookaheadstack or from the lexer
             if debug > 1:
-                print 'state', statestack[-1]
+                print('state', statestack[-1])
             if not lookahead:
                 if not lookaheadstack:
                     lookahead = get_token()     # Get the next token
@@ -239,7 +244,7 @@ class Parser:
             t = actions.get((s,ltype),None)
 
             if debug > 1:
-                print 'action', t
+                print('action', t)
             if t is not None:
                 if t > 0:
                     # shift a symbol on the stack
@@ -396,7 +401,7 @@ class Parser:
                 continue
 
             # Call an error function here
-            raise RuntimeError, "yacc: internal parser error!!!\n"
+            raise RuntimeError("yacc: internal parser error!!!\n")
 
 # -----------------------------------------------------------------------------
 #                          === Parser Construction ===
@@ -457,12 +462,12 @@ def validate_dict(d):
 
         if n[0:2] == 'p_':
             sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % n)
-        if 1 and isinstance(v,types.FunctionType) and v.func_code.co_argcount == 1:
+        if 1 and isinstance(v,types.FunctionType) and v.__code__.co_argcount == 1:
             try:
                 doc = v.__doc__.split(" ")
                 if doc[1] == ':':
-                    sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.func_code.co_filename, v.func_code.co_firstlineno,n))
-            except StandardError:
+                    sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.__code__.co_filename, v.__code__.co_firstlineno,n))
+            except Exception:
                 pass
 
 # -----------------------------------------------------------------------------
@@ -581,7 +586,7 @@ class Production:
         # Precompute list of productions immediately following
         try:
             p.lrafter = Prodnames[p.prod[n+1]]
-        except (IndexError,KeyError),e:
+        except (IndexError,KeyError) as e:
             p.lrafter = []
         try:
             p.lrbefore = p.prod[n-1]
@@ -615,7 +620,7 @@ _is_identifier = re.compile(r'^[a-zA-Z0-9_-~]+$')
 
 def add_production(f,file,line,prodname,syms):
     
-    if Terminals.has_key(prodname):
+    if prodname in Terminals:
         sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname))
         return -1
     if prodname == 'error':
@@ -634,7 +639,7 @@ def add_production(f,file,line,prodname,syms):
                  if (len(c) > 1):
                       sys.stderr.write("%s:%d: Literal token %s in rule '%s' may only be a single character\n" % (file,line,s, prodname)) 
                       return -1
-                 if not Terminals.has_key(c):
+                 if c not in Terminals:
                       Terminals[c] = []
                  syms[x] = c
                  continue
@@ -646,7 +651,7 @@ def add_production(f,file,line,prodname,syms):
 
     # See if the rule is already in the rulemap
     map = "%s -> %s" % (prodname,syms)
-    if Prodmap.has_key(map):
+    if map in Prodmap:
         m = Prodmap[map]
         sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m))
         sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line))
@@ -663,7 +668,7 @@ def add_production(f,file,line,prodname,syms):
             
     Productions.append(p)
     Prodmap[map] = p
-    if not Nonterminals.has_key(prodname):
+    if prodname not in Nonterminals:
         Nonterminals[prodname] = [ ]
     
     # Add all terminals to Terminals
@@ -687,13 +692,13 @@ def add_production(f,file,line,prodname,syms):
             del p.prod[i]
             continue
 
-        if Terminals.has_key(t):
+        if t in Terminals:
             Terminals[t].append(p.number)
             # Is a terminal.  We'll assign a precedence to p based on this
             if not hasattr(p,"prec"):
                 p.prec = Precedence.get(t,('right',0))
         else:
-            if not Nonterminals.has_key(t):
+            if t not in Nonterminals:
                 Nonterminals[t] = [ ]
             Nonterminals[t].append(p.number)
         i += 1
@@ -722,8 +727,8 @@ def add_production(f,file,line,prodname,syms):
 # and adds rules to the grammar
 
 def add_function(f):
-    line = f.func_code.co_firstlineno
-    file = f.func_code.co_filename
+    line = f.__code__.co_firstlineno
+    file = f.__code__.co_filename
     error = 0
 
     if isinstance(f,types.MethodType):
@@ -731,11 +736,11 @@ def add_function(f):
     else:
         reqdargs = 1
         
-    if f.func_code.co_argcount > reqdargs:
+    if f.__code__.co_argcount > reqdargs:
         sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__))
         return -1
 
-    if f.func_code.co_argcount < reqdargs:
+    if f.__code__.co_argcount < reqdargs:
         sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__))
         return -1
           
@@ -776,9 +781,10 @@ def add_function(f):
                 error += e
 
                 
-            except StandardError:
+            except Exception as e:
                 sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps))
                 error -= 1
+                raise
     else:
         sys.stderr.write("%s:%d: No documentation string specified in function '%s'\n" % (file,line,f.__name__))
     return error
@@ -793,7 +799,7 @@ def compute_reachable():
     (Unused terminals have already had their warning.)
     '''
     Reachable = { }
-    for s in Terminals.keys() + Nonterminals.keys():
+    for s in list(Terminals.keys()) + list(Nonterminals.keys()):
         Reachable[s] = 0
 
     mark_reachable_from( Productions[0].prod[0], Reachable )
@@ -872,7 +878,7 @@ def compute_terminates():
     some_error = 0
     for (s,terminates) in Terminates.items():
         if not terminates:
-            if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
+            if s not in Prodnames and s not in Terminals and s != 'error':
                 # s is used-but-not-defined, and we've already warned of that,
                 # so it would be overkill to say that it's also non-terminating.
                 pass
@@ -893,7 +899,7 @@ def verify_productions(cycle_check=1):
         if not p: continue
 
         for s in p.prod:
-            if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
+            if s not in Prodnames and s not in Terminals and s != 'error':
                 sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s))
                 error = 1
                 continue
@@ -935,12 +941,12 @@ def verify_productions(cycle_check=1):
 
     if yaccdebug:
         _vf.write("\nTerminals, with rules where they appear\n\n")
-        ks = Terminals.keys()
+        ks = list(Terminals.keys())
         ks.sort()
         for k in ks:
             _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]])))
         _vf.write("\nNonterminals, with rules where they appear\n\n")
-        ks = Nonterminals.keys()
+        ks = list(Nonterminals.keys())
         ks.sort()
         for k in ks:
             _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]])))
@@ -1003,7 +1009,7 @@ def add_precedence(plist):
                 sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
                 return -1
             for t in terms:
-                if Precedence.has_key(t):
+                if t in Precedence:
                     sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
                     error += 1
                     continue
@@ -1087,7 +1093,7 @@ def compute_follow(start=None):
             # Here is the production set
             for i in range(len(p.prod)):
                 B = p.prod[i]
-                if Nonterminals.has_key(B):
+                if B in Nonterminals:
                     # Okay. We got a non-terminal in a production
                     fst = first(p.prod[i+1:])
                     hasempty = 0
@@ -1259,7 +1265,7 @@ def lr0_items():
         for x in asyms.keys():
             g = lr0_goto(I,x)
             if not g:  continue
-            if _lr0_cidhash.has_key(id(g)): continue
+            if id(g) in _lr0_cidhash: continue
             _lr0_cidhash[id(g)] = len(C)            
             C.append(g)
             
@@ -1305,7 +1311,7 @@ def compute_nullable_nonterminals():
                 nullable[p.name] = 1
                 continue
            for t in p.prod:
-                if not nullable.has_key(t): break
+                if t not in nullable: break
            else:
                 nullable[p.name] = 1
        if len(nullable) == num_nullable: break
@@ -1329,7 +1335,7 @@ def find_nonterminal_transitions(C):
          for p in C[state]:
              if p.lr_index < p.len - 1:
                   t = (state,p.prod[p.lr_index+1])
-                  if Nonterminals.has_key(t[1]):
+                  if t[1] in Nonterminals:
                         if t not in trans: trans.append(t)
          state = state + 1
      return trans
@@ -1352,7 +1358,7 @@ def dr_relation(C,trans,nullable):
     for p in g:
        if p.lr_index < p.len - 1:
            a = p.prod[p.lr_index+1]
-           if Terminals.has_key(a):
+           if a in Terminals:
                if a not in terms: terms.append(a)
 
     # This extra bit is to handle the start state
@@ -1377,7 +1383,7 @@ def reads_relation(C, trans, empty):
     for p in g:
         if p.lr_index < p.len - 1:
              a = p.prod[p.lr_index + 1]
-             if empty.has_key(a):
+             if a in empty:
                   rel.append((j,a))
 
     return rel
@@ -1437,15 +1443,15 @@ def compute_lookback_includes(C,trans,nullable):
                  t = p.prod[lr_index]
 
                  # Check to see if this symbol and state are a non-terminal transition
-                 if dtrans.has_key((j,t)):
+                 if (j,t) in dtrans:
                        # Yes.  Okay, there is some chance that this is an includes relation
                        # the only way to know for certain is whether the rest of the 
                        # production derives empty
 
                        li = lr_index + 1
                        while li < p.len:
-                            if Terminals.has_key(p.prod[li]): break      # No forget it
-                            if not nullable.has_key(p.prod[li]): break
+                            if p.prod[li] in Terminals: break      # No forget it
+                            if p.prod[li] not in nullable: break
                             li = li + 1
                        else:
                             # Appears to be a relation between (j,t) and (state,N)
@@ -1466,7 +1472,7 @@ def compute_lookback_includes(C,trans,nullable):
                  else:
                       lookb.append((j,r))
         for i in includes:
-             if not includedict.has_key(i): includedict[i] = []
+             if i not in includedict: includedict[i] = []
              includedict[i].append((state,N))
         lookdict[(state,N)] = lookb
 
@@ -1513,11 +1519,11 @@ def traverse(x,N,stack,F,X,R,FP):
         for a in F.get(y,[]):
             if a not in F[x]: F[x].append(a)
     if N[x] == d:
-       N[stack[-1]] = sys.maxint
+       N[stack[-1]] = sys.maxsize
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
-           N[stack[-1]] = sys.maxint
+           N[stack[-1]] = sys.maxsize
            F[stack[-1]] = F[x]
            element = stack.pop()
 
@@ -1577,7 +1583,7 @@ def add_lookaheads(lookbacks,followset):
     for trans,lb in lookbacks.items():
         # Loop over productions in lookback
         for state,p in lb:
-             if not p.lookaheads.has_key(state):
+             if state not in p.lookaheads:
                   p.lookaheads[state] = []
              f = followset.get(trans,[])
              for a in f:
@@ -1709,7 +1715,7 @@ def lr_parse_table(method):
                 else:
                     i = p.lr_index
                     a = p.prod[i+1]       # Get symbol right after the "."
-                    if Terminals.has_key(a):
+                    if a in Terminals:
                         g = lr0_goto(I,a)
                         j = _lr0_cidhash.get(id(g),-1)
                         if j >= 0:
@@ -1751,22 +1757,22 @@ def lr_parse_table(method):
                                 action[st,a] = j
                                 actionp[st,a] = p
                                 
-            except StandardError,e:
-                raise YaccError, "Hosed in lr_parse_table", e
+            except Exception as e:
+                raise YaccError("Hosed in lr_parse_table").with_traceback(e)
 
         # Print the actions associated with each terminal
         if yaccdebug:
           _actprint = { }
           for a,p,m in actlist:
-            if action.has_key((st,a)):
+            if (st,a) in action:
                 if p is actionp[st,a]:
                     _vf.write("    %-15s %s\n" % (a,m))
                     _actprint[(a,m)] = 1
           _vf.write("\n")
           for a,p,m in actlist:
-            if action.has_key((st,a)):
+            if (st,a) in action:
                 if p is not actionp[st,a]:
-                    if not _actprint.has_key((a,m)):
+                    if (a,m) not in _actprint:
                         _vf.write("  ! %-15s [ %s ]\n" % (a,m))
                         _actprint[(a,m)] = 1
             
@@ -1776,7 +1782,7 @@ def lr_parse_table(method):
         nkeys = { }
         for ii in I:
             for s in ii.usyms:
-                if Nonterminals.has_key(s):
+                if s in Nonterminals:
                     nkeys[s] = None
         for n in nkeys.keys():
             g = lr0_goto(I,n)
@@ -1914,15 +1920,15 @@ del _lr_goto_items
         
         f.close()
 
-    except IOError,e:
-        print "Unable to create '%s'" % filename
-        print e
+    except IOError as e:
+        print("Unable to create '%s'" % filename)
+        print(e)
         return
 
 def lr_read_tables(module=tab_module,optimize=0):
     global _lr_action, _lr_goto, _lr_productions, _lr_method
     try:
-        exec "import %s as parsetab" % module
+        exec("import %s as parsetab" % module)
         
         if (optimize) or (Signature.digest() == parsetab._lr_signature):
             _lr_action = parsetab._lr_action
@@ -1944,7 +1950,7 @@ def lr_read_tables(module=tab_module,optimize=0):
 try:
    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
 except AttributeError:
-   _INSTANCETYPE = types.InstanceType
+   _INSTANCETYPE = object
 
 # -----------------------------------------------------------------------------
 # yacc(module)
@@ -1952,6 +1958,19 @@ except AttributeError:
 # Build the parser module
 # -----------------------------------------------------------------------------
 
+def encode(text):
+    """Encode the given text using the preferred system encoding."""
+    # locale usually reports an encoding able to represent the text
+    encoding = locale.getpreferredencoding()
+    try:
+        encoded_text = text.encode(encoding)
+    except UnicodeError:
+        # The preferred encoding may be too narrow (e.g. ASCII) and fail
+        # on non-ASCII characters; fall back to UTF-8, which can encode
+        # any text.
+        encoded_text = text.encode('utf-8')
+    return encoded_text
+
 def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0,write_tables=1,debugfile=debug_file,outputdir=''):
     global yaccdebug
     yaccdebug = debug
@@ -1962,7 +1981,7 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
 
 
     # Add parsing method to signature
-    Signature.update(method)
+    Signature.update(encode(method))
     
     # If a "module" parameter was supplied, extract its dictionary.
     # Note: a module may in fact be an instance as well.
@@ -1977,7 +1996,7 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
             for i in _items:
                 ldict[i[0]] = i[1]
         else:
-            raise ValueError,"Expected a module"
+            raise ValueError("Expected a module")
         
     else:
         # No module given.  We might be able to get information from the caller.
@@ -1995,7 +2014,7 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
     if not start:
         start = ldict.get("start",None)
     if start:
-        Signature.update(start)
+        Signature.update(encode(start))
 
     # If running in optimized mode.  We're going to
 
@@ -2023,24 +2042,24 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
             tokens = ldict.get("tokens",None)
     
         if not tokens:
-            raise YaccError,"module does not define a list 'tokens'"
-        if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
-            raise YaccError,"tokens must be a list or tuple."
+            raise YaccError("module does not define a list 'tokens'")
+        if not (isinstance(tokens,list) or isinstance(tokens,tuple)):
+            raise YaccError("tokens must be a list or tuple.")
 
         # Check to see if a requires dictionary is defined.
         requires = ldict.get("require",None)
         if requires:
-            if not (isinstance(requires,types.DictType)):
-                raise YaccError,"require must be a dictionary."
+            if not (isinstance(requires,dict)):
+                raise YaccError("require must be a dictionary.")
 
             for r,v in requires.items():
                 try:
-                    if not (isinstance(v,types.ListType)):
+                    if not (isinstance(v,list)):
                         raise TypeError
                     v1 = [x.split(".") for x in v]
                     Requires[r] = v1
-                except StandardError:
-                    print "Invalid specification for rule '%s' in require. Expected a list of strings" % r            
+                except Exception:
+                    print("Invalid specification for rule '%s' in require. Expected a list of strings" % r)            
 
         
         # Build the dictionary of terminals.  We a record a 0 in the
@@ -2048,12 +2067,12 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
         # used in the grammar
 
         if 'error' in tokens:
-            print "yacc: Illegal token 'error'.  Is a reserved word."
-            raise YaccError,"Illegal token name"
+            print("yacc: Illegal token 'error'.  Is a reserved word.")
+            raise YaccError("Illegal token name")
 
         for n in tokens:
-            if Terminals.has_key(n):
-                print "yacc: Warning. Token '%s' multiply defined." % n
+            if n in Terminals:
+                print("yacc: Warning. Token '%s' multiply defined." % n)
             Terminals[n] = [ ]
 
         Terminals['error'] = [ ]
@@ -2061,13 +2080,13 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
         # Get the precedence map (if any)
         prec = ldict.get("precedence",None)
         if prec:
-            if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)):
-                raise YaccError,"precedence must be a list or tuple."
+            if not (isinstance(prec,list) or isinstance(prec,tuple)):
+                raise YaccError("precedence must be a list or tuple.")
             add_precedence(prec)
-            Signature.update(repr(prec))
+            Signature.update(encode(repr(prec)))
 
         for n in tokens:
-            if not Precedence.has_key(n):
+            if n not in Precedence:
                 Precedence[n] = ('right',0)         # Default, right associative, 0 precedence
 
         # Look for error handler
@@ -2078,17 +2097,17 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
             elif isinstance(ef, types.MethodType):
                 ismethod = 1
             else:
-                raise YaccError,"'p_error' defined, but is not a function or method."                
-            eline = ef.func_code.co_firstlineno
-            efile = ef.func_code.co_filename
+                raise YaccError("'p_error' defined, but is not a function or method.")                
+            eline = ef.__code__.co_firstlineno
+            efile = ef.__code__.co_filename
             files[efile] = None
 
-            if (ef.func_code.co_argcount != 1+ismethod):
-                raise YaccError,"%s:%d: p_error() requires 1 argument." % (efile,eline)
+            if (ef.__code__.co_argcount != 1+ismethod):
+                raise YaccError("%s:%d: p_error() requires 1 argument." % (efile,eline))
             global Errorfunc
             Errorfunc = ef
         else:
-            print "yacc: Warning. no p_error() function is defined."
+            print("yacc: Warning. no p_error() function is defined.")
             
         # Get the list of built-in functions with p_ prefix
         symbols = [ldict[f] for f in ldict.keys()
@@ -2097,27 +2116,27 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
 
         # Check for non-empty symbols
         if len(symbols) == 0:
-            raise YaccError,"no rules of the form p_rulename are defined."
+            raise YaccError("no rules of the form p_rulename are defined.")
     
         # Sort the symbols by line number
-        symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))
+        symbols.sort(key=lambda x: x.__code__.co_firstlineno)
 
         # Add all of the symbols to the grammar
         for f in symbols:
             if (add_function(f)) < 0:
                 error += 1
             else:
-                files[f.func_code.co_filename] = None
+                files[f.__code__.co_filename] = None
 
         # Make a signature of the docstrings
         for f in symbols:
             if f.__doc__:
-                Signature.update(f.__doc__)
+                Signature.update(encode(f.__doc__))
     
         lr_init_vars()
 
         if error:
-            raise YaccError,"Unable to construct parser."
+            raise YaccError("Unable to construct parser.")
 
         if not lr_read_tables(tabmodule):
 
@@ -2129,8 +2148,8 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
             # Validate dictionary
             validate_dict(ldict)
 
-            if start and not Prodnames.has_key(start):
-                raise YaccError,"Bad starting symbol '%s'" % start
+            if start and start not in Prodnames:
+                raise YaccError("Bad starting symbol '%s'" % start)
         
             augment_grammar(start)    
             error = verify_productions(cycle_check=check_recursion)
@@ -2138,7 +2157,7 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
                if (type(f) in (types.FunctionType,types.MethodType) and ldict[f].__name__[:2] != 'p_')]
 
             if error:
-                raise YaccError,"Unable to construct parser."
+                raise YaccError("Unable to construct parser.")
             
             build_lritems()
             compute_first1()
@@ -2147,7 +2166,7 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
             if method in ['SLR','LALR']:
                 lr_parse_table(method)
             else:
-                raise YaccError, "Unknown parsing method '%s'" % method
+                raise YaccError("Unknown parsing method '%s'" % method)
 
             if write_tables:
                 lr_write_tables(tabmodule,outputdir)        
@@ -2159,8 +2178,8 @@ def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module,
                     f.write("\n\n")
                     f.write(_vf.getvalue())
                     f.close()
-                except IOError,e:
-                    print "yacc: can't create '%s'" % debugfile,e
+                except IOError as e:
+                    print("yacc: can't create '%s'" % debugfile,e)
         
     # Made it here.   Create a parser object and set up its internal state.
     # Set global parse() method to bound method of parser object.
@@ -2205,5 +2224,5 @@ def yacc_cleanup():
     
 # Stub that raises an error if parsing is attempted without first calling yacc()
 def parse(*args,**kwargs):
-    raise YaccError, "yacc: No parser built with yacc()"
+    raise YaccError("yacc: No parser built with yacc()")
 
diff --git a/sepolgen/tests/test_access.py b/sepolgen/tests/test_access.py
index fec699e..2ae29db 100644
--- a/sepolgen/tests/test_access.py
+++ b/sepolgen/tests/test_access.py
@@ -32,7 +32,7 @@ class TestAccessVector(unittest.TestCase):
         self.assertEqual(a.obj_class, None)
         self.assertTrue(isinstance(a.perms, refpolicy.IdSet))
         self.assertTrue(isinstance(a.audit_msgs, type([])))
-        self.assertEquals(len(a.audit_msgs), 0)
+        self.assertEqual(len(a.audit_msgs), 0)
 
         # Construction from a list
         a = access.AccessVector()
@@ -82,8 +82,8 @@ class TestAccessVector(unittest.TestCase):
         a.obj_class = "file"
         a.perms.update(["read", "write"])
 
-        self.assertEquals(str(a), "allow foo bar:file { read write };")
-        self.assertEquals(a.to_string(), "allow foo bar:file { read write };")
+        self.assertEqual(str(a), "allow foo bar:file { read write };")
+        self.assertEqual(a.to_string(), "allow foo bar:file { read write };")
 
     def test_cmp(self):
         a = access.AccessVector()
@@ -98,36 +98,33 @@ class TestAccessVector(unittest.TestCase):
         b.obj_class = "file"
         b.perms.update(["read", "write"])
 
-        self.assertEquals(a, b)
+        self.assertEqual(a, b)
 
         # Source Type
         b.src_type = "baz"
-        self.assertEquals(cmp(a, b), 1)
+        self.assertNotEqual(a, b)
 
         b.src_type = "gaz"
-        self.assertEquals(cmp(a, b), -1)
+        self.assertNotEqual(a, b)
 
         # Target Type
         b.src_type = "foo"
         b.tgt_type = "aar"
-        self.assertEquals(cmp(a, b), 1)
+        self.assertNotEqual(a, b)
 
         b.tgt_type = "gaz"
-        self.assertEquals(cmp(a, b), -1)
+        self.assertNotEqual(a, b)
 
         # Perms
         b.tgt_type = "bar"
         b.perms = refpolicy.IdSet(["read"])
-        ret = cmp(a, b)
-        self.assertEquals(ret, 1)
+        self.assertNotEqual(a, b)
 
         b.perms = refpolicy.IdSet(["read", "write", "append"])
-        ret = cmp(a, b)
-        self.assertEquals(ret, -1)
+        self.assertNotEqual(a, b)
 
         b.perms = refpolicy.IdSet(["read", "append"])
-        ret = cmp(a, b)
-        self.assertEquals(ret, 1)
+        self.assertNotEqual(a, b)
                          
 class TestUtilFunctions(unittest.TestCase):
     def test_is_idparam(self):
@@ -149,7 +146,7 @@ class TestUtilFunctions(unittest.TestCase):
         rule.perms.add("write")
 
         avs = access.avrule_to_access_vectors(rule)
-        self.assertEquals(len(avs), 8)
+        self.assertEqual(len(avs), 8)
         comps = [("foo", "what", "dir"),
                  ("foo", "what", "file"),
                  ("foo", "bar", "dir"),
@@ -160,15 +157,15 @@ class TestUtilFunctions(unittest.TestCase):
                  ("baz", "bar", "file")]
         status = [False] * 8
         for av in access.avrule_to_access_vectors(rule):
-            self.assertEquals(av.perms, refpolicy.IdSet(["read", "write"]))
-            for i in xrange(len(comps)):
+            self.assertEqual(av.perms, refpolicy.IdSet(["read", "write"]))
+            for i in range(len(comps)):
                 if comps[i][0] == av.src_type and \
                    comps[i][1] == av.tgt_type and \
                    comps[i][2] == av.obj_class:
                     status[i] = True
 
         for s in status:
-            self.assertEquals(s, True)
+            self.assertEqual(s, True)
                    
 
 class TestAccessVectorSet(unittest.TestCase):
@@ -203,18 +200,18 @@ class TestAccessVectorSet(unittest.TestCase):
                  ("baz", "bar", "file")]
         status = [False] * 8
         for av in self.s:
-            self.assertEquals(av.perms, refpolicy.IdSet(["read", "write"]))
-            for i in xrange(len(comps)):
+            self.assertEqual(av.perms, refpolicy.IdSet(["read", "write"]))
+            for i in range(len(comps)):
                 if comps[i][0] == av.src_type and \
                    comps[i][1] == av.tgt_type and \
                    comps[i][2] == av.obj_class:
                     status[i] = True
 
         for s in status:
-            self.assertEquals(s, True)
+            self.assertEqual(s, True)
 
     def test_len(self):
-        self.assertEquals(len(self.s), 8)
+        self.assertEqual(len(self.s), 8)
 
     def test_list(self):
         a = access.AccessVectorSet()
diff --git a/sepolgen/tests/test_audit.py b/sepolgen/tests/test_audit.py
index 7b74220..c72ca61 100644
--- a/sepolgen/tests/test_audit.py
+++ b/sepolgen/tests/test_audit.py
@@ -60,29 +60,29 @@ class TestAVCMessage(unittest.TestCase):
     def test_defs(self):
         avc = sepolgen.audit.AVCMessage(audit1)
         sc = sepolgen.refpolicy.SecurityContext()
-        self.assertEquals(avc.scontext, sc)
-        self.assertEquals(avc.tcontext, sc)
-        self.assertEquals(avc.tclass, "")
-        self.assertEquals(avc.accesses, [])
+        self.assertEqual(avc.scontext, sc)
+        self.assertEqual(avc.tcontext, sc)
+        self.assertEqual(avc.tclass, "")
+        self.assertEqual(avc.accesses, [])
 
     def test_granted(self):
         avc = sepolgen.audit.AVCMessage(granted1)
         avc.from_split_string(granted1.split())
 
-        self.assertEquals(avc.scontext.user, "user_u")
-        self.assertEquals(avc.scontext.role, "system_r")
-        self.assertEquals(avc.scontext.type, "unconfined_t")
-        self.assertEquals(avc.scontext.level, "s0")
+        self.assertEqual(avc.scontext.user, "user_u")
+        self.assertEqual(avc.scontext.role, "system_r")
+        self.assertEqual(avc.scontext.type, "unconfined_t")
+        self.assertEqual(avc.scontext.level, "s0")
 
-        self.assertEquals(avc.tcontext.user, "user_u")
-        self.assertEquals(avc.tcontext.role, "object_r")
-        self.assertEquals(avc.tcontext.type, "user_home_t")
-        self.assertEquals(avc.tcontext.level, "s0")
+        self.assertEqual(avc.tcontext.user, "user_u")
+        self.assertEqual(avc.tcontext.role, "object_r")
+        self.assertEqual(avc.tcontext.type, "user_home_t")
+        self.assertEqual(avc.tcontext.level, "s0")
         
-        self.assertEquals(avc.tclass, "file")
-        self.assertEquals(avc.accesses, ["getattr"])
+        self.assertEqual(avc.tclass, "file")
+        self.assertEqual(avc.accesses, ["getattr"])
 
-        self.assertEquals(avc.denial, False)
+        self.assertEqual(avc.denial, False)
 
 
     def test_from_split_string(self):
@@ -91,54 +91,54 @@ class TestAVCMessage(unittest.TestCase):
         recs = audit1.split()
         avc.from_split_string(recs)
 
-        self.assertEquals(avc.header, "audit(1158064002.046:4):")
-        self.assertEquals(avc.scontext.user, "user_u")
-        self.assertEquals(avc.scontext.role, "system_r")
-        self.assertEquals(avc.scontext.type, "bluetooth_helper_t")
-        self.assertEquals(avc.scontext.level, "s0-s0:c0")
+        self.assertEqual(avc.header, "audit(1158064002.046:4):")
+        self.assertEqual(avc.scontext.user, "user_u")
+        self.assertEqual(avc.scontext.role, "system_r")
+        self.assertEqual(avc.scontext.type, "bluetooth_helper_t")
+        self.assertEqual(avc.scontext.level, "s0-s0:c0")
 
-        self.assertEquals(avc.tcontext.user, "system_u")
-        self.assertEquals(avc.tcontext.role, "object_r")
-        self.assertEquals(avc.tcontext.type, "xdm_tmp_t")
-        self.assertEquals(avc.tcontext.level, "s0")
+        self.assertEqual(avc.tcontext.user, "system_u")
+        self.assertEqual(avc.tcontext.role, "object_r")
+        self.assertEqual(avc.tcontext.type, "xdm_tmp_t")
+        self.assertEqual(avc.tcontext.level, "s0")
 
-        self.assertEquals(avc.tclass, "file")
-        self.assertEquals(avc.accesses, ["read"])
+        self.assertEqual(avc.tclass, "file")
+        self.assertEqual(avc.accesses, ["read"])
 
-        self.assertEquals(avc.comm, "bluez-pin")
+        self.assertEqual(avc.comm, "bluez-pin")
 
 
-        self.assertEquals(avc.denial, True)
+        self.assertEqual(avc.denial, True)
 
         # audit daemon message
         avc = sepolgen.audit.AVCMessage(audit2)
         recs = audit2.split()
         avc.from_split_string(recs)
 
-        self.assertEquals(avc.header, "audit(1158584779.745:708):")
-        self.assertEquals(avc.scontext.user, "user_u")
-        self.assertEquals(avc.scontext.role, "system_r")
-        self.assertEquals(avc.scontext.type, "vpnc_t")
-        self.assertEquals(avc.scontext.level, "s0")
+        self.assertEqual(avc.header, "audit(1158584779.745:708):")
+        self.assertEqual(avc.scontext.user, "user_u")
+        self.assertEqual(avc.scontext.role, "system_r")
+        self.assertEqual(avc.scontext.type, "vpnc_t")
+        self.assertEqual(avc.scontext.level, "s0")
 
-        self.assertEquals(avc.tcontext.user, "user_u")
-        self.assertEquals(avc.tcontext.role, "system_r")
-        self.assertEquals(avc.tcontext.type, "vpnc_t")
-        self.assertEquals(avc.tcontext.level, "s0")
+        self.assertEqual(avc.tcontext.user, "user_u")
+        self.assertEqual(avc.tcontext.role, "system_r")
+        self.assertEqual(avc.tcontext.type, "vpnc_t")
+        self.assertEqual(avc.tcontext.level, "s0")
 
-        self.assertEquals(avc.tclass, "capability")
-        self.assertEquals(avc.accesses, ["dac_read_search"])
+        self.assertEqual(avc.tclass, "capability")
+        self.assertEqual(avc.accesses, ["dac_read_search"])
 
-        self.assertEquals(avc.comm, "sh")
+        self.assertEqual(avc.comm, "sh")
 
-        self.assertEquals(avc.denial, True)
+        self.assertEqual(avc.denial, True)
 
 class TestPathMessage(unittest.TestCase):
     def test_from_split_string(self):
         path = sepolgen.audit.PathMessage(path1)
         recs = path1.split()
         path.from_split_string(recs)
-        self.assertEquals(path.path, "/usr/lib/sa/sa1")
+        self.assertEqual(path.path, "/usr/lib/sa/sa1")
 
 # TODO - add tests for the other message types
 
@@ -149,27 +149,27 @@ class TestAuditParser(unittest.TestCase):
     def test_parse_string(self):
         a = sepolgen.audit.AuditParser()
         a.parse_string(log1)
-        self.assertEquals(len(a.avc_msgs), 11)
-        self.assertEquals(len(a.compute_sid_msgs), 0)
-        self.assertEquals(len(a.invalid_msgs), 0)
-        self.assertEquals(len(a.policy_load_msgs), 0)
-        self.assertEquals(len(a.path_msgs), 1)
+        self.assertEqual(len(a.avc_msgs), 11)
+        self.assertEqual(len(a.compute_sid_msgs), 0)
+        self.assertEqual(len(a.invalid_msgs), 0)
+        self.assertEqual(len(a.policy_load_msgs), 0)
+        self.assertEqual(len(a.path_msgs), 1)
 
     def test_post_process(self):
         a = sepolgen.audit.AuditParser()
         a.parse_string(log2)
-        self.assertEquals(len(a.avc_msgs), 2)
-        self.assertEquals(a.avc_msgs[0].path, "/usr/lib/sa/sa1")
-        self.assertEquals(a.avc_msgs[1].path, "/usr/lib/sa/sa1")
+        self.assertEqual(len(a.avc_msgs), 2)
+        self.assertEqual(a.avc_msgs[0].path, "/usr/lib/sa/sa1")
+        self.assertEqual(a.avc_msgs[1].path, "/usr/lib/sa/sa1")
 
     def test_parse_file(self):
         f = open("audit.txt")
         a = sepolgen.audit.AuditParser()
         a.parse_file(f)
-        self.assertEquals(len(a.avc_msgs), 21)
-        self.assertEquals(len(a.compute_sid_msgs), 0)
-        self.assertEquals(len(a.invalid_msgs), 0)
-        self.assertEquals(len(a.policy_load_msgs), 0)
+        self.assertEqual(len(a.avc_msgs), 21)
+        self.assertEqual(len(a.compute_sid_msgs), 0)
+        self.assertEqual(len(a.invalid_msgs), 0)
+        self.assertEqual(len(a.policy_load_msgs), 0)
 
 class TestGeneration(unittest.TestCase):
     def test_generation(self):
diff --git a/sepolgen/tests/test_interfaces.py b/sepolgen/tests/test_interfaces.py
index b589bdf..a10ce2d 100644
--- a/sepolgen/tests/test_interfaces.py
+++ b/sepolgen/tests/test_interfaces.py
@@ -202,11 +202,11 @@ class TestInterfaceSet(unittest.TestCase):
         i = interfaces.InterfaceSet()
         i.add_headers(h)
 
-        self.assertEquals(len(i.interfaces), 1)
+        self.assertEqual(len(i.interfaces), 1)
         for key, interface in i.interfaces.items():
-            self.assertEquals(key, interface.name)
-            self.assertEquals(key, "foo")
-            self.assertEquals(len(interface.access), 2)
+            self.assertEqual(key, interface.name)
+            self.assertEqual(key, "foo")
+            self.assertEqual(len(interface.access), 2)
 
             # Check the access vectors
             comp_avs = [["$1", "usr_t", "dir", "create", "add_name"],
@@ -215,21 +215,21 @@ class TestInterfaceSet(unittest.TestCase):
             self.assertTrue(ret)
 
             # Check the params
-            self.assertEquals(len(interface.params), 1)
+            self.assertEqual(len(interface.params), 1)
             for param in interface.params.values():
-                self.assertEquals(param.type, refpolicy.SRC_TYPE)
-                self.assertEquals(param.name, "$1")
-                self.assertEquals(param.num, 1)
-                self.assertEquals(param.required, True)
+                self.assertEqual(param.type, refpolicy.SRC_TYPE)
+                self.assertEqual(param.name, "$1")
+                self.assertEqual(param.num, 1)
+                self.assertEqual(param.required, True)
 
     def test_expansion(self):
         h = refparser.parse(test_expansion)
         i = interfaces.InterfaceSet()
         i.add_headers(h)
 
-        self.assertEquals(len(i.interfaces), 3)
+        self.assertEqual(len(i.interfaces), 3)
         for key, interface in i.interfaces.items():
-            self.assertEquals(key, interface.name)
+            self.assertEqual(key, interface.name)
             if key == "foo":
                 comp_avs = [["$1", "usr_t", "dir", "create", "add_name"],
                             ["$1", "usr_t", "file", "read", "write"]]
@@ -277,6 +277,6 @@ class TestInterfaceSet(unittest.TestCase):
             if ifv.name == "files_exec_usr_files":
                 if_status[2] = True
 
-        self.assertEquals(if_status[0], True)
-        self.assertEquals(if_status[1], True)
-        self.assertEquals(if_status[2], True)
+        self.assertEqual(if_status[0], True)
+        self.assertEqual(if_status[1], True)
+        self.assertEqual(if_status[2], True)
diff --git a/sepolgen/tests/test_matching.py b/sepolgen/tests/test_matching.py
index 161e001..4a85e68 100644
--- a/sepolgen/tests/test_matching.py
+++ b/sepolgen/tests/test_matching.py
@@ -35,13 +35,13 @@ class TestMatch(unittest.TestCase):
 
-        self.assertEquals(a, b)
+        self.assertEqual(a, b)
         b.info_dir_change = False
-        self.assertEquals(cmp(a, b), 1)
-        self.assertEquals(cmp(b, a), -1)
+        self.assertTrue(a > b)
+        self.assertTrue(b < a)
 
         b.dist = 200
 
-        self.assertEquals(cmp(a, b), -1)
-        self.assertEquals(cmp(b, a), 1)
+        self.assertTrue(a < b)
+        self.assertTrue(b > a)
 
 class TestMatchList(unittest.TestCase):
     def test_append(self):
diff --git a/sepolgen/tests/test_refpolicy.py b/sepolgen/tests/test_refpolicy.py
index 8c87189..6f3b886 100644
--- a/sepolgen/tests/test_refpolicy.py
+++ b/sepolgen/tests/test_refpolicy.py
@@ -24,10 +24,10 @@ import selinux
 class TestIdSet(unittest.TestCase):
     def test_set_to_str(self):
         s = refpolicy.IdSet(["read", "write", "getattr"])
-        self.assertEquals(s.to_space_str(), "{ read write getattr }")
+        self.assertEqual(s.to_space_str(), "{ read write getattr }")
         s = refpolicy.IdSet()
         s.add("read")
-        self.assertEquals(s.to_space_str(), "read")
+        self.assertEqual(s.to_space_str(), "read")
 
 class TestSecurityContext(unittest.TestCase):
     def test_init(self):
@@ -38,25 +38,25 @@ class TestSecurityContext(unittest.TestCase):
         context = "user_u:object_r:foo_t"
         sc = refpolicy.SecurityContext()
         sc.from_string(context)
-        self.assertEquals(sc.user, "user_u")
-        self.assertEquals(sc.role, "object_r")
-        self.assertEquals(sc.type, "foo_t")
-        self.assertEquals(sc.level, None)
+        self.assertEqual(sc.user, "user_u")
+        self.assertEqual(sc.role, "object_r")
+        self.assertEqual(sc.type, "foo_t")
+        self.assertEqual(sc.level, None)
         if selinux.is_selinux_mls_enabled():
-            self.assertEquals(str(sc), context + ":s0")
+            self.assertEqual(str(sc), context + ":s0")
         else:
-            self.assertEquals(str(sc), context)
-        self.assertEquals(sc.to_string(default_level="s1"), context + ":s1")
+            self.assertEqual(str(sc), context)
+        self.assertEqual(sc.to_string(default_level="s1"), context + ":s1")
 
         context = "user_u:object_r:foo_t:s0-s0:c0-c255"
         sc = refpolicy.SecurityContext()
         sc.from_string(context)
-        self.assertEquals(sc.user, "user_u")
-        self.assertEquals(sc.role, "object_r")
-        self.assertEquals(sc.type, "foo_t")
-        self.assertEquals(sc.level, "s0-s0:c0-c255")
-        self.assertEquals(str(sc), context)
-        self.assertEquals(sc.to_string(), context)
+        self.assertEqual(sc.user, "user_u")
+        self.assertEqual(sc.role, "object_r")
+        self.assertEqual(sc.type, "foo_t")
+        self.assertEqual(sc.level, "s0-s0:c0-c255")
+        self.assertEqual(str(sc), context)
+        self.assertEqual(sc.to_string(), context)
 
         sc = refpolicy.SecurityContext()
         self.assertRaises(ValueError, sc.from_string, "abc")
@@ -67,20 +67,20 @@ class TestSecurityContext(unittest.TestCase):
         sc3 = refpolicy.SecurityContext("user_u:object_r:foo_t:s0")
         sc4 = refpolicy.SecurityContext("user_u:object_r:bar_t")
 
-        self.assertEquals(sc1, sc2)
-        self.assertNotEquals(sc1, sc3)
-        self.assertNotEquals(sc1, sc4)
+        self.assertEqual(sc1, sc2)
+        self.assertNotEqual(sc1, sc3)
+        self.assertNotEqual(sc1, sc4)
 
 class TestObjecClass(unittest.TestCase):
     def test_init(self):
         o = refpolicy.ObjectClass(name="file")
-        self.assertEquals(o.name, "file")
+        self.assertEqual(o.name, "file")
         self.assertTrue(isinstance(o.perms, set))
 
 class TestAVRule(unittest.TestCase):
     def test_init(self):
         a = refpolicy.AVRule()
-        self.assertEquals(a.rule_type, a.ALLOW)
+        self.assertEqual(a.rule_type, a.ALLOW)
         self.assertTrue(isinstance(a.src_types, set))
         self.assertTrue(isinstance(a.tgt_types, set))
         self.assertTrue(isinstance(a.obj_classes, set))
@@ -92,7 +92,7 @@ class TestAVRule(unittest.TestCase):
         a.tgt_types.add("bar_t")
         a.obj_classes.add("file")
         a.perms.add("read")
-        self.assertEquals(a.to_string(), "allow foo_t bar_t:file read;")
+        self.assertEqual(a.to_string(), "allow foo_t bar_t:file read;")
 
         a.rule_type = a.DONTAUDIT
         a.src_types.add("user_t")
@@ -100,17 +100,17 @@ class TestAVRule(unittest.TestCase):
         a.obj_classes.add("lnk_file")
         a.perms.add("write")
         # This test might need to go because set ordering is not guaranteed
-        self.assertEquals(a.to_string(),
+        self.assertEqual(a.to_string(),
                           "dontaudit { foo_t user_t } { user_home_t bar_t }:{ lnk_file file } { read write };")
 
 class TestTypeRule(unittest.TestCase):
     def test_init(self):
         a = refpolicy.TypeRule()
-        self.assertEquals(a.rule_type, a.TYPE_TRANSITION)
+        self.assertEqual(a.rule_type, a.TYPE_TRANSITION)
         self.assertTrue(isinstance(a.src_types, set))
         self.assertTrue(isinstance(a.tgt_types, set))
         self.assertTrue(isinstance(a.obj_classes, set))
-        self.assertEquals(a.dest_type, "")
+        self.assertEqual(a.dest_type, "")
 
     def test_to_string(self):
         a = refpolicy.TypeRule()
@@ -118,7 +118,7 @@ class TestTypeRule(unittest.TestCase):
         a.tgt_types.add("bar_exec_t")
         a.obj_classes.add("process")
         a.dest_type = "bar_t"
-        self.assertEquals(a.to_string(), "type_transition foo_t bar_exec_t:process bar_t;")
+        self.assertEqual(a.to_string(), "type_transition foo_t bar_exec_t:process bar_t;")
 
 
 class TestParseNode(unittest.TestCase):
-- 
2.4.3
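
A note on the new encode() helper in yacc.py: Python 3 hash objects
accept bytes only, which is why every string fed into the grammar
signature now goes through encode() first. The standalone sketch below
is my own illustration, not sepolgen code -- it assumes "import locale"
is also added near the top of yacc.py, and uses hashlib.md5 as a
stand-in for yacc's Signature object:

    # Standalone sketch: strings must become bytes before they can
    # be fed to a hash object under Python 3.
    import hashlib
    import locale

    def encode(text):
        # Prefer the system encoding; fall back to UTF-8 when the
        # preferred encoding (e.g. ASCII) cannot represent the text.
        try:
            return text.encode(locale.getpreferredencoding())
        except UnicodeError:
            return text.encode('utf-8')

    signature = hashlib.md5()
    signature.update(encode("LALR"))   # update() rejects str on py3
    print(signature.hexdigest())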
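
On the cmp() removals in the tests: Python 3 drops both the cmp()
builtin and __cmp__, so ordering must come from rich comparison
methods, and the updated tests exercise ==, < and > directly instead.
A minimal sketch of the porting pattern, using a hypothetical class
rather than the real Match or AccessVector code:

    # Porting sketch: derive all rich comparisons from one ordering
    # key instead of a py2-style __cmp__ method.
    import functools

    @functools.total_ordering
    class Item:
        def __init__(self, dist, name):
            self.dist = dist
            self.name = name

        def _key(self):
            # One tuple replaces the cascaded cmp() calls of py2.
            return (self.dist, self.name)

        def __eq__(self, other):
            return self._key() == other._key()

        def __lt__(self, other):
            return self._key() < other._key()

    a, b = Item(100, "x"), Item(200, "x")
    print(a < b, a == b, a > b)   # True False False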
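
Most of the remaining yacc.py churn is mechanical: d.has_key(k) becomes
"k in d", keys() now returns a view that cannot be sorted in place
(hence the list(...) wrappers), sys.maxint becomes sys.maxsize, and
list.sort() lost its cmp argument, so the p_ rule functions are sorted
with a key function. A small self-contained sketch of that last change:

    # Sketch of the cmp -> key migration: sort rule functions by the
    # line on which they are defined.
    def demo():
        def p_second(p): "second :"
        def p_first(p): "first :"
        funcs = [p_first, p_second]
        # py2: funcs.sort(lambda x, y: cmp(x.func_code.co_firstlineno,
        #                                  y.func_code.co_firstlineno))
        funcs.sort(key=lambda f: f.__code__.co_firstlineno)
        return [f.__name__ for f in funcs]

    print(demo())   # ['p_second', 'p_first'] -- definition order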
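
Finally, the exception-syntax changes follow one pattern throughout:
"raise YaccError, msg" becomes raise YaccError(msg), "except IOError, e"
becomes "except IOError as e", and StandardError (gone in Python 3) is
replaced by Exception. Both new spellings are also valid on Python 2.6+,
so the module stays importable under either interpreter:

    # Py2.6+/py3-compatible exception handling as used in the patch.
    class YaccError(Exception):
        pass

    try:
        raise YaccError("Hosed in lr_parse_table")
    except YaccError as e:   # "except YaccError, e" is a py3 SyntaxError
        print("caught: %s" % e)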

_______________________________________________
Selinux mailing list
Selinux@xxxxxxxxxxxxx
To unsubscribe, send email to Selinux-leave@xxxxxxxxxxxxx.
To get help, send an email containing "help" to Selinux-request@xxxxxxxxxxxxx.
