[gtk-doc] py: switch to python3



commit 58de11f918e7ce60e28ee891312f16f05aeedba6
Author: Stefan Sauer <ensonic users sf net>
Date:   Fri May 4 07:57:43 2018 +0200

    py: switch to python3
    
    Require python-3.2 and drop support for python 2.X. The 'python-six'
    package is not required anymore.

 NEWS               |    3 +-
 configure.ac       |    2 +-
 gtkdoc/check.py    |   30 +++++++---------
 gtkdoc/common.py   |   36 +------------------
 gtkdoc/fixxref.py  |   13 +++----
 gtkdoc/mkdb.py     |   95 +++++++++++++++++++++++++---------------------------
 gtkdoc/mkpdf.py    |    3 --
 gtkdoc/rebase.py   |   13 +++----
 gtkdoc/scan.py     |   17 ++++-----
 gtkdoc/scangobj.py |    4 +-
 10 files changed, 83 insertions(+), 133 deletions(-)
---
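
Most of the patch is mechanical: the common.open_text() compatibility wrapper is deleted and every caller switches to the built-in open(), which on Python 3 takes an explicit encoding argument. A minimal sketch of the before/after pattern, assuming a throwaway file path that is not taken from the patch:

    # Before (Python 2/3 compatible, needed the wrapper in gtkdoc/common.py):
    #     content = common.open_text(filename).read()
    # After (plain Python 3, as used throughout this commit):
    filename = 'example.txt'                       # placeholder path
    with open(filename, 'w', encoding='utf-8') as f:
        f.write('UTF-8 text, independent of the locale encoding\n')
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
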
diff --git a/NEWS b/NEWS
index 60d5111..3339acb 100644
--- a/NEWS
+++ b/NEWS
@@ -1,7 +1,8 @@
 GTK-Doc 1.28.1  (Mar 24 2018)
 ==============
 
-Development
+GTK-Doc now requires python-3.X. It does not require python-six anymore.
+
 
 GTK-Doc 1.28  (Mar 24 2018)
 ==============
diff --git a/configure.ac b/configure.ac
index 14dee8c..494d95f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -32,7 +32,7 @@ PKG_PROG_PKG_CONFIG([0.19])
 dnl
 dnl Check for Python.
 dnl
-AM_PATH_PYTHON([2.7])
+AM_PATH_PYTHON([3.2])
 
 dnl
 dnl Check for xsltproc
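
configure.ac raises the AM_PATH_PYTHON requirement from 2.7 to 3.2. For running the modules outside an autotools build, an equivalent interpreter guard could look like the sketch below; this check is illustrative only and is not part of the patch:

    import sys

    # Hypothetical guard mirroring AM_PATH_PYTHON([3.2]); not in the patch.
    if sys.version_info < (3, 2):
        sys.exit('gtk-doc requires Python 3.2 or newer')
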
diff --git a/gtkdoc/check.py b/gtkdoc/check.py
index 7f32b60..b410910 100755
--- a/gtkdoc/check.py
+++ b/gtkdoc/check.py
 results. Can be run during make check, by adding this to the documentation's
 Makefile.am: TESTS = $(GTKDOC_CHECK).
 """
 
-# Support both Python 2 and 3
-from __future__ import print_function
-
 import os
 import re
 from glob import glob
@@ -53,17 +50,21 @@ def check_empty(filename):
     return count
 
 
+def read_file(filename):
+    with open(filename, 'r', encoding='utf-8') as f:
+        return f.read().splitlines()
+
+
 def check_includes(filename):
     # Check that each XML file in the xml directory is included in doc_main_file
-    with common.open_text(filename) as f:
-        lines = f.read().splitlines()
-        num_missing = 0
-        for include in glob('xml/*.xml'):
-            try:
-                next(line for line in lines if include in line)
-            except StopIteration:
-                num_missing += 1
-                print('%s:1:E: doesn\'t appear to include "%s"' % (filename, include))
+    lines = read_file(filename)
+    num_missing = 0
+    for include in glob('xml/*.xml'):
+        try:
+            next(line for line in lines if include in line)
+        except StopIteration:
+            num_missing += 1
+            print('%s:1:E: doesn\'t appear to include "%s"' % (filename, include))
 
     return num_missing
 
@@ -74,11 +75,6 @@ def get_variable(env, lines, variable):
     return value
 
 
-def read_file(filename):
-    with common.open_text(filename) as f:
-        return f.read().splitlines()
-
-
 def run_tests(workdir, doc_module, doc_main_file):
     checks = 4
 
diff --git a/gtkdoc/common.py b/gtkdoc/common.py
index f62e6de..bd11950 100644
--- a/gtkdoc/common.py
+++ b/gtkdoc/common.py
@@ -19,44 +19,16 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 #
 
-# Support both Python 2 and 3
-from __future__ import print_function
-
 from collections import OrderedDict
 import logging
 import os
 import re
 import subprocess
 import sys
-import six
-import codecs
 
 from . import config
 
 
-def open_text(filename, mode='r', encoding='utf-8'):
-    """An open() which removes some differences between Python 2 and 3 and
-    has saner defaults.
-
-    Unlike the builtin open by default utf-8 is use and not the locale
-    encoding (which is ANSI on Windows for example, not very helpful)
-
-    For Python 2, files are opened in text mode like with Python 3.
-    """
-
-    if mode not in ('r', 'w'):
-        raise ValueError("mode %r not supported, must be 'r' or 'w'" % mode)
-
-    if six.PY3:
-        return open(filename, mode, encoding=encoding)
-    else:
-        # We can't use io.open() here as its write method is too strict and
-        # only allows unicode instances and not everything in the codebase
-        # forces unicode at the moment. codecs.open() on the other hand
-        # happily takes ASCII str and decodes it.
-        return codecs.open(filename, mode, encoding=encoding)
-
-
 def setup_logging():
     """Check GTKDOC_TRACE environment variable.
 
@@ -69,16 +41,12 @@ def setup_logging():
     logging.basicConfig(stream=sys.stdout,
                         level=logging.getLevelName(log_level.upper()),
                         format='%(asctime)s:%(filename)s:%(funcName)s:%(lineno)d:%(levelname)s:%(message)s')
-    # When redirecting the output on python2 or if run with a non utf-8 locale
+    # When redirecting the output and running with a non utf-8 locale
     # we get UnicodeEncodeError:
     encoding = sys.stdout.encoding
     if 'PYTHONIOENCODING' not in os.environ and (not encoding or encoding != 'UTF-8'):
         sys.stdout.flush()
-        if six.PY3:
-            sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf8', buffering=1)
-        else:
-            import codecs
-            sys.stdout = codecs.getwriter('utf8')(sys.stdout)
+        sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf8', buffering=1)
 
 
 def UpdateFileIfChanged(old_file, new_file, make_backup):
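
With the wrapper gone, common.py keeps only the Python 3 branch of setup_logging(): when stdout is not already UTF-8 and PYTHONIOENCODING is unset, the stream is reopened on the same file descriptor as line-buffered UTF-8 text. A standalone sketch of that idiom (force_utf8_stdout is a hypothetical name, not a function from the patch):

    import os
    import sys

    def force_utf8_stdout():
        # Same idiom as the patched setup_logging(): avoid UnicodeEncodeError
        # when output is redirected under a non-UTF-8 locale.
        encoding = sys.stdout.encoding
        if 'PYTHONIOENCODING' not in os.environ and (not encoding or encoding != 'UTF-8'):
            sys.stdout.flush()
            sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf8', buffering=1)

    force_utf8_stdout()
    print('works even when stdout is piped with LC_ALL=C')
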
diff --git a/gtkdoc/fixxref.py b/gtkdoc/fixxref.py
index d79d75c..daba928 100755
--- a/gtkdoc/fixxref.py
+++ b/gtkdoc/fixxref.py
@@ -21,9 +21,6 @@
 
 ''"Fix cross-references in the HTML documentation.''"
 
-# Support both Python 2 and 3
-from __future__ import print_function
-
 import logging
 import os
 import re
@@ -183,7 +180,7 @@ def ReadDevhelp(file, use_absolute_links):
 
     logging.info('Scanning index file=%s, absolute=%d, dir=%s', file, use_absolute_links, dir)
 
-    for line in common.open_text(file):
+    for line in open(file, 'r', encoding='utf-8'):
         m = re.search(r' link="([^#]*)#([^"]*)"', line)
         if m:
             link = m.group(1) + '#' + m.group(2)
@@ -193,7 +190,7 @@ def ReadDevhelp(file, use_absolute_links):
 
 def ReadSections(module):
     """We don't warn on missing links to non-public sysmbols."""
-    for line in common.open_text(module + '-sections.txt'):
+    for line in open(module + '-sections.txt', 'r', encoding='utf-8'):
         m1 = re.search(r'^<SUBSECTION\s*(.*)>', line)
         if line.startswith('#') or line.strip() == '':
             continue
@@ -228,7 +225,7 @@ def FixCrossReferences(module_dir, module, src_lang):
 def FixHTMLFile(src_lang, module, file):
     logging.info('Fixing file: %s', file)
 
-    content = common.open_text(file).read()
+    content = open(file, 'r', encoding='utf-8').read()
 
     if config.highlight:
         # FIXME: ideally we'd pass a clue about the example language to the highlighter
@@ -272,7 +269,7 @@ def FixHTMLFile(src_lang, module, file):
 
     new_file = file + '.new'
     content = '\n'.join(lines)
-    with common.open_text(new_file, 'w') as h:
+    with open(new_file, 'w', encoding='utf-8') as h:
         h.write(content)
 
     os.unlink(file)
@@ -412,7 +409,7 @@ def HighlightSourceVim(src_lang, type, source):
         script += "%s -n -e -u NONE -T xterm >/dev/null" % config.highlight
         subprocess.check_call([script], shell=True)
 
-        highlighted_source = common.open_text(temp_source_file + ".html").read()
+        highlighted_source = open(temp_source_file + ".html", 'r', encoding='utf-8').read()
         highlighted_source = re.sub(r'.*<pre\b[^>]*>\n', '', highlighted_source, flags=re.DOTALL)
         highlighted_source = re.sub(r'</pre>.*', '', highlighted_source, flags=re.DOTALL)
 
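
fixxref.py receives the same treatment for the devhelp index, the -sections.txt file and the generated HTML. A trimmed, self-contained sketch of the ReadDevhelp() loop as it reads after the patch; the function name and return value are simplified here, only the regex is taken verbatim from the code:

    import re

    def read_devhelp_links(path):
        # Iterating an open() file object yields decoded str lines on Python 3,
        # so link targets can be matched directly.
        links = {}
        for line in open(path, 'r', encoding='utf-8'):
            m = re.search(r' link="([^#]*)#([^"]*)"', line)
            if m:
                links[m.group(2)] = m.group(1) + '#' + m.group(2)
        return links
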
diff --git a/gtkdoc/mkdb.py b/gtkdoc/mkdb.py
index f5e8995..4cceabc 100644
--- a/gtkdoc/mkdb.py
+++ b/gtkdoc/mkdb.py
@@ -23,9 +23,6 @@
 Creates the DocBook files from the source comments.
 """
 
-from __future__ import print_function
-from six import iteritems, iterkeys
-
 from collections import OrderedDict
 import logging
 import os
@@ -318,7 +315,7 @@ def OutputObjectList():
     old_object_index = os.path.join(DB_OUTPUT_DIR, "object_index.sgml")
     new_object_index = os.path.join(DB_OUTPUT_DIR, "object_index.new")
 
-    OUTPUT = common.open_text(new_object_index, 'w')
+    OUTPUT = open(new_object_index, 'w', encoding='utf-8')
 
     OUTPUT.write('''%s
 <informaltable pgwide="1" frame="none">
@@ -380,7 +377,7 @@ def OutputDB(file, options):
     """
 
     logging.info("Reading: %s", file)
-    INPUT = common.open_text(file)
+    INPUT = open(file, 'r', encoding='utf-8')
     filename = ''
     book_top = ''
     book_bottom = ''
@@ -817,7 +814,7 @@ def DetermineNamespace():
     while True:
         prefix = {}
         letter = ''
-        for symbol in iterkeys(IndexEntriesFull):
+        for symbol in IndexEntriesFull.keys():
             if name_space == '' or name_space.lower() in symbol.lower():
                 if len(symbol) > pos:
                     letter = symbol[pos:pos + 1]
@@ -839,7 +836,7 @@ def DetermineNamespace():
         if letter != '' and letter != "_":
             maxletter = ''
             maxsymbols = 0
-            for letter in iterkeys(prefix):
+            for letter in prefix.keys():
                 logging.debug("ns prefix: %s: %s", letter, prefix[letter])
                 if prefix[letter] > maxsymbols:
                     maxletter = letter
@@ -886,7 +883,7 @@ def OutputIndex(basename, apiindex):
         {
             'original': x,
             'short': re.sub(r'^' + NAME_SPACE + r'\_?(.*)', r'\1', x.upper(), flags=re.I),
-        } for x in iterkeys(apiindex)]
+        } for x in apiindex.keys()]
     sorted_keys = sorted(mapped_keys, key=lambda d: (d['short'], d['original']))
 
     for key in sorted_keys:
@@ -975,7 +972,7 @@ def OutputSinceIndexes():
     """Generate the 'since' api index files."""
     for version in set(Since.values()):
         logging.info("Since : [%s]", version)
-        index = {x: IndexEntriesSince[x] for x in iterkeys(IndexEntriesSince) if Since[x] == version}
+        index = {x: IndexEntriesSince[x] for x in IndexEntriesSince.keys() if Since[x] == version}
         OutputIndex("api-index-" + version, index)
 
 
@@ -1006,14 +1003,14 @@ def OutputAnnotationGlossary():
                 rerun = True
                 break
 
-    OUTPUT = common.open_text(new_glossary, 'w')
+    OUTPUT = open(new_glossary, 'w', encoding='utf-8')
 
     OUTPUT.write('''%s
 <glossary id="annotation-glossary">
   <title>Annotation Glossary</title>
 ''' % MakeDocHeader("glossary"))
 
-    for annotation in sorted(iterkeys(AnnotationsUsed), key=str.lower):
+    for annotation in sorted(AnnotationsUsed.keys(), key=str.lower):
         if annotation in AnnotationDefinition:
             definition = AnnotationDefinition[annotation]
             curletter = annotation[0].upper()
@@ -1054,7 +1051,7 @@ def ReadKnownSymbols(file):
     subsection = ''
 
     logging.info("Reading: %s", file)
-    INPUT = common.open_text(file)
+    INPUT = open(file, 'r', encoding='utf-8')
     for line in INPUT:
         if line.startswith('#'):
             continue
@@ -1444,7 +1441,7 @@ def OutputStruct(symbol, declaration):
 <tbody>
 ''' % sid
 
-        for field_name, text in iteritems(fields):
+        for field_name, text in fields.items():
             param_annotations = ''
 
             desc += "<row role=\"member\"><entry role=\"struct_member_name\"><para>%s</para></entry>\n" % 
text
@@ -1586,7 +1583,7 @@ def OutputUnion(symbol, declaration):
 <tbody>
 ''' % sid
 
-        for field_name, text in iteritems(fields):
+        for field_name, text in fields.items():
             param_annotations = ''
 
             desc += "<row><entry role=\"union_member_name\"><para>%s</para></entry>\n" % text
@@ -1908,7 +1905,7 @@ def OutputFunction(symbol, declaration, symbol_type):
         if param_annotations != '':
             desc += "\n<para>%s</para>" % param_annotations
 
-    desc += OutputParamDescriptions("FUNCTION", symbol, iterkeys(fields))
+    desc += OutputParamDescriptions("FUNCTION", symbol, fields.keys())
     desc += OutputSymbolTraits(symbol)
     desc += "</refsect2>\n"
     return (synop, desc)
@@ -1948,7 +1945,7 @@ def OutputParamDescriptions(symbol_type, symbol, fields):
         missing_parameters = ''
         unused_parameters = ''
 
-        for param_name, param_desc in iteritems(params):
+        for param_name, param_desc in params.items():
             (param_desc, param_annotations) = ExpandAnnotation(symbol, param_desc)
             param_desc = ConvertMarkDown(symbol, param_desc)
             # trim
@@ -2169,7 +2166,7 @@ def OutputDBFile(file, title, section_id, includes, functions_synop, other_synop
     old_db_file = os.path.join(DB_OUTPUT_DIR, file + '.xml')
     new_db_file = os.path.join(DB_OUTPUT_DIR, file + '.xml.new')
 
-    OUTPUT = common.open_text(new_db_file, 'w')
+    OUTPUT = open(new_db_file, 'w', encoding='utf-8')
 
     object_anchors = ''
     for fobject in file_objects:
@@ -2336,7 +2333,7 @@ def OutputProgramDBFile(program, section_id):
     old_db_file = os.path.join(DB_OUTPUT_DIR, program + ".xml")
     new_db_file = os.path.join(DB_OUTPUT_DIR, program + ".xml.new")
 
-    OUTPUT = common.open_text(new_db_file, 'w')
+    OUTPUT = open(new_db_file, 'w', encoding='utf-8')
 
     OUTPUT.write('''%s
 <refentry id="%s">
@@ -2379,9 +2376,9 @@ def OutputExtraFile(file):
     old_db_file = os.path.join(DB_OUTPUT_DIR, basename)
     new_db_file = os.path.join(DB_OUTPUT_DIR, basename + ".new")
 
-    contents = common.open_text(file).read()
+    contents = open(file, 'r', encoding='utf-8').read()
 
-    OUTPUT = common.open_text(new_db_file, 'w')
+    OUTPUT = open(new_db_file, 'w', encoding='utf-8')
     OUTPUT.write(ExpandAbbreviations(basename + " file", contents))
     OUTPUT.close()
 
@@ -2390,7 +2387,7 @@ def OutputExtraFile(file):
 
 def GetDocbookHeader(main_file):
     if os.path.exists(main_file):
-        INPUT = common.open_text(main_file)
+        INPUT = open(main_file, 'r', encoding='utf-8')
         header = ''
         for line in INPUT:
             if re.search(r'^\s*<(book|chapter|article)', line):
@@ -2433,7 +2430,7 @@ def OutputBook(main_file, book_top, book_bottom):
     old_file = os.path.join(DB_OUTPUT_DIR, MODULE + "-doc.top")
     new_file = os.path.join(DB_OUTPUT_DIR, MODULE + "-doc.top.new")
 
-    OUTPUT = common.open_text(new_file, 'w')
+    OUTPUT = open(new_file, 'w', encoding='utf-8')
     OUTPUT.write(book_top)
     OUTPUT.close()
 
@@ -2442,7 +2439,7 @@ def OutputBook(main_file, book_top, book_bottom):
     old_file = os.path.join(DB_OUTPUT_DIR, MODULE + "-doc.bottom")
     new_file = os.path.join(DB_OUTPUT_DIR, MODULE + "-doc.bottom.new")
 
-    OUTPUT = common.open_text(new_file, 'w')
+    OUTPUT = open(new_file, 'w', encoding='utf-8')
     OUTPUT.write(book_bottom)
     OUTPUT.close()
 
@@ -2451,7 +2448,7 @@ def OutputBook(main_file, book_top, book_bottom):
     # If the main docbook file hasn't been created yet, we create it here.
     # The user can tweak it later.
     if main_file and not os.path.exists(main_file):
-        OUTPUT = common.open_text(main_file, 'w')
+        OUTPUT = open(main_file, 'w', encoding='utf-8')
 
         logging.info("no master doc, create default one at: " + main_file)
 
@@ -3678,7 +3675,7 @@ def ScanSourceFile(ifile, ignore_files):
 
     logging.info("Scanning source file: %s", ifile)
 
-    SRCFILE = common.open_text(ifile)
+    SRCFILE = open(ifile, 'r', encoding='utf-8')
     in_comment_block = False
     symbol = None
     in_part = ''
@@ -3727,7 +3724,7 @@ def ScanSourceFile(ifile, ignore_files):
 
                 # Convert special characters
                 description = ConvertSGMLChars(symbol, description)
-                for (param_name, param_desc) in iteritems(params):
+                for (param_name, param_desc) in params.items():
                     params[param_name] = ConvertSGMLChars(symbol, param_desc)
 
                 # Handle Section docs
@@ -3742,7 +3739,7 @@ def ScanSourceFile(ifile, ignore_files):
                             ifile, line_number, "Section %s is not defined in the %s-sections.txt file." % (real_symbol, MODULE))
 
                     logging.info("SECTION DOCS found in source for : '%s'", real_symbol)
-                    for param_name, param_desc in iteritems(params):
+                    for param_name, param_desc in params.items():
                         logging.info("   '" + param_name + "'")
                         param_name = param_name.lower()
                         key = None
@@ -3775,7 +3772,7 @@ def ScanSourceFile(ifile, ignore_files):
                     section_id = None
 
                     logging.info("PROGRAM DOCS found in source for '%s'", real_symbol)
-                    for param_name, param_desc in iteritems(params):
+                    for param_name, param_desc in params.items():
                         logging.info("PROGRAM key %s: '%s'", real_symbol, param_name)
                         param_name = param_name.lower()
                         key = None
@@ -3996,9 +3993,9 @@ def OutputMissingDocumentation():
     buffer_deprecated = ''
     buffer_descriptions = ''
 
-    UNDOCUMENTED = common.open_text(new_undocumented_file, 'w')
+    UNDOCUMENTED = open(new_undocumented_file, 'w', encoding='utf-8')
 
-    for symbol in sorted(iterkeys(AllSymbols)):
+    for symbol in sorted(AllSymbols.keys()):
         # FIXME: should we print common.LogWarnings for undocumented stuff?
         # DEBUG
         # location = "defined at " + GetSymbolSourceFile(symbol) + ":" + GetSymbolSourceLine(symbol) + "\n"
@@ -4072,10 +4069,10 @@ def OutputUndeclaredSymbols():
     old_undeclared_file = os.path.join(ROOT_DIR, MODULE + "-undeclared.txt")
     new_undeclared_file = os.path.join(ROOT_DIR, MODULE + "-undeclared.new")
 
-    UNDECLARED = common.open_text(new_undeclared_file, 'w')
+    UNDECLARED = open(new_undeclared_file, 'w', encoding='utf-8')
 
     if UndeclaredSymbols:
-        UNDECLARED.write("\n".join(sorted(iterkeys(UndeclaredSymbols))))
+        UNDECLARED.write("\n".join(sorted(UndeclaredSymbols.keys())))
         UNDECLARED.write("\n")
         print("See %s-undeclared.txt for the list of undeclared symbols." % MODULE)
 
@@ -4097,14 +4094,14 @@ def OutputUnusedSymbols():
     old_unused_file = os.path.join(ROOT_DIR, MODULE + "-unused.txt")
     new_unused_file = os.path.join(ROOT_DIR, MODULE + "-unused.new")
 
-    UNUSED = common.open_text(new_unused_file, 'w')
+    UNUSED = open(new_unused_file, 'w', encoding='utf-8')
 
-    for symbol in sorted(iterkeys(Declarations)):
+    for symbol in sorted(Declarations.keys()):
         if symbol not in DeclarationOutput:
             UNUSED.write("%s\n" % symbol)
             num_unused += 1
 
-    for symbol in sorted(iterkeys(AllUnusedSymbols)):
+    for symbol in sorted(AllUnusedSymbols.keys()):
         UNUSED.write(symbol + "(" + AllUnusedSymbols[symbol] + ")\n")
         num_unused += 1
 
@@ -4118,18 +4115,18 @@ def OutputUnusedSymbols():
 
 def OutputAllSymbols():
     """Outputs list of all symbols to a file."""
-    SYMBOLS = common.open_text(os.path.join(ROOT_DIR, MODULE + "-symbols.txt"), 'w')
+    SYMBOLS = open(os.path.join(ROOT_DIR, MODULE + "-symbols.txt"), 'w', encoding='utf-8')
 
-    for symbol in sorted(iterkeys(AllSymbols)):
+    for symbol in sorted(AllSymbols.keys()):
         SYMBOLS.write(symbol + "\n")
     SYMBOLS.close()
 
 
 def OutputSymbolsWithoutSince():
     """Outputs list of all symbols without a since tag to a file."""
-    SYMBOLS = common.open_text(os.path.join(ROOT_DIR, MODULE + "-nosince.txt"), 'w')
+    SYMBOLS = open(os.path.join(ROOT_DIR, MODULE + "-nosince.txt"), 'w', encoding='utf-8')
 
-    for symbol in sorted(iterkeys(SourceSymbolDocs)):
+    for symbol in sorted(SourceSymbolDocs.keys()):
         if symbol in Since:
             SYMBOLS.write(symbol + "\n")
     SYMBOLS.close()
@@ -4152,7 +4149,7 @@ def CheckParamsDocumented(symbol, params):
 
     if len(params) > 0:
         logging.info("params: %s", str(params))
-        for (param_name, param_desc) in iteritems(params):
+        for (param_name, param_desc) in params.items():
             # Output a warning if the parameter is empty and remember for stats.
             if param_name != "void" and not re.search(r'\S', param_desc):
                 if symbol in AllIncompleteSymbols:
@@ -4178,10 +4175,10 @@ def MergeSourceDocumentation():
     """
 
     # add whats found in the source
-    symbols = set(iterkeys(SourceSymbolDocs))
+    symbols = set(SourceSymbolDocs.keys())
 
     # and add known symbols from -sections.txt
-    for symbol in iterkeys(KnownSymbols):
+    for symbol in KnownSymbols.keys():
         if KnownSymbols[symbol] == 1:
             symbols.add(symbol)
 
@@ -4265,7 +4262,7 @@ def ReadDeclarationsFile(ifile, override):
         DeclarationConditional.clear()
         DeclarationOutput.clear()
 
-    INPUT = common.open_text(ifile)
+    INPUT = open(ifile, 'r', encoding='utf-8')
     declaration_type = ''
     declaration_name = None
     declaration = None
@@ -4380,7 +4377,7 @@ def ReadSignalsFile(ifile):
     if not os.path.isfile(ifile):
         return
 
-    INPUT = common.open_text(ifile)
+    INPUT = open(ifile, 'r', encoding='utf-8')
     line_number = 0
     for line in INPUT:
         line_number += 1
@@ -4446,7 +4443,7 @@ def ReadObjectHierarchy(ifile):
         logging.debug('no *-hierarchy.tx')
         return
 
-    INPUT = common.open_text(ifile)
+    INPUT = open(ifile, 'r', encoding='utf-8')
 
     # Only emit objects if they are supposed to be documented, or if
     # they have documented children. To implement this, we maintain a
@@ -4497,7 +4494,7 @@ def ReadObjectHierarchy(ifile):
 
     logging.debug('got %d entries for hierarchy', len(tree))
 
-    OUTPUT = common.open_text(new_tree_index, 'w')
+    OUTPUT = open(new_tree_index, 'w', encoding='utf-8')
     OUTPUT.write(MakeDocHeader("screen") + "\n<screen>\n" + AddTreeLineArt(tree) + "\n</screen>\n")
     OUTPUT.close()
 
@@ -4518,7 +4515,7 @@ def ReadInterfaces(ifile):
     if not os.path.isfile(ifile):
         return
 
-    INPUT = common.open_text(ifile)
+    INPUT = open(ifile, 'r', encoding='utf-8')
 
     for line in INPUT:
         line = line.strip()
@@ -4551,7 +4548,7 @@ def ReadPrerequisites(ifile):
     if not os.path.isfile(ifile):
         return
 
-    INPUT = common.open_text(ifile)
+    INPUT = open(ifile, 'r', encoding='utf-8')
 
     for line in INPUT:
         line = line.strip()
@@ -4602,7 +4599,7 @@ def ReadArgsFile(ifile):
     if not os.path.isfile(ifile):
         return
 
-    INPUT = common.open_text(ifile)
+    INPUT = open(ifile, 'r', encoding='utf-8')
     line_number = 0
     for line in INPUT:
         line_number += 1
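
Besides the open() conversion, mkdb.py drops six.iteritems()/six.iterkeys() in favour of the native dict views, which are already lazy on Python 3. A minimal sketch of the substitution (the dictionary is a stand-in, not one of mkdb's symbol tables):

    params = {'self': 'the object', 'error': 'return location for a GError'}

    # Removed Python 2 spelling:
    #     for name, desc in six.iteritems(params): ...
    #     for name in sorted(six.iterkeys(params)): ...
    # Python 3 replacement used throughout mkdb.py:
    for name, desc in params.items():
        print(name, '-', desc)
    for name in sorted(params.keys()):
        print(name)
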
diff --git a/gtkdoc/mkpdf.py b/gtkdoc/mkpdf.py
index 6a034da..beaaf5c 100755
--- a/gtkdoc/mkpdf.py
+++ b/gtkdoc/mkpdf.py
@@ -19,9 +19,6 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 #
 
-# Support both Python 2 and 3
-from __future__ import print_function
-
 import logging
 import os
 import sys
diff --git a/gtkdoc/rebase.py b/gtkdoc/rebase.py
index 75713ce..424c3e6 100755
--- a/gtkdoc/rebase.py
+++ b/gtkdoc/rebase.py
@@ -24,9 +24,6 @@
 The rebase tool rewrites URI references in installed HTML documentation.
 """
 
-from __future__ import print_function
-from six import iteritems, iterkeys
-
 import logging
 import os
 import re
@@ -136,7 +133,7 @@ gunzip %s/%s
 def ReadDevhelp(dir, file):
     onlinedir = None
 
-    for line in common.open_text(os.path.join(dir, file)):
+    for line in open(os.path.join(dir, file), mode='r', encoding='utf-8'):
         # online must come before chapter/functions
         if '<chapters' in line or '<functions' in line:
             break
@@ -150,7 +147,7 @@ def ReadDevhelp(dir, file):
 def ReadIndex(dir, file):
     onlinedir = None
 
-    for line in common.open_text(os.path.join(dir, file)):
+    for line in open(os.path.join(dir, file), mode='r', encoding='utf-8'):
         # ONLINE must come before any ANCHORs
         if '<ANCHOR' in line:
             break
@@ -207,10 +204,10 @@ def RebaseFile(filename, options):
     def repl_func(match):
         return match.group(1) + RebaseLink(match.group(2), options) + match.group(3)
 
-    contents = common.open_text(filename).read()
+    contents = open(filename, mode='r', encoding='utf-8').read()
     processed = re.sub(regex, repl_func, contents)
     newfilename = filename + '.new'
-    with common.open_text(newfilename, 'w') as h:
+    with open(newfilename, mode='w', encoding='utf-8') as h:
         h.write(processed)
     os.unlink(filename)
     os.rename(newfilename, filename)
@@ -252,6 +249,6 @@ def RebaseLink(href, options):
 
 
 def PrintWhatWeHaveDone():
-    for origdir in sorted(iterkeys(Mapped)):
+    for origdir in sorted(Mapped.keys()):
         info = Mapped[origdir]
         print(origdir, "->", info[0], "(%s)" % info[1])
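
rebase.py follows the same pattern: RebaseFile() reads the HTML as UTF-8, rewrites the links, writes the result to a '.new' sibling and swaps it into place. A simplified, self-contained sketch of that cycle; the substitution here is a plain string replacement rather than the real link-rewriting regex:

    import os
    import re

    def rebase_file(filename, old_base, new_base):
        # read -> substitute -> write .new -> replace, as in the patched RebaseFile()
        contents = open(filename, 'r', encoding='utf-8').read()
        processed = re.sub(re.escape(old_base), new_base, contents)
        newfilename = filename + '.new'
        with open(newfilename, 'w', encoding='utf-8') as h:
            h.write(processed)
        os.unlink(filename)
        os.rename(newfilename, filename)
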
diff --git a/gtkdoc/scan.py b/gtkdoc/scan.py
index cd0725e..f1f1672 100644
--- a/gtkdoc/scan.py
+++ b/gtkdoc/scan.py
@@ -33,9 +33,6 @@ This second list file is typically copied to '$MODULE-sections.txt' and
 organized into sections ready to output the XML pages.
 """
 
-from __future__ import print_function
-from six import iteritems, iterkeys
-
 import logging
 import os
 import re
@@ -77,16 +74,16 @@ def Run(options):
     for dir in options.source_dir:
         ScanHeaders(dir, section_list, decl_list, get_types, options)
 
-    with common.open_text(new_decl_list, 'w') as f:
-        for section in sorted(iterkeys(section_list)):
+    with open(new_decl_list, 'w', encoding='utf-8') as f:
+        for section in sorted(section_list.keys()):
             f.write(section_list[section])
 
-    with common.open_text(new_decl, 'w') as f:
+    with open(new_decl, 'w', encoding='utf-8') as f:
         for decl in decl_list:
             f.write(decl)
 
     if options.rebuild_types:
-        with common.open_text(new_types, 'w') as f:
+        with open(new_types, 'w', encoding='utf-8') as f:
             for func in sorted(get_types):
                 f.write(func + '\n')
 
@@ -113,7 +110,7 @@ def Run(options):
     # because EXTRA_DIST in gtk-doc.make requires it.
     overrides_file = base_filename + '-overrides.txt'
     if not os.path.exists(overrides_file):
-        open(overrides_file, 'w').close()
+        open(overrides_file, 'w', encoding='utf-8').close()
 
 
 #
@@ -226,7 +223,7 @@ def ScanHeader(input_file, section_list, decl_list, get_types, options):
 
     logging.info('Scanning %s', input_file)
 
-    for line in common.open_text(input_file):
+    for line in open(input_file, 'r', encoding='utf-8'):
         # If this is a private header, skip it.
         if re.search(r'^\s*/\*\s*<\s*private_header\s*>\s*\*/', line):
             return
@@ -773,7 +770,7 @@ def ScanHeader(input_file, section_list, decl_list, get_types, options):
         previous_line = line
 
     # print remaining forward declarations
-    for symbol in sorted(iterkeys(forward_decls)):
+    for symbol in sorted(forward_decls.keys()):
         if forward_decls[symbol]:
             AddSymbolToList(slist, symbol)
             decl_list.append(forward_decls[symbol])
diff --git a/gtkdoc/scangobj.py b/gtkdoc/scangobj.py
index 4ad2717..237863c 100644
--- a/gtkdoc/scangobj.py
+++ b/gtkdoc/scangobj.py
@@ -1206,7 +1206,7 @@ def run(options):
     logging.info('options: %s', str(options.__dict__))
 
     c_file = options.module + '-scan.c'
-    output = common.open_text(c_file, 'w')
+    output = open(c_file, 'w', encoding='utf-8')
 
     base_filename = os.path.join(options.output_dir, options.module)
     old_signals_filename = base_filename + '.signals'
@@ -1227,7 +1227,7 @@ def run(options):
     get_types = ""
     ntypes = 1
 
-    for line in common.open_text(options.types):
+    for line in open(options.types, 'r', encoding='utf-8'):
         if line.startswith('#include'):
             includes += line
         elif line.startswith('%') or line.strip() == '':

