Preliminary work on Poetry integration
[etc/vim.git] / blib2to3 / pgen2 / driver.py
index eabc72b779571f2aa2b46b25ce81f2d495660819..af7ca1799ca8dc733871755115c5d4ad89c5692a 100644
@@ -43,6 +43,7 @@ class Driver(object):
         p.setup()
         lineno = 1
         column = 0
+        indent_columns = []
         type = value = start = end = line_text = None
         prefix = ""
         for quintuple in tokens:
@@ -69,15 +70,25 @@ class Driver(object):
             if debug:
                 self.logger.debug("%s %r (prefix=%r)",
                                   token.tok_name[type], value, prefix)
-            if type == token.DEDENT:
+            if type in {token.INDENT, token.DEDENT}:
                 _prefix = prefix
                 prefix = ""
+            if type == token.DEDENT:
+                _indent_col = indent_columns.pop()
+                prefix, _prefix = self._partially_consume_prefix(_prefix, _indent_col)
             if p.addtoken(type, value, (prefix, start)):
                 if debug:
                     self.logger.debug("Stop.")
                 break
             prefix = ""
-            if type == token.DEDENT:
+            if type == token.INDENT:
+                indent_columns.append(len(value))
+                if _prefix.startswith(value):
+                    # Don't double-indent.  Since we're delaying the prefix that
+                    # would normally belong to INDENT, we need to put the value
+                    # at the end versus at the beginning.
+                    _prefix = _prefix[len(value):] + value
+            if type in {token.INDENT, token.DEDENT}:
                 prefix = _prefix
             lineno, column = end
             if value.endswith("\n"):
@@ -108,12 +119,45 @@ class Driver(object):
         tokens = tokenize.generate_tokens(io.StringIO(text).readline)
         return self.parse_tokens(tokens, debug)
 
-
-def _generate_pickle_name(gt):
+    def _partially_consume_prefix(self, prefix, column):
+        lines = []
+        current_line = ""
+        current_column = 0
+        wait_for_nl = False
+        for char in prefix:
+            current_line += char
+            if wait_for_nl:
+                if char == '\n':
+                    if current_line.strip() and current_column < column:
+                        res = ''.join(lines)
+                        return res, prefix[len(res):]
+
+                    lines.append(current_line)
+                    current_line = ""
+                    current_column = 0
+                    wait_for_nl = False
+            elif char == ' ':
+                current_column += 1
+            elif char == '\t':
+                current_column += 4
+            elif char == '\n':
+                # unexpected empty line
+                current_column = 0
+            else:
+                # indent is finished
+                wait_for_nl = True
+        return ''.join(lines), current_line
+
+
+def _generate_pickle_name(gt, cache_dir=None):
     head, tail = os.path.splitext(gt)
     if tail == ".txt":
         tail = ""
-    return head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
+    name = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
+    if cache_dir:
+        return os.path.join(cache_dir, os.path.basename(name))
+    else:
+        return name
 
 
 def load_grammar(gt="Grammar.txt", gp=None,
@@ -146,7 +190,7 @@ def _newer(a, b):
     return os.path.getmtime(a) >= os.path.getmtime(b)
 
 
-def load_packaged_grammar(package, grammar_source):
+def load_packaged_grammar(package, grammar_source, cache_dir=None):
     """Normally, loads a pickled grammar by doing
         pkgutil.get_data(package, pickled_grammar)
     where *pickled_grammar* is computed from *grammar_source* by adding the
     """Normally, loads a pickled grammar by doing
         pkgutil.get_data(package, pickled_grammar)
     where *pickled_grammar* is computed from *grammar_source* by adding the
@@ -158,8 +202,9 @@ def load_packaged_grammar(package, grammar_source):
 
     """
     if os.path.isfile(grammar_source):
-        return load_grammar(grammar_source)
-    pickled_name = _generate_pickle_name(os.path.basename(grammar_source))
+        gp = _generate_pickle_name(grammar_source, cache_dir) if cache_dir else None
+        return load_grammar(grammar_source, gp=gp)
+    pickled_name = _generate_pickle_name(os.path.basename(grammar_source), cache_dir)
     data = pkgutil.get_data(package, pickled_name)
     g = grammar.Grammar()
     g.loads(data)
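
The DEDENT handling in the first two hunks is easier to follow with a small standalone sketch. The helper below is a hypothetical copy of the _partially_consume_prefix logic added by this patch (not the patched module itself); it shows how the prefix preceding a DEDENT is split at the popped indent column, so comments still indented inside the block stay attached to the DEDENT while less-indented text is carried forward to the next token.

    def partially_consume_prefix(prefix, column):
        # Hypothetical standalone copy of Driver._partially_consume_prefix,
        # for illustration only.
        lines = []
        current_line = ""
        current_column = 0
        wait_for_nl = False
        for char in prefix:
            current_line += char
            if wait_for_nl:
                if char == '\n':
                    if current_line.strip() and current_column < column:
                        res = ''.join(lines)
                        return res, prefix[len(res):]
                    lines.append(current_line)
                    current_line = ""
                    current_column = 0
                    wait_for_nl = False
            elif char == ' ':
                current_column += 1
            elif char == '\t':
                current_column += 4
            elif char == '\n':
                # unexpected empty line
                current_column = 0
            else:
                # indent is finished
                wait_for_nl = True
        return ''.join(lines), current_line

    # Prefix seen just before a DEDENT back to column 0; the block being
    # closed was indented by 4 columns (the value popped from indent_columns).
    prefix = "    # comment inside the block\n# comment at module level\n"
    consumed, rest = partially_consume_prefix(prefix, 4)
    # consumed == "    # comment inside the block\n"  -> emitted as the DEDENT's prefix
    # rest     == "# comment at module level\n"       -> carried to the next token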
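
The cache_dir plumbing in the remaining hunks only changes where the pickled grammar is written and looked up. Below is a minimal sketch of the new naming logic, again copied as a hypothetical standalone function with a made-up cache directory path for illustration.

    import os
    import sys

    def generate_pickle_name(gt, cache_dir=None):
        # Hypothetical standalone copy of _generate_pickle_name as patched above.
        head, tail = os.path.splitext(gt)
        if tail == ".txt":
            tail = ""
        name = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
        if cache_dir:
            # With a cache_dir, only the basename is kept and the pickle is
            # placed inside that directory instead of next to the grammar file.
            return os.path.join(cache_dir, os.path.basename(name))
        return name

    # Without a cache dir the pickle lands next to the grammar source, e.g.
    # "Grammar3.7.0.final.0.pickle" (exact name depends on sys.version_info).
    print(generate_pickle_name("Grammar.txt"))

    # With a (hypothetical) cache dir, the same basename is redirected there:
    print(generate_pickle_name("Grammar.txt", cache_dir="/tmp/black-cache"))
    # -> "/tmp/black-cache/Grammar3.7.0.final.0.pickle"

Presumably this lets load_packaged_grammar() read and write the pickle from a user-writable cache even when the package directory itself (for example an installed site-packages tree) is read-only.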