Fix merging implicit multiline strings that have inline comments (#3956)
diff --git a/src/blib2to3/pgen2/parse.py b/src/blib2to3/pgen2/parse.py
index a9dc11f39ce15deedbcb57cbef8d5774c238d4ea..ad51a3dad08dfbbe1fee7cc9b7845eeb7cb263fd 100644
--- a/src/blib2to3/pgen2/parse.py
+++ b/src/blib2to3/pgen2/parse.py
@@ -9,33 +9,32 @@ See Parser/parser.c in the Python distribution for additional info on
 how this parsing engine works.
 
 """
-import copy
 from contextlib import contextmanager
-
-# Local imports
-from . import grammar, token, tokenize
 from typing import (
-    cast,
+    TYPE_CHECKING,
     Any,
-    Optional,
-    Text,
-    Union,
-    Tuple,
+    Callable,
     Dict,
-    List,
     Iterator,
-    Callable,
+    List,
+    Optional,
     Set,
-    TYPE_CHECKING,
+    Tuple,
+    Union,
+    cast,
 )
+
 from blib2to3.pgen2.grammar import Grammar
-from blib2to3.pytree import convert, NL, Context, RawNode, Leaf, Node
+from blib2to3.pytree import NL, Context, Leaf, Node, RawNode, convert
+
+# Local imports
+from . import grammar, token, tokenize
 
 if TYPE_CHECKING:
-    from blib2to3.driver import TokenProxy
+    from blib2to3.pgen2.driver import TokenProxy
 
 
-Results = Dict[Text, NL]
+Results = Dict[str, NL]
 Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
 DFA = List[List[Tuple[int, int]]]
 DFAS = Tuple[DFA, Dict[int, int]]
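
An aside, not part of the patch: judging by the lib2to3/pgen2 grammar tables these aliases describe, a DFA is a list of states, each state a list of (label, next-state) arcs, and a DFAS pairs a DFA with its "first" set. A minimal sketch with made-up values:

    # Shapes only; the numbers are invented for illustration.
    state0 = [(1, 1), (2, 0)]   # one state: a list of (label, next_state) arcs
    dfa = [state0, [(0, 1)]]    # DFA: one such arc list per state
    dfas = (dfa, {1: 1, 2: 1})  # DFAS: the DFA plus its "first" set (label -> 1)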
@@ -100,7 +99,7 @@ class Recorder:
         finally:
             self.parser.is_backtracking = is_backtracking
 
-    def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
+    def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
         func: Callable[..., Any]
         if raw:
             func = self.parser._addtoken
@@ -114,7 +113,9 @@ class Recorder:
                     args.insert(0, ilabel)
                 func(*args)
 
-    def determine_route(self, value: Text = None, force: bool = False) -> Optional[int]:
+    def determine_route(
+        self, value: Optional[str] = None, force: bool = False
+    ) -> Optional[int]:
         alive_ilabels = self.ilabels
         if len(alive_ilabels) == 0:
             *_, most_successful_ilabel = self._dead_ilabels
@@ -131,10 +132,10 @@ class ParseError(Exception):
     """Exception to signal the parser is stuck."""
 
     def __init__(
-        self, msg: Text, type: Optional[int], value: Optional[Text], context: Context
+        self, msg: str, type: Optional[int], value: Optional[str], context: Context
     ) -> None:
         Exception.__init__(
-            self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context)
+            self, f"{msg}: type={type!r}, value={value!r}, context={context!r}"
         )
         self.msg = msg
         self.type = type
@@ -142,7 +143,7 @@ class ParseError(Exception):
         self.context = context
 
 
-class Parser(object):
+class Parser:
     """Parser engine.
 
     The proper usage sequence is:
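
An aside, not part of the patch: the usage sequence in this docstring is cut off by the hunk boundary. Hedged against the lib2to3 original and the setup()/addtoken() signatures visible further down, driving the parser looks roughly like this; `proxy` and the token loop are schematic placeholders, not the actual driver code:

    p = Parser(grammar)                  # a convert callback may also be passed
    p.setup(proxy)                       # proxy wraps the token stream; start symbol optional
    for type, value, context in tokens:  # feed tokens one by one
        if p.addtoken(type, value, context):
            break                        # True means the start symbol was accepted
    root = p.rootnode                    # root of the resulting tree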
@@ -210,6 +211,7 @@ class Parser(object):
         # See note in docstring above. TL;DR this is ignored.
         self.convert = convert or lam_sub
         self.is_backtracking = False
+        self.last_token: Optional[int] = None
 
     def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
         """Prepare for parsing.
@@ -235,8 +237,9 @@ class Parser(object):
         self.rootnode: Optional[NL] = None
         self.used_names: Set[str] = set()
         self.proxy = proxy
+        self.last_token = None
 
-    def addtoken(self, type: int, value: Text, context: Context) -> bool:
+    def addtoken(self, type: int, value: str, context: Context) -> bool:
         """Add a token; return True iff this is the end of the program."""
         # Map from token to label
         ilabels = self.classify(type, value, context)
@@ -284,7 +287,7 @@ class Parser(object):
 
         return self._addtoken(ilabel, type, value, context)
 
-    def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool:
+    def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool:
         # Loop until the token is shifted; may raise exceptions
         while True:
             dfa, state, node = self.stack[-1]
@@ -316,6 +319,7 @@ class Parser(object):
                         dfa, state, node = self.stack[-1]
                         states, first = dfa
                     # Done with this token
+                    self.last_token = type
                     return False
 
             else:
@@ -329,7 +333,7 @@ class Parser(object):
                     # No success finding a transition
                     raise ParseError("bad input", type, value, context)
 
-    def classify(self, type: int, value: Text, context: Context) -> List[int]:
+    def classify(self, type: int, value: str, context: Context) -> List[int]:
         """Turn a token into a label.  (Internal)
 
         Depending on whether the value is a soft-keyword or not,
@@ -342,9 +346,23 @@ class Parser(object):
                 return [self.grammar.keywords[value]]
             elif value in self.grammar.soft_keywords:
                 assert type in self.grammar.tokens
+                # Current soft keywords (match, case, type) can only appear at the
+                # beginning of a statement. So as a shortcut, don't try to treat them
+                # like keywords in any other context.
+                # ('_' is also a soft keyword in the real grammar, but for our grammar
+                # it's just an expression, so we don't need to treat it specially.)
+                if self.last_token not in (
+                    None,
+                    token.INDENT,
+                    token.DEDENT,
+                    token.NEWLINE,
+                    token.SEMI,
+                    token.COLON,
+                ):
+                    return [self.grammar.tokens[type]]
                 return [
-                    self.grammar.soft_keywords[value],
                     self.grammar.tokens[type],
+                    self.grammar.soft_keywords[value],
                 ]
 
         ilabel = self.grammar.tokens.get(type)
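
An aside, not part of the patch: to make the shortcut above concrete, here is an illustrative snippet rather than a test case from this change (the match statement needs Python 3.10+). A soft keyword is tried as a keyword only when the previously shifted token is one after which a new statement can start (start of file, INDENT, DEDENT, NEWLINE, ';' or ':'); anywhere else it is classified as a plain NAME right away.

    match = (0, 0)  # statement start: both labels stay available; backtracking settles on NAME
    x = match       # follows '=', so last_token rules out the keyword reading immediately
    match x:        # statement start again; here the soft-keyword reading wins
        case (0, 0):
            pass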
@@ -352,7 +370,7 @@ class Parser(object):
             raise ParseError("bad token", type, value, context)
         return [ilabel]
 
-    def shift(self, type: int, value: Text, newstate: int, context: Context) -> None:
+    def shift(self, type: int, value: str, newstate: int, context: Context) -> None:
         """Shift a token.  (Internal)"""
         if self.is_backtracking:
             dfa, state, _ = self.stack[-1]