git.madduck.net blobdiff: src/blib2to3/pgen2/parse.py
how this parsing engine works.
"""
from contextlib import contextmanager
# Local imports
from blib2to3.pytree import convert, NL, Context, RawNode, Leaf, Node
if TYPE_CHECKING:
-    from blib2to3.driver import TokenProxy
+    from blib2to3.pgen2.driver import TokenProxy
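The corrected hunk points the type-only import at the right module path. The TYPE_CHECKING guard it sits under is the standard way to make an import visible to the type checker without paying for it at runtime (and, commonly, to break import cycles); a minimal, self-contained sketch of that pattern:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated by mypy only; never imported when the module actually runs.
    from blib2to3.pgen2.driver import TokenProxy

def drive(proxy: "TokenProxy") -> None:  # string annotation defers the lookup
    ...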
-Results = Dict[Text, NL]
+Results = Dict[str, NL]
Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
DFA = List[List[Tuple[int, int]]]
DFAS = Tuple[DFA, Dict[int, int]]
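The Text-to-str rewrite in this and the following hunks is purely cosmetic at runtime: typing.Text has always been a plain alias for str on Python 3 and is deprecated in recent Python versions, so only the spelling changes. A one-line check:

from typing import Text

assert Text is str  # alias only; Dict[Text, NL] and Dict[str, NL] are the same type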
    stack: List[Tuple[DFAS, int, RawNode]]
) -> List[Tuple[DFAS, int, RawNode]]:
    """Nodeless stack copy."""
-    return [(copy.deepcopy(dfa), label, DUMMY_NODE) for dfa, label, _ in stack]
+    return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]
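The nodeless stack copy no longer deep-copies each DFA. Presumably this is safe because the DFA tables are read-only while parsing, so every saved stack entry can share them; it also removes the dominant cost of taking a snapshot. A toy benchmark with invented stand-in structures (the real DFA and DUMMY_NODE shapes come from this module):

import copy
import timeit

DUMMY_NODE = (-1, None, None, None)  # stand-in placeholder node

# A fake stack of (dfa, label, node) triples; the nested lists play the DFA role.
stack = [([[(1, 2), (3, 4)]] * 8, 0, ("node", None, None, None))] * 50

def deep():
    return [(copy.deepcopy(dfa), label, DUMMY_NODE) for dfa, label, _ in stack]

def shallow():
    return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]

print("deepcopy:", timeit.timeit(deep, number=1000))
print("shared:  ", timeit.timeit(shallow, number=1000))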
        finally:
            self.parser.is_backtracking = is_backtracking
-    def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
+    def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
        func: Callable[..., Any]
        if raw:
            func = self.parser._addtoken
                    args.insert(0, ilabel)
                func(*args)
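For context, the surrounding lines implement a small dispatch: with raw=True the token goes to the low-level _addtoken, whose signature additionally takes the ilabel up front, while the default path lets addtoken classify the token itself. A self-contained sketch of that shape (names other than addtoken/_addtoken are invented):

from typing import Any, Callable, List

def add_token_sketch(parser: Any, tok_type: int, tok_val: str,
                     ilabel: int, context: tuple, raw: bool = False) -> None:
    func: Callable[..., Any] = parser._addtoken if raw else parser.addtoken
    args: List[Any] = [tok_type, tok_val, context]
    if raw:
        args.insert(0, ilabel)  # _addtoken(ilabel, type, value, context)
    func(*args)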
-    def determine_route(self, value: Text = None, force: bool = False) -> Optional[int]:
+    def determine_route(self, value: Optional[str] = None, force: bool = False) -> Optional[int]:
        alive_ilabels = self.ilabels
        if len(alive_ilabels) == 0:
            *_, most_successful_ilabel = self._dead_ilabels
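The determine_route change fixes an implicit-Optional annotation: value: Text = None claims the parameter is a str while defaulting it to None, a pattern PEP 484 tooling increasingly rejects (mypy disabled implicit Optional by default in 0.990). The explicit form, with a trivial body just to make the sketch runnable:

from typing import Optional

def determine_route(value: Optional[str] = None, force: bool = False) -> Optional[int]:
    return None if value is None else len(value)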
"""Exception to signal the parser is stuck."""
def __init__(
"""Exception to signal the parser is stuck."""
def __init__(
-        self, msg: Text, type: Optional[int], value: Optional[Text], context: Context
+        self, msg: str, type: Optional[int], value: Optional[str], context: Context
    ) -> None:
        Exception.__init__(
- self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context)
+ self, f"{msg}: type={type!r}, value={value!r}, context={context!r}"
        )
        self.msg = msg
        self.type = type
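The message change from %-formatting to an f-string is behavior-preserving, since %r and the !r conversion both call repr(). A quick equivalence check:

msg, type_, value, context = "bad input", 1, "if", ("file.py", (1, 0))

old = "%s: type=%r, value=%r, context=%r" % (msg, type_, value, context)
new = f"{msg}: type={type_!r}, value={value!r}, context={context!r}"
assert old == new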
"""Parser engine.
The proper usage sequence is:
"""Parser engine.
The proper usage sequence is:
        self.used_names: Set[str] = set()
        self.proxy = proxy
-    def addtoken(self, type: int, value: Text, context: Context) -> bool:
+    def addtoken(self, type: int, value: str, context: Context) -> bool:
"""Add a token; return True iff this is the end of the program."""
# Map from token to label
ilabels = self.classify(type, value, context)
"""Add a token; return True iff this is the end of the program."""
# Map from token to label
ilabels = self.classify(type, value, context)
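classify is the step that turns a concrete token into one or more grammar labels (ilabels); addtoken then tries to advance the DFA with each candidate. A toy illustration with invented label numbers and tables:

from typing import Dict, List

NAME = 1
keywords: Dict[str, int] = {"if": 100, "else": 101}  # invented labels
tokens: Dict[int, int] = {NAME: 10}

def classify_sketch(type_: int, value: str) -> List[int]:
    if type_ == NAME and value in keywords:
        return [keywords[value]]      # hard keyword: exactly one label
    ilabel = tokens.get(type_)
    if ilabel is None:
        raise ValueError(f"bad token: type={type_!r}, value={value!r}")
    return [ilabel]

assert classify_sketch(NAME, "if") == [100]
assert classify_sketch(NAME, "spam") == [10]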
                    break
                next_token_type, next_token_value, *_ = proxy.eat(counter)
+                if next_token_type in (tokenize.COMMENT, tokenize.NL):
+                    counter += 1
+                    continue
+
                if next_token_type == tokenize.OP:
                    next_token_type = grammar.opmap[next_token_value]
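The added hunk makes the speculative lookahead skip trivia: the token proxy replays raw tokens, which include COMMENT and non-logical NL tokens that no grammar transition accepts, so they must be stepped over (while still advancing the read counter) before the OP remapping. A standalone sketch of that filtering:

import tokenize
from typing import Iterator, List, Tuple

Token = Tuple[int, str]

def significant(tokens: List[Token]) -> Iterator[Token]:
    counter = 0
    while counter < len(tokens):
        tok_type, tok_val = tokens[counter]
        if tok_type in (tokenize.COMMENT, tokenize.NL):
            counter += 1      # trivia: consume without feeding the grammar
            continue
        yield tok_type, tok_val
        counter += 1

raw = [(tokenize.NAME, "match"), (tokenize.COMMENT, "# soft kw"),
       (tokenize.NL, "\n"), (tokenize.NAME, "x"), (tokenize.OP, ":")]
assert [v for _, v in significant(raw)] == ["match", "x", ":"]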
        return self._addtoken(ilabel, type, value, context)
-    def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool:
+    def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool:
        # Loop until the token is shifted; may raise exceptions
        while True:
            dfa, state, node = self.stack[-1]
                    # No success finding a transition
                    raise ParseError("bad input", type, value, context)
-    def classify(self, type: int, value: Text, context: Context) -> List[int]:
+    def classify(self, type: int, value: str, context: Context) -> List[int]:
"""Turn a token into a label. (Internal)
Depending on whether the value is a soft-keyword or not,
"""Turn a token into a label. (Internal)
Depending on whether the value is a soft-keyword or not,
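The docstring's soft-keyword distinction matters because a value like match is only sometimes a keyword; in that case classify returns two candidate labels (keyword first, then plain NAME) and the backtracking machinery decides which parse survives. A toy sketch with invented tables:

from typing import Dict, List

NAME = 1
soft_keywords: Dict[str, int] = {"match": 200, "case": 201}  # invented labels
tokens: Dict[int, int] = {NAME: 10}

def classify_soft_sketch(type_: int, value: str) -> List[int]:
    if type_ == NAME and value in soft_keywords:
        return [soft_keywords[value], tokens[NAME]]  # try keyword, fall back to NAME
    return [tokens[type_]]

assert classify_soft_sketch(NAME, "match") == [200, 10]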
raise ParseError("bad token", type, value, context)
return [ilabel]
raise ParseError("bad token", type, value, context)
return [ilabel]
-    def shift(self, type: int, value: Text, newstate: int, context: Context) -> None:
+    def shift(self, type: int, value: str, newstate: int, context: Context) -> None:
"""Shift a token. (Internal)"""
if self.is_backtracking:
dfa, state, _ = self.stack[-1]
"""Shift a token. (Internal)"""
if self.is_backtracking:
dfa, state, _ = self.stack[-1]
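shift consults is_backtracking so that speculative branches advance the DFA state without materialising parse-tree nodes (the dummy placeholder stands in). An approximate, simplified model of that guard, with invented stand-in structures:

from typing import Any, List, Tuple

DUMMY_NODE = (-1, None, None, None)

class MiniParser:
    def __init__(self) -> None:
        self.is_backtracking = False
        # One stack frame: (dfa, state, node); node's last slot is its child list.
        self.stack: List[Tuple[Any, int, Any]] = [("dfa", 0, (257, None, None, []))]

    def shift(self, type_: int, value: str, newstate: int) -> None:
        dfa, state, node = self.stack[-1]
        if self.is_backtracking:
            self.stack[-1] = (dfa, newstate, DUMMY_NODE)   # no tree building
        else:
            node[-1].append((type_, value, None, None))    # attach a real leaf
            self.stack[-1] = (dfa, newstate, node)

p = MiniParser()
p.is_backtracking = True
p.shift(1, "x", 5)
assert p.stack[-1][2] is DUMMY_NODE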