git.madduck.net Git - etc/vim.git/commitdiff
Every one of the projects in this repository is available at the canonical
URL git://git.madduck.net/madduck/pub/<projectpath> — see
each project's metadata for the exact URL.
All patches and comments are welcome. Please squash your changes into logical
commits before using git-format-patch and git-send-email to send them to
patches@git.madduck.net.
I'd be especially grateful if you also read the Git project's submission
guidelines and adhere to them.
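For example, a minimal submission flow might look like the following (the
remote and branch names are only placeholders for your own setup):
  git format-patch origin/master
  git send-email --to=patches@git.madduck.net *.patch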
SSH access, as well as push access, can be individually arranged.
If you use my repositories frequently, consider adding the following
snippet to ~/.gitconfig and using the third clone URL listed for each
project:
[url "git://git.madduck.net/madduck/"]
insteadOf = madduck:
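With that snippet in place, the shortened URL can be used directly, for
example (substitute the actual project path from the project's metadata):
  git clone madduck:pub/<projectpath>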
(parent: 114e835)
12 files changed:
import colorama # noqa: F401
def find_project_root(
srcs: Sequence[str], stdin_filename: Optional[str] = None
) -> Tuple[Path, str]:
return SpecifierSet(",".join(str(s) for s in specifiers))
def find_user_pyproject_toml() -> Path:
r"""Return the path to the top-level user configuration for black.
return user_config_path.resolve()
def get_gitignore(root: Path) -> PathSpec:
"""Return a PathSpec matching gitignore content if present."""
gitignore = root / ".gitignore"
def jupyter_dependencies_are_installed(*, verbose: bool, quiet: bool) -> bool:
try:
# isort: off
try:
f = open(filename)
except OSError as err:
- print("Can't open %s: %s" % (filename, err))
+ print(f"Can't open {filename}: {err}")
return False
self.symbol2number = {}
self.number2symbol = {}
lineno += 1
mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
if not mo and line.strip():
- print("%s(%s): can't parse %s" % (filename, lineno, line.strip()))
+ print(f"{filename}({lineno}): can't parse {line.strip()}")
else:
symbol, number = mo.groups()
number = int(number)
try:
f = open(filename)
except OSError as err:
- print("Can't open %s: %s" % (filename, err))
+ print(f"Can't open {filename}: {err}")
return False
# The code below essentially uses f's iterator-ness!
lineno = 0
Iterable,
List,
Optional,
Union,
)
from contextlib import contextmanager
def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None:
self.grammar = grammar
if logger is None:
assert p.rootnode is not None
return p.rootnode
- def parse_stream_raw(self, stream: IO[Text], debug: bool = False) -> NL:
+ def parse_stream_raw(self, stream: IO[str], debug: bool = False) -> NL:
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
return self.parse_tokens(tokens, debug)
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
return self.parse_tokens(tokens, debug)
- def parse_stream(self, stream: IO[Text], debug: bool = False) -> NL:
+ def parse_stream(self, stream: IO[str], debug: bool = False) -> NL:
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(
- self, filename: Path, encoding: Optional[Text] = None, debug: bool = False
+ self, filename: Path, encoding: Optional[str] = None, debug: bool = False
) -> NL:
"""Parse a file and return the syntax tree."""
- with io.open(filename, "r", encoding=encoding) as stream:
+ with open(filename, encoding=encoding) as stream:
return self.parse_stream(stream, debug)
- def parse_string(self, text: Text, debug: bool = False) -> NL:
+ def parse_string(self, text: str, debug: bool = False) -> NL:
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(
io.StringIO(text).readline, grammar=self.grammar
)
return self.parse_tokens(tokens, debug)
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(
io.StringIO(text).readline, grammar=self.grammar
)
return self.parse_tokens(tokens, debug)
- def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Text]:
+ def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]:
lines: List[str] = []
current_line = ""
current_column = 0
return "".join(lines), current_line
return "".join(lines), current_line
-def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:
+def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> str:
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
- gt: Text = "Grammar.txt",
- gp: Optional[Text] = None,
+ gt: str = "Grammar.txt",
+ gp: Optional[str] = None,
save: bool = True,
force: bool = False,
logger: Optional[Logger] = None,
-def _newer(a: Text, b: Text) -> bool:
+def _newer(a: str, b: str) -> bool:
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
def load_packaged_grammar(
- package: str, grammar_source: Text, cache_dir: Optional[Path] = None
+ package: str, grammar_source: str, cache_dir: Optional[Path] = None
) -> grammar.Grammar:
"""Normally, loads a pickled grammar by doing
pkgutil.get_data(package, pickled_grammar)
-def main(*args: Text) -> bool:
+def main(*args: str) -> bool:
"""Main program, when run as a script: produce grammar pickle files.
Calls load_grammar for each argument, a path to a grammar text file.
"""Main program, when run as a script: produce grammar pickle files.
Calls load_grammar for each argument, a path to a grammar text file.
import os
import pickle
import tempfile
-from typing import Any, Dict, List, Optional, Text, Tuple, TypeVar, Union
+from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
# Local imports
from . import token
_P = TypeVar("_P", bound="Grammar")
-Label = Tuple[int, Optional[Text]]
+Label = Tuple[int, Optional[str]]
DFA = List[List[Tuple[int, int]]]
DFAS = Tuple[DFA, Dict[int, int]]
Path = Union[str, "os.PathLike[str]"]
"""Pgen parsing tables conversion class.
Once initialized, this class supplies the grammar tables for the
"""Pgen parsing tables conversion class.
Once initialized, this class supplies the grammar tables for the
-from typing import Dict, Match, Text
+from typing import Dict, Match
-simple_escapes: Dict[Text, Text] = {
+simple_escapes: Dict[str, str] = {
"a": "\a",
"b": "\b",
"f": "\f",
"a": "\a",
"b": "\b",
"f": "\f",
-def escape(m: Match[Text]) -> Text:
+def escape(m: Match[str]) -> str:
all, tail = m.group(0, 1)
assert all.startswith("\\")
esc = simple_escapes.get(tail)
-def evalString(s: Text) -> Text:
+def evalString(s: str) -> str:
assert s.startswith("'") or s.startswith('"'), repr(s[:1])
q = s[0]
if s[:3] == q * 3:
how this parsing engine works.
"""
from contextlib import contextmanager
# Local imports
from blib2to3.pgen2.driver import TokenProxy
-Results = Dict[Text, NL]
Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
DFA = List[List[Tuple[int, int]]]
DFAS = Tuple[DFA, Dict[int, int]]
finally:
self.parser.is_backtracking = is_backtracking
- def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
+ def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
func: Callable[..., Any]
if raw:
func = self.parser._addtoken
args.insert(0, ilabel)
func(*args)
- def determine_route(self, value: Optional[Text] = None, force: bool = False) -> Optional[int]:
+ def determine_route(self, value: Optional[str] = None, force: bool = False) -> Optional[int]:
alive_ilabels = self.ilabels
if len(alive_ilabels) == 0:
*_, most_successful_ilabel = self._dead_ilabels
"""Exception to signal the parser is stuck."""
def __init__(
"""Exception to signal the parser is stuck."""
def __init__(
- self, msg: Text, type: Optional[int], value: Optional[Text], context: Context
+ self, msg: str, type: Optional[int], value: Optional[str], context: Context
) -> None:
Exception.__init__(
- self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context)
+ self, f"{msg}: type={type!r}, value={value!r}, context={context!r}"
)
self.msg = msg
self.type = type
"""Parser engine.
The proper usage sequence is:
"""Parser engine.
The proper usage sequence is:
self.used_names: Set[str] = set()
self.proxy = proxy
- def addtoken(self, type: int, value: Text, context: Context) -> bool:
+ def addtoken(self, type: int, value: str, context: Context) -> bool:
"""Add a token; return True iff this is the end of the program."""
# Map from token to label
ilabels = self.classify(type, value, context)
"""Add a token; return True iff this is the end of the program."""
# Map from token to label
ilabels = self.classify(type, value, context)
return self._addtoken(ilabel, type, value, context)
- def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool:
+ def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool:
# Loop until the token is shifted; may raise exceptions
while True:
dfa, state, node = self.stack[-1]
# No success finding a transition
raise ParseError("bad input", type, value, context)
- def classify(self, type: int, value: Text, context: Context) -> List[int]:
+ def classify(self, type: int, value: str, context: Context) -> List[int]:
"""Turn a token into a label. (Internal)
Depending on whether the value is a soft-keyword or not,
"""Turn a token into a label. (Internal)
Depending on whether the value is a soft-keyword or not,
raise ParseError("bad token", type, value, context)
return [ilabel]
raise ParseError("bad token", type, value, context)
return [ilabel]
- def shift(self, type: int, value: Text, newstate: int, context: Context) -> None:
+ def shift(self, type: int, value: str, newstate: int, context: Context) -> None:
"""Shift a token. (Internal)"""
if self.is_backtracking:
dfa, state, _ = self.stack[-1]
"""Shift a token. (Internal)"""
if self.is_backtracking:
dfa, state, _ = self.stack[-1]
Iterator,
List,
Optional,
-class ParserGenerator(object):
generator: Iterator[GoodTokenInfo]
- first: Dict[Text, Optional[Dict[Text, int]]]
+ first: Dict[str, Optional[Dict[str, int]]]
- def __init__(self, filename: Path, stream: Optional[IO[Text]] = None) -> None:
+ def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None:
close_stream = None
if stream is None:
stream = open(filename, encoding="utf-8")
c.start = c.symbol2number[self.startsymbol]
return c
- def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]:
+ def make_first(self, c: PgenGrammar, name: str) -> Dict[int, int]:
rawfirst = self.first[name]
assert rawfirst is not None
first = {}
first[ilabel] = 1
return first
- def make_label(self, c: PgenGrammar, label: Text) -> int:
+ def make_label(self, c: PgenGrammar, label: str) -> int:
# XXX Maybe this should be a method on a subclass of converter?
ilabel = len(c.labels)
if label[0].isalpha():
self.calcfirst(name)
# print name, self.first[name].keys()
- def calcfirst(self, name: Text) -> None:
+ def calcfirst(self, name: str) -> None:
dfa = self.dfas[name]
self.first[name] = None # dummy to detect left recursion
state = dfa[0]
inverse[symbol] = label
self.first[name] = totalset
- def parse(self) -> Tuple[Dict[Text, List["DFAState"]], Text]:
+ def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]:
dfas = {}
startsymbol: Optional[str] = None
# MSTART: (NEWLINE | RULE)* ENDMARKER
state.addarc(st, label)
return states # List of DFAState instances; first one is start
- def dump_nfa(self, name: Text, start: "NFAState", finish: "NFAState") -> None:
+ def dump_nfa(self, name: str, start: "NFAState", finish: "NFAState") -> None:
print("Dump of NFA for", name)
todo = [start]
for i, state in enumerate(todo):
print("Dump of NFA for", name)
todo = [start]
for i, state in enumerate(todo):
else:
print(" %s -> %d" % (label, j))
- def dump_dfa(self, name: Text, dfa: Sequence["DFAState"]) -> None:
+ def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None:
print("Dump of DFA for", name)
for i, state in enumerate(dfa):
print(" State", i, state.isfinal and "(final)" or "")
print("Dump of DFA for", name)
for i, state in enumerate(dfa):
print(" State", i, state.isfinal and "(final)" or "")
- def expect(self, type: int, value: Optional[Any] = None) -> Text:
+ def expect(self, type: int, value: Optional[Any] = None) -> str:
if self.type != type or (value is not None and self.value != value):
self.raise_error(
"expected %s/%s, got %s/%s", type, value, self.type, self.value
raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line))
-class NFAState(object):
- arcs: List[Tuple[Optional[Text], "NFAState"]]
+class NFAState:
+ arcs: List[Tuple[Optional[str], "NFAState"]]
def __init__(self) -> None:
self.arcs = [] # list of (label, NFAState) pairs
- def addarc(self, next: "NFAState", label: Optional[Text] = None) -> None:
+ def addarc(self, next: "NFAState", label: Optional[str] = None) -> None:
assert label is None or isinstance(label, str)
assert isinstance(next, NFAState)
self.arcs.append((label, next))
nfaset: Dict[NFAState, Any]
isfinal: bool
- arcs: Dict[Text, "DFAState"]
+ arcs: Dict[str, "DFAState"]
def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
assert isinstance(nfaset, dict)
self.isfinal = final in nfaset
self.arcs = {} # map from label to DFAState
- def addarc(self, next: "DFAState", label: Text) -> None:
+ def addarc(self, next: "DFAState", label: str) -> None:
assert isinstance(label, str)
assert label not in self.arcs
assert isinstance(next, DFAState)
"""Token constants (from "token.h")."""
"""Token constants (from "token.h")."""
from typing import Dict
from typing import Final
tok_name: Final[Dict[int, str]] = {}
for _name, _value in list(globals().items()):
- if type(_value) is type(0):
+ if type(_value) is int:
def _combinations(*l: str) -> Set[str]:
- return set(x + y for x in l for y in l + ("",) if x.casefold() != y.casefold())
+ return {x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()}
- type: int, token: Text, srow_col: Coord, erow_col: Coord, line: Text
+ type: int, token: str, srow_col: Coord, erow_col: Coord, line: str
) -> None: # for testing
(srow, scol) = srow_col
(erow, ecol) = erow_col
-TokenEater = Callable[[int, Text, Coord, Coord, Text], None]
+TokenEater = Callable[[int, str, Coord, Coord, str], None]
-def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken) -> None:
+def tokenize(readline: Callable[[], str], tokeneater: TokenEater = printtoken) -> None:
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
# backwards compatible interface
-def tokenize_loop(readline: Callable[[], Text], tokeneater: TokenEater) -> None:
+def tokenize_loop(readline: Callable[[], str], tokeneater: TokenEater) -> None:
for token_info in generate_tokens(readline):
tokeneater(*token_info)
-GoodTokenInfo = Tuple[int, Text, Coord, Coord, Text]
+GoodTokenInfo = Tuple[int, str, Coord, Coord, str]
TokenInfo = Union[Tuple[int, str], GoodTokenInfo]
class Untokenizer:
prev_row: int
prev_col: int
if col_offset:
self.tokens.append(" " * col_offset)
- def untokenize(self, iterable: Iterable[TokenInfo]) -> Text:
+ def untokenize(self, iterable: Iterable[TokenInfo]) -> str:
for t in iterable:
if len(t) == 2:
self.compat(cast(Tuple[int, str], t), iterable)
break
tok_type, token, start, end, line = cast(
- Tuple[int, Text, Coord, Coord, Text], t
+ Tuple[int, str, Coord, Coord, str], t
)
self.add_whitespace(start)
self.tokens.append(token)
self.prev_col = 0
return "".join(self.tokens)
- def compat(self, token: Tuple[int, Text], iterable: Iterable[TokenInfo]) -> None:
+ def compat(self, token: Tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
startline = False
indents = []
toks_append = self.tokens.append
try:
return readline()
except StopIteration:
def find_cookie(line: bytes) -> Optional[str]:
try:
return default, [first, second]
-def untokenize(iterable: Iterable[TokenInfo]) -> Text:
+def untokenize(iterable: Iterable[TokenInfo]) -> str:
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
- readline: Callable[[], Text], grammar: Optional[Grammar] = None
+ readline: Callable[[], str], grammar: Optional[Grammar] = None
) -> Iterator[GoodTokenInfo]:
"""
The generate_tokens() generator requires one argument, readline, which
from typing import Union
# Local imports
-from .pgen2 import token
from .pgen2 import driver
from .pgen2.grammar import Grammar
def __init__(self, grammar: Grammar) -> None:
"""Initializer.
Iterator,
List,
Optional,
HUGE: int = 0x7FFFFFFF # maximum repeat count, default max
-_type_reprs: Dict[int, Union[Text, int]] = {}
+_type_reprs: Dict[int, Union[str, int]] = {}
-def type_repr(type_num: int) -> Union[Text, int]:
+def type_repr(type_num: int) -> Union[str, int]:
global _type_reprs
if not _type_reprs:
from .pygram import python_symbols
_P = TypeVar("_P", bound="Base")
NL = Union["Node", "Leaf"]
-Context = Tuple[Text, Tuple[int, int]]
-RawNode = Tuple[int, Optional[Text], Optional[Context], Optional[List[NL]]]
+Context = Tuple[str, Tuple[int, int]]
+RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]]
"""
Abstract base class for Node and Leaf.
"""
Abstract base class for Node and Leaf.
return self._eq(other)
@property
- def prefix(self) -> Text:
+ def prefix(self) -> str:
raise NotImplementedError
def _eq(self: _P, other: _P) -> bool:
return 0
return 1 + self.parent.depth()
- def get_suffix(self) -> Text:
+ def get_suffix(self) -> str:
"""
Return the string immediately following the invocant node. This is
effectively equivalent to node.next_sibling.prefix
"""
Return the string immediately following the invocant node. This is
effectively equivalent to node.next_sibling.prefix
"""Concrete implementation for interior nodes."""
fixers_applied: Optional[List[Any]]
"""Concrete implementation for interior nodes."""
fixers_applied: Optional[List[Any]]
- used_names: Optional[Set[Text]]
+ used_names: Optional[Set[str]]
def __init__(
self,
type: int,
children: List[NL],
context: Optional[Any] = None,
- prefix: Optional[Text] = None,
+ prefix: Optional[str] = None,
fixers_applied: Optional[List[Any]] = None,
) -> None:
"""
else:
self.fixers_applied = None
- def __repr__(self) -> Text:
+ def __repr__(self) -> str:
"""Return a canonical string representation."""
assert self.type is not None
"""Return a canonical string representation."""
assert self.type is not None
- return "%s(%s, %r)" % (
+ return "{}({}, {!r})".format(
self.__class__.__name__,
type_repr(self.type),
self.children,
)
- def __str__(self) -> Text:
+ def __str__(self) -> str:
"""
Return a pretty string representation.
"""
Return a pretty string representation.
yield from child.pre_order()
@property
- def prefix(self) -> Text:
+ def prefix(self) -> str:
"""
The whitespace and comments preceding this node in the input.
"""
"""
The whitespace and comments preceding this node in the input.
"""
return self.children[0].prefix
@prefix.setter
- def prefix(self, prefix: Text) -> None:
+ def prefix(self, prefix: str) -> None:
if self.children:
self.children[0].prefix = prefix
"""Concrete implementation for leaf nodes."""
# Default values for instance variables
"""Concrete implementation for leaf nodes."""
# Default values for instance variables
fixers_applied: List[Any]
bracket_depth: int
# Changed later in brackets.py
opening_bracket: Optional["Leaf"] = None
- used_names: Optional[Set[Text]]
+ used_names: Optional[Set[str]]
_prefix = "" # Whitespace and comments preceding this token in the input
lineno: int = 0 # Line where this token starts in the input
column: int = 0 # Column where this token starts in the input
_prefix = "" # Whitespace and comments preceding this token in the input
lineno: int = 0 # Line where this token starts in the input
column: int = 0 # Column where this token starts in the input
def __init__(
self,
type: int,
context: Optional[Context] = None,
- prefix: Optional[Text] = None,
+ prefix: Optional[str] = None,
fixers_applied: List[Any] = [],
opening_bracket: Optional["Leaf"] = None,
fmt_pass_converted_first_leaf: Optional["Leaf"] = None,
from .pgen2.token import tok_name
assert self.type is not None
- return "%s(%s, %r)" % (
+ return "{}({}, {!r})".format(
self.__class__.__name__,
tok_name.get(self.type, self.type),
self.value,
)
- def __str__(self) -> Text:
+ def __str__(self) -> str:
"""
Return a pretty string representation.
"""
Return a pretty string representation.
- def prefix(self) -> Text:
+ def prefix(self) -> str:
"""
The whitespace and comments preceding this token in the input.
"""
return self._prefix
@prefix.setter
"""
The whitespace and comments preceding this token in the input.
"""
return self._prefix
@prefix.setter
- def prefix(self, prefix: Text) -> None:
+ def prefix(self, prefix: str) -> None:
self.changed()
self._prefix = prefix
return Leaf(type, value or "", context=context)
-_Results = Dict[Text, NL]
+_Results = Dict[str, NL]
-class BasePattern(object):
"""
A pattern is a tree matching pattern.
"""
A pattern is a tree matching pattern.
type: Optional[int]
type = None # Node type (token if < 256, symbol if >= 256)
content: Any = None # Optional content matching pattern
- name: Optional[Text] = None # Optional name used to store match in results dict
+ name: Optional[str] = None # Optional name used to store match in results dict
def __new__(cls, *args, **kwds):
"""Constructor that prevents BasePattern from being instantiated."""
assert cls is not BasePattern, "Cannot instantiate BasePattern"
return object.__new__(cls)
- def __repr__(self) -> Text:
+ def __repr__(self) -> str:
assert self.type is not None
args = [type_repr(self.type), self.content, self.name]
while args and args[-1] is None:
del args[-1]
- return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
+ return "{}({})".format(self.__class__.__name__, ", ".join(map(repr, args)))
def _submatch(self, node, results=None) -> bool:
raise NotImplementedError
def __init__(
self,
type: Optional[int] = None,
- content: Optional[Text] = None,
- name: Optional[Text] = None,
+ content: Optional[str] = None,
+ name: Optional[str] = None,
) -> None:
"""
Initializer. Takes optional type, content, and name.
def __init__(
self,
type: Optional[int] = None,
- content: Optional[Iterable[Text]] = None,
- name: Optional[Text] = None,
+ content: Optional[Iterable[str]] = None,
+ name: Optional[str] = None,
) -> None:
"""
Initializer. Takes optional type, content, and name.
- content: Optional[Text] = None,
+ content: Optional[str] = None,
min: int = 0,
max: int = HUGE,
- name: Optional[Text] = None,
+ name: Optional[str] = None,
) -> None:
"""
Initializer.