# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

import os
from typing import (
    IO,
    Any,
    Dict,
    Iterator,
    List,
    NoReturn,
    Optional,
    Sequence,
    Tuple,
    Union,
)

from blib2to3.pgen2 import grammar, token, tokenize
from blib2to3.pgen2.tokenize import GoodTokenInfo

Path = Union[str, "os.PathLike[str]"]


class PgenGrammar(grammar.Grammar):
    pass


class ParserGenerator:
    filename: Path
    stream: IO[str]
    generator: Iterator[GoodTokenInfo]
    first: Dict[str, Optional[Dict[str, int]]]

    def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None:
        close_stream = None
        if stream is None:
            stream = open(filename, encoding="utf-8")
            close_stream = stream.close
        self.filename = filename
        self.stream = stream
        self.generator = tokenize.generate_tokens(stream.readline)
        self.gettoken()  # Initialize lookahead
        self.dfas, self.startsymbol = self.parse()
        if close_stream is not None:
            close_stream()
        self.first = {}  # map from symbol name to set of tokens
        self.addfirstsets()
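
    # For orientation, the pipeline implemented below: the grammar file is
    # tokenized and parsed into one NFA per rule (parse/parse_rhs/parse_alt/
    # parse_item/parse_atom), each NFA is converted to a DFA (make_dfa), the
    # DFA is reduced (simplify_dfa), FIRST sets are computed (addfirstsets/
    # calcfirst), and make_grammar() serializes everything into the numeric
    # tables the parser consumes.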

    def make_grammar(self) -> PgenGrammar:
        c = PgenGrammar()
        names = list(self.dfas.keys())
        names.sort()
        names.remove(self.startsymbol)
        names.insert(0, self.startsymbol)
        for name in names:
            i = 256 + len(c.symbol2number)
            c.symbol2number[name] = i
            c.number2symbol[i] = name
        for name in names:
            dfa = self.dfas[name]
            states = []
            for state in dfa:
                arcs = []
                for label, next in sorted(state.arcs.items()):
                    arcs.append((self.make_label(c, label), dfa.index(next)))
                if state.isfinal:
                    arcs.append((0, dfa.index(state)))
                states.append(arcs)
            c.states.append(states)
            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
        c.start = c.symbol2number[self.startsymbol]
        return c

    def make_first(self, c: PgenGrammar, name: str) -> Dict[int, int]:
        rawfirst = self.first[name]
        assert rawfirst is not None
        first = {}
        for label in sorted(rawfirst):
            ilabel = self.make_label(c, label)
            ##assert ilabel not in first # XXX failed on <> ... !=
            first[ilabel] = 1
        return first

    def make_label(self, c: PgenGrammar, label: str) -> int:
        # XXX Maybe this should be a method on a subclass of converter?
        ilabel = len(c.labels)
        if label[0].isalpha():
            # Either a symbol name or a named token
            if label in c.symbol2number:
                # A symbol name (a non-terminal)
                if label in c.symbol2label:
                    return c.symbol2label[label]
                else:
                    c.labels.append((c.symbol2number[label], None))
                    c.symbol2label[label] = ilabel
                    return ilabel
            else:
                # A named token (NAME, NUMBER, STRING)
                itoken = getattr(token, label, None)
                assert isinstance(itoken, int), label
                assert itoken in token.tok_name, label
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel
        else:
            # Either a keyword or an operator
            assert label[0] in ('"', "'"), label
            value = eval(label)
            if value[0].isalpha():
                if label[0] == '"':
                    keywords = c.soft_keywords
                else:
                    keywords = c.keywords

                # A keyword
                if value in keywords:
                    return keywords[value]
                else:
                    c.labels.append((token.NAME, value))
                    keywords[value] = ilabel
                    return ilabel
            else:
                # An operator (any non-numeric token)
                itoken = grammar.opmap[value]  # Fails if unknown token
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel
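
    # Summarizing the four label kinds make_label() distinguishes: a
    # non-terminal such as 'atom' maps to its symbol number; a named token
    # such as 'NAME' maps to its token number; a quoted keyword such as
    # "'if'" maps to a (NAME, 'if') entry in c.keywords (or c.soft_keywords
    # when double-quoted in the grammar); and a quoted operator such as
    # "'+'" maps through grammar.opmap to its token number.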

    def addfirstsets(self) -> None:
        names = list(self.dfas.keys())
        names.sort()
        for name in names:
            if name not in self.first:
                self.calcfirst(name)
            # print name, self.first[name].keys()

    def calcfirst(self, name: str) -> None:
        dfa = self.dfas[name]
        self.first[name] = None  # dummy to detect left recursion
        state = dfa[0]
        totalset: Dict[str, int] = {}
        overlapcheck = {}
        for label, next in state.arcs.items():
            if label in self.dfas:
                if label in self.first:
                    fset = self.first[label]
                    if fset is None:
                        raise ValueError("recursion for rule %r" % name)
                else:
                    self.calcfirst(label)
                    fset = self.first[label]
                    assert fset is not None
                totalset.update(fset)
                overlapcheck[label] = fset
            else:
                totalset[label] = 1
                overlapcheck[label] = {label: 1}
        inverse: Dict[str, str] = {}
        for label, itsfirst in overlapcheck.items():
            for symbol in itsfirst:
                if symbol in inverse:
                    raise ValueError(
                        "rule %s is ambiguous; %s is in the first sets of %s as well"
                        " as %s" % (name, symbol, label, inverse[symbol])
                    )
                inverse[symbol] = label
        self.first[name] = totalset
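
    # Illustrative example: for a rule like
    #     stmt: 'if' expr | NAME '=' expr
    # the start state's arc labels are "'if'" and NAME; neither is itself a
    # rule, so FIRST(stmt) = {"'if'", NAME}. Had an alternative started with
    # another rule, calcfirst() would recurse and merge that rule's FIRST
    # set, and the overlap check rejects the grammar if two alternatives
    # can begin with the same token.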

    def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]:
        dfas = {}
        startsymbol: Optional[str] = None
        # MSTART: (NEWLINE | RULE)* ENDMARKER
        while self.type != token.ENDMARKER:
            while self.type == token.NEWLINE:
                self.gettoken()
            # RULE: NAME ':' RHS NEWLINE
            name = self.expect(token.NAME)
            self.expect(token.OP, ":")
            a, z = self.parse_rhs()
            self.expect(token.NEWLINE)
            # self.dump_nfa(name, a, z)
            dfa = self.make_dfa(a, z)
            # self.dump_dfa(name, dfa)
            oldlen = len(dfa)
            self.simplify_dfa(dfa)
            newlen = len(dfa)
            dfas[name] = dfa
            # print name, oldlen, newlen
            if startsymbol is None:
                startsymbol = name
        assert startsymbol is not None
        return dfas, startsymbol
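
    # The input is a lib2to3-style grammar file, one rule per line, e.g.
    #     atom: NAME | NUMBER | '(' testlist ')'
    # Each rule becomes an NFA via the recursive-descent methods below, and
    # the first rule in the file becomes the grammar's start symbol.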

    def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]:
        # To turn an NFA into a DFA, we define the states of the DFA
        # to correspond to *sets* of states of the NFA. Then do some
        # state reduction. Let's represent sets as dicts with 1 for
        # values.
        assert isinstance(start, NFAState)
        assert isinstance(finish, NFAState)

        def closure(state: NFAState) -> Dict[NFAState, int]:
            base: Dict[NFAState, int] = {}
            addclosure(state, base)
            return base

        def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:
            assert isinstance(state, NFAState)
            if state in base:
                return
            base[state] = 1
            for label, next in state.arcs:
                if label is None:
                    addclosure(next, base)

        states = [DFAState(closure(start), finish)]
        for state in states:  # NB states grows while we're iterating
            arcs: Dict[str, Dict[NFAState, int]] = {}
            for nfastate in state.nfaset:
                for label, next in nfastate.arcs:
                    if label is not None:
                        addclosure(next, arcs.setdefault(label, {}))
            for label, nfaset in sorted(arcs.items()):
                for st in states:
                    if st.nfaset == nfaset:
                        break
                else:
                    st = DFAState(nfaset, finish)
                    states.append(st)
                state.addarc(st, label)
        return states  # List of DFAState instances; first one is start
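
    # This is the classic subset construction: each DFA state is the
    # epsilon-closure (closure over None-labeled arcs) of a set of NFA
    # states, and following label L from a DFA state means taking the
    # closure of every L-arc out of its member NFA states. A DFA state is
    # final iff the NFA's finish state belongs to its set.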

    def dump_nfa(self, name: str, start: "NFAState", finish: "NFAState") -> None:
        print("Dump of NFA for", name)
        todo = [start]
        for i, state in enumerate(todo):
            print("  State", i, state is finish and "(final)" or "")
            for label, next in state.arcs:
                if next in todo:
                    j = todo.index(next)
                else:
                    j = len(todo)
                    todo.append(next)
                if label is None:
                    print("    -> %d" % j)
                else:
                    print("    %s -> %d" % (label, j))

    def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None:
        print("Dump of DFA for", name)
        for i, state in enumerate(dfa):
            print("  State", i, state.isfinal and "(final)" or "")
            for label, next in sorted(state.arcs.items()):
                print("    %s -> %d" % (label, dfa.index(next)))

    def simplify_dfa(self, dfa: List["DFAState"]) -> None:
        # This is not theoretically optimal, but works well enough.
        # Algorithm: repeatedly look for two states that have the same
        # set of arcs (same labels pointing to the same nodes) and
        # unify them, until things stop changing.

        # dfa is a list of DFAState instances
        changes = True
        while changes:
            changes = False
            for i, state_i in enumerate(dfa):
                for j in range(i + 1, len(dfa)):
                    state_j = dfa[j]
                    if state_i == state_j:
                        # print "  unify", i, j
                        del dfa[j]
                        for state in dfa:
                            state.unifystate(state_j, state_i)
                        changes = True
                        break

    def parse_rhs(self) -> Tuple["NFAState", "NFAState"]:
        # RHS: ALT ('|' ALT)*
        a, z = self.parse_alt()
        if self.value != "|":
            return a, z
        else:
            aa = NFAState()
            zz = NFAState()
            aa.addarc(a)
            z.addarc(zz)
            while self.value == "|":
                self.gettoken()
                a, z = self.parse_alt()
                aa.addarc(a)
                z.addarc(zz)
            return aa, zz

    def parse_alt(self) -> Tuple["NFAState", "NFAState"]:
        # ALT: ITEM+
        a, b = self.parse_item()
        while self.value in ("(", "[") or self.type in (token.NAME, token.STRING):
            c, d = self.parse_item()
            b.addarc(c)
            b = d
        return a, b

    def parse_item(self) -> Tuple["NFAState", "NFAState"]:
        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
        if self.value == "[":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, "]")
            a.addarc(z)
            return a, z
        else:
            a, z = self.parse_atom()
            value = self.value
            if value not in ("+", "*"):
                return a, z
            self.gettoken()
            z.addarc(a)
            if value == "+":
                return a, z
            else:
                return a, a
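
    # The NFA fragments follow Thompson's construction: '[x]' adds an
    # epsilon arc a->z that skips the body; 'x+' adds a back arc z->a so
    # the body can repeat; 'x*' additionally returns (a, a) so zero
    # repetitions are accepted; and alternatives in parse_rhs() get fresh
    # endpoints aa/zz joined to each branch by epsilon arcs.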

    def parse_atom(self) -> Tuple["NFAState", "NFAState"]:
        # ATOM: '(' RHS ')' | NAME | STRING
        if self.value == "(":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, ")")
            return a, z
        elif self.type in (token.NAME, token.STRING):
            a = NFAState()
            z = NFAState()
            a.addarc(z, self.value)
            self.gettoken()
            return a, z
        else:
            self.raise_error(
                "expected (...) or NAME or STRING, got %s/%s", self.type, self.value
            )

    def expect(self, type: int, value: Optional[Any] = None) -> str:
        if self.type != type or (value is not None and self.value != value):
            self.raise_error(
                "expected %s/%s, got %s/%s", type, value, self.type, self.value
            )
        value = self.value
        self.gettoken()
        return value

    def gettoken(self) -> None:
        tup = next(self.generator)
        while tup[0] in (tokenize.COMMENT, tokenize.NL):
            tup = next(self.generator)
        self.type, self.value, self.begin, self.end, self.line = tup
        # print token.tok_name[self.type], repr(self.value)

    def raise_error(self, msg: str, *args: Any) -> NoReturn:
        if args:
            try:
                msg = msg % args
            except Exception:
                msg = " ".join([msg] + list(map(str, args)))
        raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line))


class NFAState:
    arcs: List[Tuple[Optional[str], "NFAState"]]

    def __init__(self) -> None:
        self.arcs = []  # list of (label, NFAState) pairs

    def addarc(self, next: "NFAState", label: Optional[str] = None) -> None:
        assert label is None or isinstance(label, str)
        assert isinstance(next, NFAState)
        self.arcs.append((label, next))


class DFAState:
    nfaset: Dict[NFAState, Any]
    isfinal: bool
    arcs: Dict[str, "DFAState"]

    def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
        assert isinstance(nfaset, dict)
        assert isinstance(next(iter(nfaset)), NFAState)
        assert isinstance(final, NFAState)
        self.nfaset = nfaset
        self.isfinal = final in nfaset
        self.arcs = {}  # map from label to DFAState

    def addarc(self, next: "DFAState", label: str) -> None:
        assert isinstance(label, str)
        assert label not in self.arcs
        assert isinstance(next, DFAState)
        self.arcs[label] = next

    def unifystate(self, old: "DFAState", new: "DFAState") -> None:
        for label, next in self.arcs.items():
            if next is old:
                self.arcs[label] = new

    def __eq__(self, other: Any) -> bool:
        # Equality test -- ignore the nfaset instance variable
        assert isinstance(other, DFAState)
        if self.isfinal != other.isfinal:
            return False
        # Can't just return self.arcs == other.arcs, because that
        # would invoke this method recursively, with cycles...
        if len(self.arcs) != len(other.arcs):
            return False
        for label, next in self.arcs.items():
            if next is not other.arcs.get(label):
                return False
        return True

    __hash__: Any = None  # For Py3 compatibility.


def generate_grammar(filename: Path = "Grammar.txt") -> PgenGrammar:
    p = ParserGenerator(filename)
    return p.make_grammar()
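
# Example usage (illustrative; the default "Grammar.txt" is resolved relative
# to the current working directory, so an explicit path is usually safer):
#
#     g = generate_grammar("Grammar.txt")
#     g.dump("Grammar.pickle")  # grammar.Grammar provides dump()/load()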