# Copyright 2004-2005 Elemental Security, Inc.  All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

# Pgen imports
from . import grammar, token, tokenize

from typing import (
    Any,
    Dict,
    IO,
    Iterator,
    List,
    NoReturn,
    Optional,
    Sequence,
    Text,
    Tuple,
    Union,
)
from blib2to3.pgen2 import grammar
from blib2to3.pgen2.tokenize import GoodTokenInfo

import os


Path = Union[str, "os.PathLike[str]"]


class PgenGrammar(grammar.Grammar):
    pass


class ParserGenerator(object):

    filename: Path
    stream: IO[Text]
    generator: Iterator[GoodTokenInfo]
    first: Dict[Text, Optional[Dict[Text, int]]]

    def __init__(self, filename: Path, stream: Optional[IO[Text]] = None) -> None:
        close_stream = None
        if stream is None:
            stream = open(filename)
            close_stream = stream.close
        self.filename = filename
        self.stream = stream
        self.generator = tokenize.generate_tokens(stream.readline)
        self.gettoken()  # Initialize lookahead
        self.dfas, self.startsymbol = self.parse()
        if close_stream is not None:
            close_stream()
        self.first = {}  # map from symbol name to set of tokens
        self.addfirstsets()
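
    # The constructor runs the whole pipeline: parse() reads the grammar file
    # and builds one NFA per rule, make_dfa() turns each NFA into a DFA,
    # simplify_dfa() merges equivalent DFA states, and make_grammar() (below)
    # flattens the result into the numeric tables the parser driver consumes.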

    def make_grammar(self) -> PgenGrammar:
        c = PgenGrammar()
        names = list(self.dfas.keys())
        names.sort()
        names.remove(self.startsymbol)
        names.insert(0, self.startsymbol)
        for name in names:
            i = 256 + len(c.symbol2number)
            c.symbol2number[name] = i
            c.number2symbol[i] = name
        for name in names:
            dfa = self.dfas[name]
            states = []
            for state in dfa:
                arcs = []
                for label, next in sorted(state.arcs.items()):
                    arcs.append((self.make_label(c, label), dfa.index(next)))
                if state.isfinal:
                    arcs.append((0, dfa.index(state)))
                states.append(arcs)
            c.states.append(states)
            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
        c.start = c.symbol2number[self.startsymbol]
        return c
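
    # Nonterminals are numbered upward from 256 so they can never collide
    # with token numbers, which all fit below 256; because the start symbol
    # is moved to the front of the name list, it is always assigned 256.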

    def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]:
        rawfirst = self.first[name]
        assert rawfirst is not None
        first = {}
        for label in sorted(rawfirst):
            ilabel = self.make_label(c, label)
            ##assert ilabel not in first # XXX failed on <> ... !=
            first[ilabel] = 1
        return first

    def make_label(self, c: PgenGrammar, label: Text) -> int:
        # XXX Maybe this should be a method on a subclass of converter?
        ilabel = len(c.labels)
        if label[0].isalpha():
            # Either a symbol name or a named token
            if label in c.symbol2number:
                # A symbol name (a non-terminal)
                if label in c.symbol2label:
                    return c.symbol2label[label]
                else:
                    c.labels.append((c.symbol2number[label], None))
                    c.symbol2label[label] = ilabel
                    return ilabel
            else:
                # A named token (NAME, NUMBER, STRING)
                itoken = getattr(token, label, None)
                assert isinstance(itoken, int), label
                assert itoken in token.tok_name, label
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel
        else:
            # Either a keyword or an operator
            assert label[0] in ('"', "'"), label
            value = eval(label)
            if value[0].isalpha():
                # A keyword
                if value in c.keywords:
                    return c.keywords[value]
                else:
                    c.labels.append((token.NAME, value))
                    c.keywords[value] = ilabel
                    return ilabel
            else:
                # An operator (any non-numeric token)
                itoken = grammar.opmap[value]  # Fails if unknown token
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel
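
    # make_label() interns four kinds of labels, reusing the existing index
    # on later lookups:
    #   expr    -> a nonterminal:  (symbol2number["expr"], None)
    #   NAME    -> a named token:  (token.NAME, None)
    #   'if'    -> a keyword:      (token.NAME, "if")
    #   '+'     -> an operator:    (grammar.opmap["+"], None)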

    def addfirstsets(self) -> None:
        names = list(self.dfas.keys())
        names.sort()
        for name in names:
            if name not in self.first:
                self.calcfirst(name)
            # print name, self.first[name].keys()

    def calcfirst(self, name: Text) -> None:
        dfa = self.dfas[name]
        self.first[name] = None  # dummy to detect left recursion
        state = dfa[0]
        totalset: Dict[str, int] = {}
        overlapcheck = {}
        for label, next in state.arcs.items():
            if label in self.dfas:
                if label in self.first:
                    fset = self.first[label]
                    if fset is None:
                        raise ValueError("recursion for rule %r" % name)
                else:
                    self.calcfirst(label)
                    fset = self.first[label]
                    assert fset is not None
                totalset.update(fset)
                overlapcheck[label] = fset
            else:
                totalset[label] = 1
                overlapcheck[label] = {label: 1}
        inverse: Dict[str, str] = {}
        for label, itsfirst in overlapcheck.items():
            for symbol in itsfirst:
                if symbol in inverse:
                    raise ValueError(
                        "rule %s is ambiguous; %s is in the"
                        " first sets of %s as well as %s"
                        % (name, symbol, label, inverse[symbol])
                    )
                inverse[symbol] = label
        self.first[name] = totalset
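
    # Worked example: for a rule like
    #     atom: NAME | STRING
    # the computed FIRST set is {"NAME": 1, "STRING": 1}.  The inverse map
    # exists only to catch two alternatives whose FIRST sets overlap, which
    # a single-token-lookahead parser could not disambiguate.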

    def parse(self) -> Tuple[Dict[Text, List["DFAState"]], Text]:
        dfas = {}
        startsymbol: Optional[str] = None
        # MSTART: (NEWLINE | RULE)* ENDMARKER
        while self.type != token.ENDMARKER:
            while self.type == token.NEWLINE:
                self.gettoken()
            # RULE: NAME ':' RHS NEWLINE
            name = self.expect(token.NAME)
            self.expect(token.OP, ":")
            a, z = self.parse_rhs()
            self.expect(token.NEWLINE)
            # self.dump_nfa(name, a, z)
            dfa = self.make_dfa(a, z)
            # self.dump_dfa(name, dfa)
            oldlen = len(dfa)
            self.simplify_dfa(dfa)
            newlen = len(dfa)
            dfas[name] = dfa
            # print name, oldlen, newlen
            if startsymbol is None:
                startsymbol = name
        assert startsymbol is not None
        return dfas, startsymbol
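
    # parse() consumes rule lines of the form NAME ':' RHS NEWLINE, e.g. the
    # Grammar.txt line
    #     arith_expr: term (('+'|'-') term)*
    # and the first rule in the file becomes the grammar's start symbol.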

    def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]:
        # To turn an NFA into a DFA, we define the states of the DFA
        # to correspond to *sets* of states of the NFA.  Then do some
        # state reduction.  Let's represent sets as dicts with 1 for
        # values.
        assert isinstance(start, NFAState)
        assert isinstance(finish, NFAState)

        def closure(state: NFAState) -> Dict[NFAState, int]:
            base: Dict[NFAState, int] = {}
            addclosure(state, base)
            return base

        def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:
            assert isinstance(state, NFAState)
            if state in base:
                return
            base[state] = 1
            for label, next in state.arcs:
                if label is None:
                    addclosure(next, base)

        states = [DFAState(closure(start), finish)]
        for state in states:  # NB states grows while we're iterating
            arcs: Dict[str, Dict[NFAState, int]] = {}
            for nfastate in state.nfaset:
                for label, next in nfastate.arcs:
                    if label is not None:
                        addclosure(next, arcs.setdefault(label, {}))
            for label, nfaset in sorted(arcs.items()):
                for st in states:
                    if st.nfaset == nfaset:
                        break
                else:
                    st = DFAState(nfaset, finish)
                    states.append(st)
                state.addarc(st, label)
        return states  # List of DFAState instances; first one is start
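
    # This is the classic subset construction: every DFA state corresponds to
    # the epsilon-closure of a set of NFA states, and it is final iff the
    # NFA's finish state belongs to that set.  The `for state in states` loop
    # doubles as the worklist, since newly created DFAStates are appended to
    # the very list being iterated.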

    def dump_nfa(self, name: Text, start: "NFAState", finish: "NFAState") -> None:
        print("Dump of NFA for", name)
        todo = [start]
        for i, state in enumerate(todo):
            print("  State", i, state is finish and "(final)" or "")
            for label, next in state.arcs:
                if next in todo:
                    j = todo.index(next)
                else:
                    j = len(todo)
                    todo.append(next)
                if label is None:
                    print("    -> %d" % j)
                else:
                    print("    %s -> %d" % (label, j))

    def dump_dfa(self, name: Text, dfa: Sequence["DFAState"]) -> None:
        print("Dump of DFA for", name)
        for i, state in enumerate(dfa):
            print("  State", i, state.isfinal and "(final)" or "")
            for label, next in sorted(state.arcs.items()):
                print("    %s -> %d" % (label, dfa.index(next)))

    def simplify_dfa(self, dfa: List["DFAState"]) -> None:
        # This is not theoretically optimal, but works well enough.
        # Algorithm: repeatedly look for two states that have the same
        # set of arcs (same labels pointing to the same nodes) and
        # unify them, until things stop changing.

        # dfa is a list of DFAState instances
        changes = True
        while changes:
            changes = False
            for i, state_i in enumerate(dfa):
                for j in range(i + 1, len(dfa)):
                    state_j = dfa[j]
                    if state_i == state_j:
                        # print "  unify", i, j
                        del dfa[j]
                        for state in dfa:
                            state.unifystate(state_j, state_i)
                        changes = True
                        break
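
    # The outer while-loop makes this a fixed-point iteration: removing state
    # j can make two previously distinct states compare equal (their arcs now
    # point at the same target), so scanning repeats until a full pass finds
    # no more duplicates.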

    def parse_rhs(self) -> Tuple["NFAState", "NFAState"]:
        # RHS: ALT ('|' ALT)*
        a, z = self.parse_alt()
        if self.value != "|":
            return a, z
        else:
            aa = NFAState()
            zz = NFAState()
            aa.addarc(a)
            z.addarc(zz)
            while self.value == "|":
                self.gettoken()
                a, z = self.parse_alt()
                aa.addarc(a)
                z.addarc(zz)
            return aa, zz

    def parse_alt(self) -> Tuple["NFAState", "NFAState"]:
        # ALT: ITEM+
        a, b = self.parse_item()
        while self.value in ("(", "[") or self.type in (token.NAME, token.STRING):
            c, d = self.parse_item()
            b.addarc(c)
            b = d
        return a, b

    def parse_item(self) -> Tuple["NFAState", "NFAState"]:
        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
        if self.value == "[":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, "]")
            a.addarc(z)
            return a, z
        else:
            a, z = self.parse_atom()
            value = self.value
            if value not in ("+", "*"):
                return a, z
            self.gettoken()
            z.addarc(a)
            if value == "+":
                return a, z
            else:
                return a, a

    def parse_atom(self) -> Tuple["NFAState", "NFAState"]:
        # ATOM: '(' RHS ')' | NAME | STRING
        if self.value == "(":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, ")")
            return a, z
        elif self.type in (token.NAME, token.STRING):
            a = NFAState()
            z = NFAState()
            a.addarc(z, self.value)
            self.gettoken()
            return a, z
        else:
            self.raise_error(
                "expected (...) or NAME or STRING, got %s/%s", self.type, self.value
            )

    def expect(self, type: int, value: Optional[Any] = None) -> Text:
        if self.type != type or (value is not None and self.value != value):
            self.raise_error(
                "expected %s/%s, got %s/%s", type, value, self.type, self.value
            )
        value = self.value
        self.gettoken()
        return value

    def gettoken(self) -> None:
        tup = next(self.generator)
        while tup[0] in (tokenize.COMMENT, tokenize.NL):
            tup = next(self.generator)
        self.type, self.value, self.begin, self.end, self.line = tup
        # print token.tok_name[self.type], repr(self.value)

    def raise_error(self, msg: str, *args: Any) -> NoReturn:
        if args:
            try:
                msg = msg % args
            except Exception:
                msg = " ".join([msg] + list(map(str, args)))
        raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line))


class NFAState(object):
    arcs: List[Tuple[Optional[Text], "NFAState"]]

    def __init__(self) -> None:
        self.arcs = []  # list of (label, NFAState) pairs

    def addarc(self, next: "NFAState", label: Optional[Text] = None) -> None:
        assert label is None or isinstance(label, str)
        assert isinstance(next, NFAState)
        self.arcs.append((label, next))


class DFAState(object):
    nfaset: Dict[NFAState, Any]
    isfinal: bool
    arcs: Dict[Text, "DFAState"]

    def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
        assert isinstance(nfaset, dict)
        assert isinstance(next(iter(nfaset)), NFAState)
        assert isinstance(final, NFAState)
        self.nfaset = nfaset
        self.isfinal = final in nfaset
        self.arcs = {}  # map from label to DFAState

    def addarc(self, next: "DFAState", label: Text) -> None:
        assert isinstance(label, str)
        assert label not in self.arcs
        assert isinstance(next, DFAState)
        self.arcs[label] = next

    def unifystate(self, old: "DFAState", new: "DFAState") -> None:
        for label, next in self.arcs.items():
            if next is old:
                self.arcs[label] = new

    def __eq__(self, other: Any) -> bool:
        # Equality test -- ignore the nfaset instance variable
        assert isinstance(other, DFAState)
        if self.isfinal != other.isfinal:
            return False
        # Can't just return self.arcs == other.arcs, because that
        # would invoke this method recursively, with cycles...
        if len(self.arcs) != len(other.arcs):
            return False
        for label, next in self.arcs.items():
            if next is not other.arcs.get(label):
                return False
        return True

    __hash__: Any = None  # For Py3 compatibility.


def generate_grammar(filename: Path = "Grammar.txt") -> PgenGrammar:
    p = ParserGenerator(filename)
    return p.make_grammar()
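

# A minimal usage sketch, not part of the original module: it assumes a
# pgen-format grammar file named "Grammar.txt" exists in the current
# directory.  The attributes read below (start, number2symbol, symbol2number,
# labels) are all defined on blib2to3.pgen2.grammar.Grammar.
if __name__ == "__main__":
    g = generate_grammar("Grammar.txt")
    print("start symbol:", g.number2symbol[g.start])  # first rule in the file
    print("nonterminals:", len(g.symbol2number))
    print("labels:", len(g.labels))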