# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.

"""Tokenization help for Python programs.

generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens.  It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF).  It generates
5-tuples with these members:

    the token type (see token.py)
    the token string
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.

Older entry points
    tokenize_loop(readline, tokeneater)
    tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
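
# Illustrative example (not part of the original module): for the source line
# "x = 1\n", the first two tuples produced by generate_tokens() look like
#
#   (NAME, 'x', (1, 0), (1, 1), 'x = 1\n')
#   (OP,   '=', (1, 2), (1, 3), 'x = 1\n')
#
# where NAME and OP are the integer token types from blib2to3.pgen2.token.
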
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'

import re
from codecs import BOM_UTF8, lookup
from attr import dataclass
from blib2to3.pgen2.token import *

from blib2to3.pgen2 import token

__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
           "generate_tokens", "untokenize"]
del token
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
def _combinations(*l):
    return set(
        x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()
    )
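
# A quick illustration of the helpers above (not part of the original source):
#
#   group('a', 'b')         -> '(a|b)'
#   maybe(r'\d')            -> r'(\d)?'
#   _combinations('r', 'b') -> {'r', 'b', 'rb', 'br'}
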
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'  # this is invalid but it's fine because Name comes after Number in all groups

Binnumber = r'0[bB]_?[01]+(?:_[01]+)*'
Hexnumber = r'0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?'
Octnumber = r'0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?'
Decnumber = group(r'[1-9]\d*(?:_\d+)*[lL]?', '0[lL]?')
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+(?:_\d+)*'
Pointfloat = group(r'\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?', r'\.\d+(?:_\d+)*') + maybe(Exponent)
Expfloat = r'\d+(?:_\d+)*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+(?:_\d+)*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
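
# For instance (illustrative, not part of the original source), re.match(Number, s)
# succeeds for literals such as '0b1010', '0xDEAD_BEEF', '1_000_000', '3.14',
# '1e-9' and '2j'; Imagnumber is listed first so a trailing 'j'/'J' is consumed
# with the number rather than left behind as a name.
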
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
_litprefix = r"(?:[uUrRbBfF]|[rR][fFbB]|[fFbBuU][rR])?"
Triple = group(_litprefix + "'''", _litprefix + '"""')
# Single-line ' or " string.
String = group(_litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               _litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",

Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(_litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                _litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog = re.compile(Token, re.UNICODE)
pseudoprog = re.compile(PseudoToken, re.UNICODE)
single3prog = re.compile(Single3)
double3prog = re.compile(Double3)
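
# Illustrative example (not part of the original source): generate_tokens()
# below drives pseudoprog one match at a time; span(1) of each match is the
# next token with any leading whitespace skipped.
#
#   m = pseudoprog.match("  x = 1\n", 0)
#   m.span(1)    # -> (2, 3)
#   m.group(1)   # -> 'x'
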
_strprefixes = (
    _combinations('r', 'R', 'f', 'F') |
    _combinations('r', 'R', 'b', 'B') |
    {'u', 'U', 'ur', 'uR', 'Ur', 'UR'}
)

endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            **{f"{prefix}'''": single3prog for prefix in _strprefixes},
            **{f'{prefix}"""': double3prog for prefix in _strprefixes},
            **{prefix: None for prefix in _strprefixes}}

triple_quoted = (
    {"'''", '"""'} |
    {f"{prefix}'''" for prefix in _strprefixes} |
    {f'{prefix}"""' for prefix in _strprefixes}
)
single_quoted = (
    {"'", '"'} |
    {f"{prefix}'" for prefix in _strprefixes} |
    {f'{prefix}"' for prefix in _strprefixes}
)
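
# For example (illustrative): _strprefixes contains entries such as 'r', 'Rb',
# 'f', 'BR' and 'u', so endprogs maps keys like "r'''" and 'rb"""' to the
# matching "tail of triple-quoted string" programs, while a bare prefix with
# no quote maps to None.
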
tabsize = 8

@dataclass(frozen=True)
class TokenizerConfig:
    async_is_reserved_keyword: bool = False
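
# Illustrative usage (not part of the original source): when the target is
# known to be Python 3.7+, a config can be passed to generate_tokens() so that
# `async`/`await` are always tokenized as keywords:
#
#   config = TokenizerConfig(async_is_reserved_keyword=True)
#   tokens = generate_tokens(readline, config=config)
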
class TokenError(Exception): pass

class StopTokenizing(Exception): pass
def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
    (srow, scol) = xxx_todo_changeme
    (erow, ecol) = xxx_todo_changeme1
    print("%d,%d-%d,%d:\t%s\t%s" % \
          (srow, scol, erow, ecol, tok_name[type], repr(token)))
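
# Illustrative output (not part of the original source): for a NAME token 'foo'
# spanning columns 0-3 of line 1, printtoken() writes
#
#   1,0-1,3:    NAME    'foo'
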
def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    for token_info in generate_tokens(readline):
        tokeneater(*token_info)
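
# Illustrative usage (not part of the original source):
#
#   import io
#   def eater(type, token, start, end, line):
#       print(tok_name[type], repr(token))
#   tokenize(io.StringIO("x = 1\n").readline, eater)
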
class Untokenizer:

    def add_whitespace(self, start):
        row, col = start
        assert row <= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        for t in iterable:
            if len(t) == 2:
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)
    def compat(self, token, iterable):
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token
        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        for tok in iterable:
            toknum, tokval = tok[:2]
            if toknum in (NAME, NUMBER, ASYNC, AWAIT):
                tokval += ' '
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
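
# For example (illustrative): cookie_re matches PEP 263 declarations such as
# "# -*- coding: utf-8 -*-" or "# vim: set fileencoding=latin-1 :", capturing
# the encoding name in group 1; blank_re matches blank or comment-only lines.
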
def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present, but
    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
    charset, raise a SyntaxError. Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    bom_found = False
    default = 'utf-8'

    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return bytes()
    def find_cookie(line):
        try:
            line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None
        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)

        if bom_found:
            if codec.name != 'utf-8':
                # This behaviour mimics the Python interpreter
                raise SyntaxError('encoding problem: utf-8')
            encoding += '-sig'
        return encoding
    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
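
# Illustrative example (not part of the original source): a latin-1 coding
# cookie on the first line is detected without reading the rest of the file.
#
#   import io
#   buf = io.BytesIO(b"# -*- coding: latin-1 -*-\nprint('hi')\n")
#   detect_encoding(buf.readline)
#   # -> ('iso-8859-1', [b'# -*- coding: latin-1 -*-\n'])
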
def untokenize(iterable):
    """Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    return ut.untokenize(iterable)
def generate_tokens(readline, config: TokenizerConfig = TokenizerConfig()):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile).next    # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    """
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    # If we know we're parsing 3.7+, we can unconditionally parse `async` and
    # `await` as keywords.
    async_is_reserved_keyword = config.async_is_reserved_keyword
    # 'stashed' and 'async_*' are used for async/await parsing
    stashed = None
    async_def = False
    async_def_indent = 0
    async_def_nl = False
    while 1:                                   # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = ''
        lnum = lnum + 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield (ERRORTOKEN, contstr + line,
                       strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue
        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            if pos == max: break

            if line[pos] in '\r\n':            # skip blank lines
                yield (NL, line[pos:], (lnum, pos), (lnum, len(line)), line)
                continue

            if line[pos] == '#':               # skip comments
                comment_token = line[pos:].rstrip('\r\n')
                nl_pos = pos + len(comment_token)
                yield (COMMENT, comment_token,
                       (lnum, pos), (lnum, pos + len(comment_token)), line)
                yield (NL, line[nl_pos:],
                       (lnum, nl_pos), (lnum, len(line)), line)
                continue
            if column > indents[-1]:           # count indents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)

            while column < indents[-1]:        # count dedents
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                if async_def and async_def_indent >= indents[-1]:
                    async_def = False
                    async_def_nl = False
                    async_def_indent = 0

                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)

            if async_def and async_def_nl and async_def_indent >= indents[-1]:
                async_def = False
                async_def_nl = False
                async_def_indent = 0
        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0
        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    newline = NEWLINE
                    if parenlev > 0:
                        newline = NL
                    elif async_def:
                        async_def_nl = True
                    yield (newline, token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                     token[:2] in single_quoted or \
                     token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield (STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    if token in ('async', 'await'):
                        if async_is_reserved_keyword or async_def:
                            yield (ASYNC if token == 'async' else AWAIT,
                                   token, spos, epos, line)
                            continue

                    tok = (NAME, token, spos, epos, line)
                    if token == 'async' and not stashed:
                        stashed = tok
                        continue

                    if token in ('def', 'for'):
                        if (stashed
                                and stashed[0] == NAME
                                and stashed[1] == 'async'):

                            if token == 'def':
                                async_def = True
                                async_def_indent = indents[-1]

                            yield (ASYNC, stashed[1],
                                   stashed[2], stashed[3],
                                   stashed[4])
                            stashed = None

                    yield tok
                elif initial == '\\':                      # continued stmt
                    # This yield is new; needed for better idempotency:
                    yield (NL, token, spos, (lnum, pos), line)
                    continued = 1
                else:
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                       (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1
    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
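
# Illustrative example (not part of the original source): tokenizing a single
# assignment statement yields NAME, OP, NUMBER, NEWLINE and ENDMARKER tokens.
#
#   import io
#   [tok_name[t[0]] for t in generate_tokens(io.StringIO("x = 1\n").readline)]
#   # -> ['NAME', 'OP', 'NUMBER', 'NEWLINE', 'ENDMARKER']
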
if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    else: tokenize(sys.stdin.readline)