All patches and comments are welcome. Please squash your changes into logical
commits before using git-format-patch and git-send-email to send them to
patches@git.madduck.net.
I would be especially grateful if you read the Git project's submission
guidelines and adhere to them.
2 String transformers that can split and merge strings.
4 from abc import ABC, abstractmethod
5 from collections import defaultdict
6 from dataclasses import dataclass
7 import regex as re # We need recursive patterns here (?R)
26 if sys.version_info < (3, 8):
27 from typing_extensions import Final
29 from typing import Final
31 from mypy_extensions import trait
33 from black.rusty import Result, Ok, Err
35 from black.mode import Feature
36 from black.nodes import syms, replace_child, parent_type
37 from black.nodes import is_empty_par, is_empty_lpar, is_empty_rpar
38 from black.nodes import OPENING_BRACKETS, CLOSING_BRACKETS, STANDALONE_COMMENT
39 from black.lines import Line, append_leaves
40 from black.brackets import BracketMatchError
41 from black.comments import contains_pragma_comment
42 from black.strings import has_triple_quotes, get_string_prefix, assert_is_leaf_string
43 from black.strings import normalize_string_quotes
45 from blib2to3.pytree import Leaf, Node
46 from blib2to3.pgen2 import token
class CannotTransform(Exception):
    """Base class for errors raised by Transformers.

    A transformer raises this (or wraps it in an ``Err`` result — see
    ``TResult`` below) when it cannot be applied to the given line.
    """
# A leaf or interior node of the blib2to3 syntax tree.
LN = Union[Leaf, Node]

# The protocol every line transformer satisfies: (line, features) -> lines.
Transformer = Callable[[Line, Collection[Feature]], Iterator[Line]]

# Result of a transformation step: Ok(T) on success, Err(CannotTransform)
# on failure.
# NOTE(review): `T` and `Index` are defined on lines elided from this
# listing — presumably a TypeVar and an integer alias; confirm against the
# full source.
TResult = Result[T, CannotTransform]  # (T)ransform Result
TMatchResult = TResult[Index]
def TErr(err_msg: str) -> Err[CannotTransform]:
    """(T)ransform Err

    Convenience function used when working with the TResult type: wraps
    @err_msg in a CannotTransform and returns it as an Err result.
    """
    return Err(CannotTransform(err_msg))
class StringTransformer(ABC):
    """
    An implementation of the Transformer protocol that relies on its
    subclasses overriding the template methods `do_match(...)` and
    `do_transform(...)`.

    This Transformer works exclusively on strings (for example, by merging
    or splitting them).

    The following sections can be found among the docstrings of each concrete
    StringTransformer subclass.

    Requirements:
        Which requirements must be met of the given Line for this
        StringTransformer to be applied?

    Transformations:
        If the given Line meets all of the above requirements, which string
        transformations can you expect to be applied to it by this
        StringTransformer?

    Collaborations:
        What contractual agreements does this StringTransformer have with other
        StringTransfomers? Such collaborations should be eliminated/minimized
        as much as possible.
    """

    __name__: Final = "StringTransformer"

    # Ideally this would be a dataclass, but unfortunately mypyc breaks when used with
    # `abc.ABC`.
    def __init__(self, line_length: int, normalize_strings: bool) -> None:
        self.line_length = line_length
        self.normalize_strings = normalize_strings

    @abstractmethod
    def do_match(self, line: Line) -> TMatchResult:
        """
        Returns:
            * Ok(string_idx) such that `line.leaves[string_idx]` is our target
              string, if a match was able to be made.
              OR
            * Err(CannotTransform), if a match was not able to be made.
        """

    @abstractmethod
    def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
        """
        Yields:
            * Ok(new_line) where new_line is the new transformed line.
              OR
            * Err(CannotTransform) if the transformation failed for some reason. The
              `do_match(...)` template method should usually be used to reject
              the form of the given Line, but in some cases it is difficult to
              know whether or not a Line meets the StringTransformer's
              requirements until the transformation is already midway.

        Side Effects:
            This method should NOT mutate @line directly, but it MAY mutate the
            Line's underlying Node structure. (WARNING: If the underlying Node
            structure IS altered, then this method should NOT be allowed to
            yield an CannotTransform after that point.)
        """

    def __call__(self, line: Line, _features: Collection[Feature]) -> Iterator[Line]:
        """
        StringTransformer instances have a call signature that mirrors that of
        the Transformer type.

        Raises:
            CannotTransform(...) if the concrete StringTransformer class is unable
            to transform @line.
        """
        # Optimization to avoid calling `self.do_match(...)` when the line does
        # not contain any string.
        if not any(leaf.type == token.STRING for leaf in line.leaves):
            raise CannotTransform("There are no strings in this line.")

        match_result = self.do_match(line)

        if isinstance(match_result, Err):
            cant_transform = match_result.err()
            raise CannotTransform(
                f"The string transformer {self.__class__.__name__} does not recognize"
                " this line as one that it can transform."
            ) from cant_transform

        string_idx = match_result.ok()

        for line_result in self.do_transform(line, string_idx):
            if isinstance(line_result, Err):
                cant_transform = line_result.err()
                raise CannotTransform(
                    "StringTransformer failed while attempting to transform string."
                ) from cant_transform

            line = line_result.ok()
            # NOTE(review): the `yield line` below sits on a line elided from
            # this listing; it is required by the generator contract of the
            # Transformer type — confirm against the full source.
            yield line
@dataclass
class CustomSplit:
    """A custom (i.e. manual) string split.

    A single CustomSplit instance represents a single substring.

    For example, a merged string that was originally three substrings will
    correspond to three CustomSplit instances, e.g.:

        CustomSplit(False, 16)
        CustomSplit(False, 17)
        CustomSplit(True, 16)

    Attributes:
        has_prefix: True iff this substring carried a string prefix (e.g. 'f')
            before the merge.
        break_idx: the index into the merged string value at which this
            substring ends (i.e. where the split should be re-applied).
    """

    has_prefix: bool
    break_idx: int
class CustomSplitMapMixin:
    """
    This mixin class is used to map merged strings to a sequence of
    CustomSplits, which will then be used to re-split the strings iff none of
    the resultant substrings go over the configured max line length.
    """

    # Keys combine the string's object identity with its value so that two
    # equal-valued but distinct string objects get separate entries.
    _Key: ClassVar = Tuple[StringID, str]
    _CUSTOM_SPLIT_MAP: ClassVar[Dict[_Key, Tuple[CustomSplit, ...]]] = defaultdict(
        tuple
    )

    @staticmethod
    def _get_key(string: str) -> "CustomSplitMapMixin._Key":
        """
        Returns:
            A unique identifier that is used internally to map @string to a
            group of custom splits.
        """
        return (id(string), string)

    def add_custom_splits(
        self, string: str, custom_splits: Iterable[CustomSplit]
    ) -> None:
        """Custom Split Map Setter Method

        Side Effects:
            Adds a mapping from @string to the custom splits @custom_splits.
        """
        key = self._get_key(string)
        self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)

    def pop_custom_splits(self, string: str) -> List[CustomSplit]:
        """Custom Split Map Getter Method

        Returns:
            * A list of the custom splits that are mapped to @string, if any
              exist.
              OR
            * [], otherwise.

        Side Effects:
            Deletes the mapping between @string and its associated custom
            splits (which are returned to the caller).
        """
        key = self._get_key(string)

        # The map is a defaultdict(tuple), so a missing key yields () here
        # and the entry created by the lookup is deleted right after.
        custom_splits = self._CUSTOM_SPLIT_MAP[key]
        del self._CUSTOM_SPLIT_MAP[key]

        return list(custom_splits)

    def has_custom_splits(self, string: str) -> bool:
        """
        Returns:
            True iff @string is associated with a set of custom splits.
        """
        key = self._get_key(string)
        return key in self._CUSTOM_SPLIT_MAP
261 class StringMerger(StringTransformer, CustomSplitMapMixin):
262 """StringTransformer that merges strings together.
265 (A) The line contains adjacent strings such that ALL of the validation checks
266 listed in StringMerger.__validate_msg(...)'s docstring pass.
268 (B) The line contains a string which uses line continuation backslashes.
271 Depending on which of the two requirements above where met, either:
273 (A) The string group associated with the target string is merged.
275 (B) All line-continuation backslashes are removed from the target string.
278 StringMerger provides custom split information to StringSplitter.
281 def do_match(self, line: Line) -> TMatchResult:
284 is_valid_index = is_valid_index_factory(LL)
286 for (i, leaf) in enumerate(LL):
288 leaf.type == token.STRING
289 and is_valid_index(i + 1)
290 and LL[i + 1].type == token.STRING
294 if leaf.type == token.STRING and "\\\n" in leaf.value:
297 return TErr("This line has no strings that need merging.")
299 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
301 rblc_result = self._remove_backslash_line_continuation_chars(
304 if isinstance(rblc_result, Ok):
305 new_line = rblc_result.ok()
307 msg_result = self._merge_string_group(new_line, string_idx)
308 if isinstance(msg_result, Ok):
309 new_line = msg_result.ok()
311 if isinstance(rblc_result, Err) and isinstance(msg_result, Err):
312 msg_cant_transform = msg_result.err()
313 rblc_cant_transform = rblc_result.err()
314 cant_transform = CannotTransform(
315 "StringMerger failed to merge any strings in this line."
318 # Chain the errors together using `__cause__`.
319 msg_cant_transform.__cause__ = rblc_cant_transform
320 cant_transform.__cause__ = msg_cant_transform
322 yield Err(cant_transform)
327 def _remove_backslash_line_continuation_chars(
328 line: Line, string_idx: int
331 Merge strings that were split across multiple lines using
332 line-continuation backslashes.
335 Ok(new_line), if @line contains backslash line-continuation
338 Err(CannotTransform), otherwise.
342 string_leaf = LL[string_idx]
344 string_leaf.type == token.STRING
345 and "\\\n" in string_leaf.value
346 and not has_triple_quotes(string_leaf.value)
349 f"String leaf {string_leaf} does not contain any backslash line"
350 " continuation characters."
353 new_line = line.clone()
354 new_line.comments = line.comments.copy()
355 append_leaves(new_line, line, LL)
357 new_string_leaf = new_line.leaves[string_idx]
358 new_string_leaf.value = new_string_leaf.value.replace("\\\n", "")
362 def _merge_string_group(self, line: Line, string_idx: int) -> TResult[Line]:
364 Merges string group (i.e. set of adjacent strings) where the first
365 string in the group is `line.leaves[string_idx]`.
368 Ok(new_line), if ALL of the validation checks found in
369 __validate_msg(...) pass.
371 Err(CannotTransform), otherwise.
375 is_valid_index = is_valid_index_factory(LL)
377 vresult = self._validate_msg(line, string_idx)
378 if isinstance(vresult, Err):
381 # If the string group is wrapped inside an Atom node, we must make sure
382 # to later replace that Atom with our new (merged) string leaf.
383 atom_node = LL[string_idx].parent
385 # We will place BREAK_MARK in between every two substrings that we
386 # merge. We will then later go through our final result and use the
387 # various instances of BREAK_MARK we find to add the right values to
388 # the custom split map.
389 BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"
391 QUOTE = LL[string_idx].value[-1]
393 def make_naked(string: str, string_prefix: str) -> str:
394 """Strip @string (i.e. make it a "naked" string)
397 * assert_is_leaf_string(@string)
400 A string that is identical to @string except that
401 @string_prefix has been stripped, the surrounding QUOTE
402 characters have been removed, and any remaining QUOTE
403 characters have been escaped.
405 assert_is_leaf_string(string)
407 RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
408 naked_string = string[len(string_prefix) + 1 : -1]
409 naked_string = re.sub(
410 "(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
414 # Holds the CustomSplit objects that will later be added to the custom
418 # Temporary storage for the 'has_prefix' part of the CustomSplit objects.
421 # Sets the 'prefix' variable. This is the prefix that the final merged
423 next_str_idx = string_idx
427 and is_valid_index(next_str_idx)
428 and LL[next_str_idx].type == token.STRING
430 prefix = get_string_prefix(LL[next_str_idx].value).lower()
433 # The next loop merges the string group. The final string will be
436 # The following convenience variables are used:
441 # NSS: naked next string
445 next_str_idx = string_idx
446 while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
449 SS = LL[next_str_idx].value
450 next_prefix = get_string_prefix(SS).lower()
452 # If this is an f-string group but this substring is not prefixed
454 if "f" in prefix and "f" not in next_prefix:
455 # Then we must escape any braces contained in this substring.
456 SS = re.subf(r"(\{|\})", "{1}{1}", SS)
458 NSS = make_naked(SS, next_prefix)
460 has_prefix = bool(next_prefix)
461 prefix_tracker.append(has_prefix)
463 S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
464 NS = make_naked(S, prefix)
468 S_leaf = Leaf(token.STRING, S)
469 if self.normalize_strings:
470 S_leaf.value = normalize_string_quotes(S_leaf.value)
472 # Fill the 'custom_splits' list with the appropriate CustomSplit objects.
473 temp_string = S_leaf.value[len(prefix) + 1 : -1]
474 for has_prefix in prefix_tracker:
475 mark_idx = temp_string.find(BREAK_MARK)
478 ), "Logic error while filling the custom string breakpoint cache."
480 temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
481 breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
482 custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))
484 string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))
486 if atom_node is not None:
487 replace_child(atom_node, string_leaf)
489 # Build the final line ('new_line') that this method will later return.
490 new_line = line.clone()
491 for (i, leaf) in enumerate(LL):
493 new_line.append(string_leaf)
495 if string_idx <= i < string_idx + num_of_strings:
496 for comment_leaf in line.comments_after(LL[i]):
497 new_line.append(comment_leaf, preformatted=True)
500 append_leaves(new_line, line, [leaf])
502 self.add_custom_splits(string_leaf.value, custom_splits)
506 def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
507 """Validate (M)erge (S)tring (G)roup
509 Transform-time string validation logic for __merge_string_group(...).
512 * Ok(None), if ALL validation checks (listed below) pass.
514 * Err(CannotTransform), if any of the following are true:
515 - The target string group does not contain ANY stand-alone comments.
516 - The target string is not in a string group (i.e. it has no
518 - The string group has more than one inline comment.
519 - The string group has an inline comment that appears to be a pragma.
520 - The set of all string prefixes in the string group is of
521 length greater than one and is not equal to {"", "f"}.
522 - The string group consists of raw strings.
524 # We first check for "inner" stand-alone comments (i.e. stand-alone
525 # comments that have a string leaf before them AND after them).
528 found_sa_comment = False
529 is_valid_index = is_valid_index_factory(line.leaves)
530 while is_valid_index(i) and line.leaves[i].type in [
534 if line.leaves[i].type == STANDALONE_COMMENT:
535 found_sa_comment = True
536 elif found_sa_comment:
538 "StringMerger does NOT merge string groups which contain "
539 "stand-alone comments."
544 num_of_inline_string_comments = 0
545 set_of_prefixes = set()
547 for leaf in line.leaves[string_idx:]:
548 if leaf.type != token.STRING:
549 # If the string group is trailed by a comma, we count the
550 # comments trailing the comma to be one of the string group's
552 if leaf.type == token.COMMA and id(leaf) in line.comments:
553 num_of_inline_string_comments += 1
556 if has_triple_quotes(leaf.value):
557 return TErr("StringMerger does NOT merge multiline strings.")
560 prefix = get_string_prefix(leaf.value).lower()
562 return TErr("StringMerger does NOT merge raw strings.")
564 set_of_prefixes.add(prefix)
566 if id(leaf) in line.comments:
567 num_of_inline_string_comments += 1
568 if contains_pragma_comment(line.comments[id(leaf)]):
569 return TErr("Cannot merge strings which have pragma comments.")
571 if num_of_strings < 2:
573 f"Not enough strings to merge (num_of_strings={num_of_strings})."
576 if num_of_inline_string_comments > 1:
578 f"Too many inline string comments ({num_of_inline_string_comments})."
581 if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}:
582 return TErr(f"Too many different prefixes ({set_of_prefixes}).")
587 class StringParenStripper(StringTransformer):
588 """StringTransformer that strips surrounding parentheses from strings.
591 The line contains a string which is surrounded by parentheses and:
592 - The target string is NOT the only argument to a function call.
593 - The target string is NOT a "pointless" string.
594 - If the target string contains a PERCENT, the brackets are not
595 preceded or followed by an operator with higher precedence than
599 The parentheses mentioned in the 'Requirements' section are stripped.
602 StringParenStripper has its own inherent usefulness, but it is also
603 relied on to clean up the parentheses created by StringParenWrapper (in
604 the event that they are no longer needed).
607 def do_match(self, line: Line) -> TMatchResult:
610 is_valid_index = is_valid_index_factory(LL)
612 for (idx, leaf) in enumerate(LL):
613 # Should be a string...
614 if leaf.type != token.STRING:
617 # If this is a "pointless" string...
620 and leaf.parent.parent
621 and leaf.parent.parent.type == syms.simple_stmt
625 # Should be preceded by a non-empty LPAR...
627 not is_valid_index(idx - 1)
628 or LL[idx - 1].type != token.LPAR
629 or is_empty_lpar(LL[idx - 1])
633 # That LPAR should NOT be preceded by a function name or a closing
634 # bracket (which could be a function which returns a function or a
635 # list/dictionary that contains a function)...
636 if is_valid_index(idx - 2) and (
637 LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS
643 # Skip the string trailer, if one exists.
644 string_parser = StringParser()
645 next_idx = string_parser.parse(LL, string_idx)
647 # if the leaves in the parsed string include a PERCENT, we need to
648 # make sure the initial LPAR is NOT preceded by an operator with
649 # higher or equal precedence to PERCENT
650 if is_valid_index(idx - 2):
651 # mypy can't quite follow unless we name this
652 before_lpar = LL[idx - 2]
653 if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and (
670 # only unary PLUS/MINUS
672 and before_lpar.parent.type == syms.factor
673 and (before_lpar.type in {token.PLUS, token.MINUS})
678 # Should be followed by a non-empty RPAR...
680 is_valid_index(next_idx)
681 and LL[next_idx].type == token.RPAR
682 and not is_empty_rpar(LL[next_idx])
684 # That RPAR should NOT be followed by anything with higher
685 # precedence than PERCENT
686 if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in {
694 return Ok(string_idx)
696 return TErr("This line has no strings wrapped in parens.")
698 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
701 string_parser = StringParser()
702 rpar_idx = string_parser.parse(LL, string_idx)
704 for leaf in (LL[string_idx - 1], LL[rpar_idx]):
705 if line.comments_after(leaf):
707 "Will not strip parentheses which have comments attached to them."
711 new_line = line.clone()
712 new_line.comments = line.comments.copy()
714 append_leaves(new_line, line, LL[: string_idx - 1])
715 except BracketMatchError:
716 # HACK: I believe there is currently a bug somewhere in
717 # right_hand_split() that is causing brackets to not be tracked
718 # properly by a shared BracketTracker.
719 append_leaves(new_line, line, LL[: string_idx - 1], preformatted=True)
721 string_leaf = Leaf(token.STRING, LL[string_idx].value)
722 LL[string_idx - 1].remove()
723 replace_child(LL[string_idx], string_leaf)
724 new_line.append(string_leaf)
727 new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :]
730 LL[rpar_idx].remove()
735 class BaseStringSplitter(StringTransformer):
737 Abstract class for StringTransformers which transform a Line's strings by splitting
738 them or placing them on their own lines where necessary to avoid going over
739 the configured line length.
742 * The target string value is responsible for the line going over the
743 line length limit. It follows that after all of black's other line
744 split methods have been exhausted, this line (or one of the resulting
745 lines after all line splits are performed) would still be over the
746 line_length limit unless we split this string.
748 * The target string is NOT a "pointless" string (i.e. a string that has
749 no parent or siblings).
751 * The target string is not followed by an inline comment that appears
754 * The target string is not a multiline (i.e. triple-quote) string.
757 STRING_OPERATORS: Final = [
770 def do_splitter_match(self, line: Line) -> TMatchResult:
772 BaseStringSplitter asks its clients to override this method instead of
773 `StringTransformer.do_match(...)`.
775 Follows the same protocol as `StringTransformer.do_match(...)`.
777 Refer to `help(StringTransformer.do_match)` for more information.
780 def do_match(self, line: Line) -> TMatchResult:
781 match_result = self.do_splitter_match(line)
782 if isinstance(match_result, Err):
785 string_idx = match_result.ok()
786 vresult = self._validate(line, string_idx)
787 if isinstance(vresult, Err):
792 def _validate(self, line: Line, string_idx: int) -> TResult[None]:
794 Checks that @line meets all of the requirements listed in this classes'
795 docstring. Refer to `help(BaseStringSplitter)` for a detailed
796 description of those requirements.
799 * Ok(None), if ALL of the requirements are met.
801 * Err(CannotTransform), if ANY of the requirements are NOT met.
805 string_leaf = LL[string_idx]
807 max_string_length = self._get_max_string_length(line, string_idx)
808 if len(string_leaf.value) <= max_string_length:
810 "The string itself is not what is causing this line to be too long."
813 if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [
818 f"This string ({string_leaf.value}) appears to be pointless (i.e. has"
822 if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment(
823 line.comments[id(line.leaves[string_idx])]
826 "Line appears to end with an inline pragma comment. Splitting the line"
827 " could modify the pragma's behavior."
830 if has_triple_quotes(string_leaf.value):
831 return TErr("We cannot split multiline strings.")
835 def _get_max_string_length(self, line: Line, string_idx: int) -> int:
837 Calculates the max string length used when attempting to determine
838 whether or not the target string is responsible for causing the line to
839 go over the line length limit.
841 WARNING: This method is tightly coupled to both StringSplitter and
842 (especially) StringParenWrapper. There is probably a better way to
843 accomplish what is being done here.
846 max_string_length: such that `line.leaves[string_idx].value >
847 max_string_length` implies that the target string IS responsible
848 for causing this line to exceed the line length limit.
852 is_valid_index = is_valid_index_factory(LL)
854 # We use the shorthand "WMA4" in comments to abbreviate "We must
855 # account for". When giving examples, we use STRING to mean some/any
858 # Finally, we use the following convenience variables:
860 # P: The leaf that is before the target string leaf.
861 # N: The leaf that is after the target string leaf.
862 # NN: The leaf that is after N.
864 # WMA4 the whitespace at the beginning of the line.
865 offset = line.depth * 4
867 if is_valid_index(string_idx - 1):
868 p_idx = string_idx - 1
870 LL[string_idx - 1].type == token.LPAR
871 and LL[string_idx - 1].value == ""
874 # If the previous leaf is an empty LPAR placeholder, we should skip it.
878 if P.type in self.STRING_OPERATORS:
879 # WMA4 a space and a string operator (e.g. `+ STRING` or `== STRING`).
880 offset += len(str(P)) + 1
882 if P.type == token.COMMA:
883 # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`].
886 if P.type in [token.COLON, token.EQUAL, token.PLUSEQUAL, token.NAME]:
887 # This conditional branch is meant to handle dictionary keys,
888 # variable assignments, 'return STRING' statement lines, and
889 # 'else STRING' ternary expression lines.
891 # WMA4 a single space.
894 # WMA4 the lengths of any leaves that came before that space,
895 # but after any closing bracket before that space.
896 for leaf in reversed(LL[: p_idx + 1]):
897 offset += len(str(leaf))
898 if leaf.type in CLOSING_BRACKETS:
901 if is_valid_index(string_idx + 1):
902 N = LL[string_idx + 1]
903 if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2:
904 # If the next leaf is an empty RPAR placeholder, we should skip it.
905 N = LL[string_idx + 2]
907 if N.type == token.COMMA:
908 # WMA4 a single comma at the end of the string (e.g `STRING,`).
911 if is_valid_index(string_idx + 2):
912 NN = LL[string_idx + 2]
914 if N.type == token.DOT and NN.type == token.NAME:
915 # This conditional branch is meant to handle method calls invoked
916 # off of a string literal up to and including the LPAR character.
918 # WMA4 the '.' character.
922 is_valid_index(string_idx + 3)
923 and LL[string_idx + 3].type == token.LPAR
925 # WMA4 the left parenthesis character.
928 # WMA4 the length of the method's name.
929 offset += len(NN.value)
932 for comment_leaf in line.comments_after(LL[string_idx]):
935 # WMA4 two spaces before the '#' character.
938 # WMA4 the length of the inline comment.
939 offset += len(comment_leaf.value)
941 max_string_length = self.line_length - offset
942 return max_string_length
945 def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]:
947 Yields spans corresponding to expressions in a given f-string.
948 Spans are half-open ranges (left inclusive, right exclusive).
949 Assumes the input string is a valid f-string, but will not crash if the input
952 stack: List[int] = [] # our curly paren stack
956 # if we're in a string part of the f-string, ignore escaped curly braces
957 if not stack and i + 1 < len(s) and s[i + 1] == "{":
969 # we've made it back out of the expression! yield the span
975 # if we're in an expression part of the f-string, fast forward through strings
976 # note that backslashes are not legal in the expression portion of f-strings
979 if s[i : i + 3] in ("'''", '"""'):
981 elif s[i] in ("'", '"'):
985 while i < len(s) and s[i : i + len(delim)] != delim:
def fstring_contains_expr(s: str) -> bool:
    """Return True iff f-string `s` contains at least one f-expression span."""
    # Every yielded span is a non-empty (start, end) tuple, so the first one
    # is enough to decide.
    for _span in iter_fexpr_spans(s):
        return True
    return False
996 class StringSplitter(BaseStringSplitter, CustomSplitMapMixin):
998 StringTransformer that splits "atom" strings (i.e. strings which exist on
999 lines by themselves).
1002 * The line consists ONLY of a single string (possibly prefixed by a
1003 string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE
1006 * All of the requirements listed in BaseStringSplitter's docstring.
1009 The string mentioned in the 'Requirements' section is split into as
1010 many substrings as necessary to adhere to the configured line length.
1012 In the final set of substrings, no substring should be smaller than
1013 MIN_SUBSTR_SIZE characters.
1015 The string will ONLY be split on spaces (i.e. each new substring should
1016 start with a space). Note that the string will NOT be split on a space
1017 which is escaped with a backslash.
1019 If the string is an f-string, it will NOT be split in the middle of an
1020 f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x
1021 else bar()} is an f-expression).
1023 If the string that is being split has an associated set of custom split
1024 records and those custom splits will NOT result in any line going over
1025 the configured line length, those custom splits are used. Otherwise the
1026 string is split as late as possible (from left-to-right) while still
1027 adhering to the transformation rules listed above.
1030 StringSplitter relies on StringMerger to construct the appropriate
1031 CustomSplit objects and add them to the custom split map.
1034 MIN_SUBSTR_SIZE: Final = 6
1036 def do_splitter_match(self, line: Line) -> TMatchResult:
1039 is_valid_index = is_valid_index_factory(LL)
1043 # The first two leaves MAY be the 'not in' keywords...
1046 and is_valid_index(idx + 1)
1047 and [LL[idx].type, LL[idx + 1].type] == [token.NAME, token.NAME]
1048 and str(LL[idx]) + str(LL[idx + 1]) == "not in"
1051 # Else the first leaf MAY be a string operator symbol or the 'in' keyword...
1052 elif is_valid_index(idx) and (
1053 LL[idx].type in self.STRING_OPERATORS
1054 or LL[idx].type == token.NAME
1055 and str(LL[idx]) == "in"
1059 # The next/first leaf MAY be an empty LPAR...
1060 if is_valid_index(idx) and is_empty_lpar(LL[idx]):
1063 # The next/first leaf MUST be a string...
1064 if not is_valid_index(idx) or LL[idx].type != token.STRING:
1065 return TErr("Line does not start with a string.")
1069 # Skip the string trailer, if one exists.
1070 string_parser = StringParser()
1071 idx = string_parser.parse(LL, string_idx)
1073 # That string MAY be followed by an empty RPAR...
1074 if is_valid_index(idx) and is_empty_rpar(LL[idx]):
1077 # That string / empty RPAR leaf MAY be followed by a comma...
1078 if is_valid_index(idx) and LL[idx].type == token.COMMA:
1081 # But no more leaves are allowed...
1082 if is_valid_index(idx):
1083 return TErr("This line does not end with a string.")
1085 return Ok(string_idx)
1087 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
1090 QUOTE = LL[string_idx].value[-1]
1092 is_valid_index = is_valid_index_factory(LL)
1093 insert_str_child = insert_str_child_factory(LL[string_idx])
1095 prefix = get_string_prefix(LL[string_idx].value).lower()
1097 # We MAY choose to drop the 'f' prefix from substrings that don't
1098 # contain any f-expressions, but ONLY if the original f-string
1099 # contains at least one f-expression. Otherwise, we will alter the AST
1101 drop_pointless_f_prefix = ("f" in prefix) and fstring_contains_expr(
1102 LL[string_idx].value
1105 first_string_line = True
1107 string_op_leaves = self._get_string_operator_leaves(LL)
1108 string_op_leaves_length = (
1109 sum([len(str(prefix_leaf)) for prefix_leaf in string_op_leaves]) + 1
1114 def maybe_append_string_operators(new_line: Line) -> None:
1117 If @line starts with a string operator and this is the first
1118 line we are constructing, this function appends the string
1119 operator to @new_line and replaces the old string operator leaf
1120 in the node structure. Otherwise this function does nothing.
1122 maybe_prefix_leaves = string_op_leaves if first_string_line else []
1123 for i, prefix_leaf in enumerate(maybe_prefix_leaves):
1124 replace_child(LL[i], prefix_leaf)
1125 new_line.append(prefix_leaf)
1128 is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA
1131 def max_last_string() -> int:
1134 The max allowed length of the string value used for the last
1135 line we will construct.
1137 result = self.line_length
1138 result -= line.depth * 4
1139 result -= 1 if ends_with_comma else 0
1140 result -= string_op_leaves_length
1143 # --- Calculate Max Break Index (for string value)
1144 # We start with the line length limit
1145 max_break_idx = self.line_length
1146 # The last index of a string of length N is N-1.
1148 # Leading whitespace is not present in the string value (e.g. Leaf.value).
1149 max_break_idx -= line.depth * 4
1150 if max_break_idx < 0:
1152 f"Unable to split {LL[string_idx].value} at such high of a line depth:"
1157 # Check if StringMerger registered any custom splits.
1158 custom_splits = self.pop_custom_splits(LL[string_idx].value)
1159 # We use them ONLY if none of them would produce lines that exceed the
1161 use_custom_breakpoints = bool(
1163 and all(csplit.break_idx <= max_break_idx for csplit in custom_splits)
1166 # Temporary storage for the remaining chunk of the string line that
1167 # can't fit onto the line currently being constructed.
1168 rest_value = LL[string_idx].value
1170 def more_splits_should_be_made() -> bool:
1173 True iff `rest_value` (the remaining string value from the last
1174 split), should be split again.
1176 if use_custom_breakpoints:
1177 return len(custom_splits) > 1
1179 return len(rest_value) > max_last_string()
1181 string_line_results: List[Ok[Line]] = []
1182 while more_splits_should_be_made():
1183 if use_custom_breakpoints:
1184 # Custom User Split (manual)
1185 csplit = custom_splits.pop(0)
1186 break_idx = csplit.break_idx
1188 # Algorithmic Split (automatic)
1189 max_bidx = max_break_idx - string_op_leaves_length
1190 maybe_break_idx = self._get_break_idx(rest_value, max_bidx)
1191 if maybe_break_idx is None:
1192 # If we are unable to algorithmically determine a good split
1193 # and this string has custom splits registered to it, we
1194 # fall back to using them--which means we have to start
1195 # over from the beginning.
1197 rest_value = LL[string_idx].value
1198 string_line_results = []
1199 first_string_line = True
1200 use_custom_breakpoints = True
1203 # Otherwise, we stop splitting here.
1206 break_idx = maybe_break_idx
1208 # --- Construct `next_value`
1209 next_value = rest_value[:break_idx] + QUOTE
1211 # HACK: The following 'if' statement is a hack to fix the custom
1212 # breakpoint index in the case of either: (a) substrings that were
1213 # f-strings but will have the 'f' prefix removed OR (b) substrings
1214 # that were not f-strings but will now become f-strings because of
1215 # redundant use of the 'f' prefix (i.e. none of the substrings
1216 # contain f-expressions but one or more of them had the 'f' prefix
1217 # anyway; in which case, we will prepend 'f' to _all_ substrings).
1219 # There is probably a better way to accomplish what is being done
1222 # If this substring is an f-string, we _could_ remove the 'f'
1223 # prefix, and the current custom split did NOT originally use a
1226 next_value != self._normalize_f_string(next_value, prefix)
1227 and use_custom_breakpoints
1228 and not csplit.has_prefix
1230 # Then `csplit.break_idx` will be off by one after removing
1233 next_value = rest_value[:break_idx] + QUOTE
1235 if drop_pointless_f_prefix:
1236 next_value = self._normalize_f_string(next_value, prefix)
1238 # --- Construct `next_leaf`
1239 next_leaf = Leaf(token.STRING, next_value)
1240 insert_str_child(next_leaf)
1241 self._maybe_normalize_string_quotes(next_leaf)
1243 # --- Construct `next_line`
1244 next_line = line.clone()
1245 maybe_append_string_operators(next_line)
1246 next_line.append(next_leaf)
1247 string_line_results.append(Ok(next_line))
1249 rest_value = prefix + QUOTE + rest_value[break_idx:]
1250 first_string_line = False
1252 yield from string_line_results
1254 if drop_pointless_f_prefix:
1255 rest_value = self._normalize_f_string(rest_value, prefix)
1257 rest_leaf = Leaf(token.STRING, rest_value)
1258 insert_str_child(rest_leaf)
1260 # NOTE: I could not find a test case that verifies that the following
1261 # line is actually necessary, but it seems to be. Otherwise we risk
1262 # not normalizing the last substring, right?
1263 self._maybe_normalize_string_quotes(rest_leaf)
1265 last_line = line.clone()
1266 maybe_append_string_operators(last_line)
1268 # If there are any leaves to the right of the target string...
1269 if is_valid_index(string_idx + 1):
1270 # We use `temp_value` here to determine how long the last line
1271 # would be if we were to append all the leaves to the right of the
1272 # target string to the last string line.
1273 temp_value = rest_value
1274 for leaf in LL[string_idx + 1 :]:
1275 temp_value += str(leaf)
1276 if leaf.type == token.LPAR:
1279 # Try to fit them all on the same line with the last substring...
1281 len(temp_value) <= max_last_string()
1282 or LL[string_idx + 1].type == token.COMMA
1284 last_line.append(rest_leaf)
1285 append_leaves(last_line, line, LL[string_idx + 1 :])
1287 # Otherwise, place the last substring on one line and everything
1288 # else on a line below that...
1290 last_line.append(rest_leaf)
1293 non_string_line = line.clone()
1294 append_leaves(non_string_line, line, LL[string_idx + 1 :])
1295 yield Ok(non_string_line)
1296 # Else the target string was the last leaf...
1298 last_line.append(rest_leaf)
1299 last_line.comments = line.comments.copy()
1302 def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
1305 All ranges of @string which, if @string were to be split there,
1306 would result in the splitting of an \\N{...} expression (which is NOT
1309 # True - the previous backslash was unescaped
1310 # False - the previous backslash was escaped *or* there was no backslash
1311 previous_was_unescaped_backslash = False
1312 it = iter(enumerate(string))
1315 previous_was_unescaped_backslash = not previous_was_unescaped_backslash
1317 if not previous_was_unescaped_backslash or c != "N":
1318 previous_was_unescaped_backslash = False
1320 previous_was_unescaped_backslash = False
1322 begin = idx - 1 # the position of backslash before \N{...}
1328 # malformed nameescape expression?
1329 # should have been detected by AST parsing earlier...
1330 raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
1333 def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
1336 All ranges of @string which, if @string were to be split there,
1337 would result in the splitting of an f-expression (which is NOT
1340 if "f" not in get_string_prefix(string).lower():
1342 yield from iter_fexpr_spans(string)
1344 def _get_illegal_split_indices(self, string: str) -> Set[Index]:
1345 illegal_indices: Set[Index] = set()
1347 self._iter_fexpr_slices(string),
1348 self._iter_nameescape_slices(string),
1350 for it in iterators:
1351 for begin, end in it:
1352 illegal_indices.update(range(begin, end + 1))
1353 return illegal_indices
1355 def _get_break_idx(self, string: str, max_break_idx: int) -> Optional[int]:
1357 This method contains the algorithm that StringSplitter uses to
1358 determine which character to split each string at.
1361 @string: The substring that we are attempting to split.
1362 @max_break_idx: The ideal break index. We will return this value if it
1363 meets all the necessary conditions. In the likely event that it
1364 doesn't we will try to find the closest index BELOW @max_break_idx
1365 that does. If that fails, we will expand our search by also
1366 considering all valid indices ABOVE @max_break_idx.
1369 * assert_is_leaf_string(@string)
1370 * 0 <= @max_break_idx < len(@string)
1373 break_idx, if an index is able to be found that meets all of the
1374 conditions listed in the 'Transformations' section of this classes'
1379 is_valid_index = is_valid_index_factory(string)
1381 assert is_valid_index(max_break_idx)
1382 assert_is_leaf_string(string)
1384 _illegal_split_indices = self._get_illegal_split_indices(string)
1386 def breaks_unsplittable_expression(i: Index) -> bool:
1389 True iff returning @i would result in the splitting of an
1390 unsplittable expression (which is NOT allowed).
1392 return i in _illegal_split_indices
1394 def passes_all_checks(i: Index) -> bool:
1397 True iff ALL of the conditions listed in the 'Transformations'
1398 section of this classes' docstring would be be met by returning @i.
1400 is_space = string[i] == " "
1402 is_not_escaped = True
1404 while is_valid_index(j) and string[j] == "\\":
1405 is_not_escaped = not is_not_escaped
1409 len(string[i:]) >= self.MIN_SUBSTR_SIZE
1410 and len(string[:i]) >= self.MIN_SUBSTR_SIZE
1416 and not breaks_unsplittable_expression(i)
1419 # First, we check all indices BELOW @max_break_idx.
1420 break_idx = max_break_idx
1421 while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx):
1424 if not passes_all_checks(break_idx):
1425 # If that fails, we check all indices ABOVE @max_break_idx.
1427 # If we are able to find a valid index here, the next line is going
1428 # to be longer than the specified line length, but it's probably
1429 # better than doing nothing at all.
1430 break_idx = max_break_idx + 1
1431 while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx):
1434 if not is_valid_index(break_idx) or not passes_all_checks(break_idx):
1439 def _maybe_normalize_string_quotes(self, leaf: Leaf) -> None:
1440 if self.normalize_strings:
1441 leaf.value = normalize_string_quotes(leaf.value)
1443 def _normalize_f_string(self, string: str, prefix: str) -> str:
1446 * assert_is_leaf_string(@string)
1449 * If @string is an f-string that contains no f-expressions, we
1450 return a string identical to @string except that the 'f' prefix
1451 has been stripped and all double braces (i.e. '{{' or '}}') have
1452 been normalized (i.e. turned into '{' or '}').
1454 * Otherwise, we return @string.
1456 assert_is_leaf_string(string)
1458 if "f" in prefix and not fstring_contains_expr(string):
1459 new_prefix = prefix.replace("f", "")
1461 temp = string[len(prefix) :]
1462 temp = re.sub(r"\{\{", "{", temp)
1463 temp = re.sub(r"\}\}", "}", temp)
1466 return f"{new_prefix}{new_string}"
1470 def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> List[Leaf]:
1473 string_op_leaves = []
1475 while LL[i].type in self.STRING_OPERATORS + [token.NAME]:
1476 prefix_leaf = Leaf(LL[i].type, str(LL[i]).strip())
1477 string_op_leaves.append(prefix_leaf)
1479 return string_op_leaves
1482 class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin):
1484 StringTransformer that splits non-"atom" strings (i.e. strings that do not
1485 exist on lines by themselves).
1488 All of the requirements listed in BaseStringSplitter's docstring in
1489 addition to the requirements listed below:
1491 * The line is a return/yield statement, which returns/yields a string.
1493 * The line is part of a ternary expression (e.g. `x = y if cond else
1494 z`) such that the line starts with `else <string>`, where <string> is
1497 * The line is an assert statement, which ends with a string.
1499 * The line is an assignment statement (e.g. `x = <string>` or `x +=
1500 <string>`) such that the variable is being assigned the value of some
1503 * The line is a dictionary key assignment where some valid key is being
1504 assigned the value of some string.
1507 The chosen string is wrapped in parentheses and then split at the LPAR.
1509 We then have one line which ends with an LPAR and another line that
1510 starts with the chosen string. The latter line is then split again at
1511 the RPAR. This results in the RPAR (and possibly a trailing comma)
1512 being placed on its own line.
1514 NOTE: If any leaves exist to the right of the chosen string (except
1515 for a trailing comma, which would be placed after the RPAR), those
1516 leaves are placed inside the parentheses. In effect, the chosen
1517 string is not necessarily being "wrapped" by parentheses. We can,
1518 however, count on the LPAR being placed directly before the chosen
1521 In other words, StringParenWrapper creates "atom" strings. These
1522 can then be split again by StringSplitter, if necessary.
1525 In the event that a string line split by StringParenWrapper is
1526 changed such that it no longer needs to be given its own line,
1527 StringParenWrapper relies on StringParenStripper to clean up the
1528 parentheses it created.
1531 def do_splitter_match(self, line: Line) -> TMatchResult:
1534 if line.leaves[-1].type in OPENING_BRACKETS:
1536 "Cannot wrap parens around a line that ends in an opening bracket."
1540 self._return_match(LL)
1541 or self._else_match(LL)
1542 or self._assert_match(LL)
1543 or self._assign_match(LL)
1544 or self._dict_match(LL)
1547 if string_idx is not None:
1548 string_value = line.leaves[string_idx].value
1549 # If the string has no spaces...
1550 if " " not in string_value:
1551 # And will still violate the line length limit when split...
1552 max_string_length = self.line_length - ((line.depth + 1) * 4)
1553 if len(string_value) > max_string_length:
1554 # And has no associated custom splits...
1555 if not self.has_custom_splits(string_value):
1556 # Then we should NOT put this string on its own line.
1558 "We do not wrap long strings in parentheses when the"
1559 " resultant line would still be over the specified line"
1560 " length and can't be split further by StringSplitter."
1562 return Ok(string_idx)
1564 return TErr("This line does not contain any non-atomic strings.")
1567 def _return_match(LL: List[Leaf]) -> Optional[int]:
1570 string_idx such that @LL[string_idx] is equal to our target (i.e.
1571 matched) string, if this line matches the return/yield statement
1572 requirements listed in the 'Requirements' section of this classes'
1577 # If this line is apart of a return/yield statement and the first leaf
1578 # contains either the "return" or "yield" keywords...
1579 if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
1581 ].value in ["return", "yield"]:
1582 is_valid_index = is_valid_index_factory(LL)
1584 idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
1585 # The next visible leaf MUST contain a string...
1586 if is_valid_index(idx) and LL[idx].type == token.STRING:
1592 def _else_match(LL: List[Leaf]) -> Optional[int]:
1595 string_idx such that @LL[string_idx] is equal to our target (i.e.
1596 matched) string, if this line matches the ternary expression
1597 requirements listed in the 'Requirements' section of this classes'
1602 # If this line is apart of a ternary expression and the first leaf
1603 # contains the "else" keyword...
1605 parent_type(LL[0]) == syms.test
1606 and LL[0].type == token.NAME
1607 and LL[0].value == "else"
1609 is_valid_index = is_valid_index_factory(LL)
1611 idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
1612 # The next visible leaf MUST contain a string...
1613 if is_valid_index(idx) and LL[idx].type == token.STRING:
1619 def _assert_match(LL: List[Leaf]) -> Optional[int]:
1622 string_idx such that @LL[string_idx] is equal to our target (i.e.
1623 matched) string, if this line matches the assert statement
1624 requirements listed in the 'Requirements' section of this classes'
1629 # If this line is apart of an assert statement and the first leaf
1630 # contains the "assert" keyword...
1631 if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert":
1632 is_valid_index = is_valid_index_factory(LL)
1634 for (i, leaf) in enumerate(LL):
1635 # We MUST find a comma...
1636 if leaf.type == token.COMMA:
1637 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
1639 # That comma MUST be followed by a string...
1640 if is_valid_index(idx) and LL[idx].type == token.STRING:
1643 # Skip the string trailer, if one exists.
1644 string_parser = StringParser()
1645 idx = string_parser.parse(LL, string_idx)
1647 # But no more leaves are allowed...
1648 if not is_valid_index(idx):
1654 def _assign_match(LL: List[Leaf]) -> Optional[int]:
1657 string_idx such that @LL[string_idx] is equal to our target (i.e.
1658 matched) string, if this line matches the assignment statement
1659 requirements listed in the 'Requirements' section of this classes'
1664 # If this line is apart of an expression statement or is a function
1665 # argument AND the first leaf contains a variable name...
1667 parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power]
1668 and LL[0].type == token.NAME
1670 is_valid_index = is_valid_index_factory(LL)
1672 for (i, leaf) in enumerate(LL):
1673 # We MUST find either an '=' or '+=' symbol...
1674 if leaf.type in [token.EQUAL, token.PLUSEQUAL]:
1675 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
1677 # That symbol MUST be followed by a string...
1678 if is_valid_index(idx) and LL[idx].type == token.STRING:
1681 # Skip the string trailer, if one exists.
1682 string_parser = StringParser()
1683 idx = string_parser.parse(LL, string_idx)
1685 # The next leaf MAY be a comma iff this line is apart
1686 # of a function argument...
1688 parent_type(LL[0]) == syms.argument
1689 and is_valid_index(idx)
1690 and LL[idx].type == token.COMMA
1694 # But no more leaves are allowed...
1695 if not is_valid_index(idx):
1701 def _dict_match(LL: List[Leaf]) -> Optional[int]:
1704 string_idx such that @LL[string_idx] is equal to our target (i.e.
1705 matched) string, if this line matches the dictionary key assignment
1706 statement requirements listed in the 'Requirements' section of this
1711 # If this line is apart of a dictionary key assignment...
1712 if syms.dictsetmaker in [parent_type(LL[0]), parent_type(LL[0].parent)]:
1713 is_valid_index = is_valid_index_factory(LL)
1715 for (i, leaf) in enumerate(LL):
1716 # We MUST find a colon...
1717 if leaf.type == token.COLON:
1718 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
1720 # That colon MUST be followed by a string...
1721 if is_valid_index(idx) and LL[idx].type == token.STRING:
1724 # Skip the string trailer, if one exists.
1725 string_parser = StringParser()
1726 idx = string_parser.parse(LL, string_idx)
1728 # That string MAY be followed by a comma...
1729 if is_valid_index(idx) and LL[idx].type == token.COMMA:
1732 # But no more leaves are allowed...
1733 if not is_valid_index(idx):
1738 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
1741 is_valid_index = is_valid_index_factory(LL)
1742 insert_str_child = insert_str_child_factory(LL[string_idx])
1745 ends_with_comma = False
1746 if LL[comma_idx].type == token.COMMA:
1747 ends_with_comma = True
1749 leaves_to_steal_comments_from = [LL[string_idx]]
1751 leaves_to_steal_comments_from.append(LL[comma_idx])
1754 first_line = line.clone()
1755 left_leaves = LL[:string_idx]
1757 # We have to remember to account for (possibly invisible) LPAR and RPAR
1758 # leaves that already wrapped the target string. If these leaves do
1759 # exist, we will replace them with our own LPAR and RPAR leaves.
1760 old_parens_exist = False
1761 if left_leaves and left_leaves[-1].type == token.LPAR:
1762 old_parens_exist = True
1763 leaves_to_steal_comments_from.append(left_leaves[-1])
1766 append_leaves(first_line, line, left_leaves)
1768 lpar_leaf = Leaf(token.LPAR, "(")
1769 if old_parens_exist:
1770 replace_child(LL[string_idx - 1], lpar_leaf)
1772 insert_str_child(lpar_leaf)
1773 first_line.append(lpar_leaf)
1775 # We throw inline comments that were originally to the right of the
1776 # target string to the top line. They will now be shown to the right of
1778 for leaf in leaves_to_steal_comments_from:
1779 for comment_leaf in line.comments_after(leaf):
1780 first_line.append(comment_leaf, preformatted=True)
1782 yield Ok(first_line)
1784 # --- Middle (String) Line
1785 # We only need to yield one (possibly too long) string line, since the
1786 # `StringSplitter` will break it down further if necessary.
1787 string_value = LL[string_idx].value
1790 depth=line.depth + 1,
1791 inside_brackets=True,
1792 should_split_rhs=line.should_split_rhs,
1793 magic_trailing_comma=line.magic_trailing_comma,
1795 string_leaf = Leaf(token.STRING, string_value)
1796 insert_str_child(string_leaf)
1797 string_line.append(string_leaf)
1799 old_rpar_leaf = None
1800 if is_valid_index(string_idx + 1):
1801 right_leaves = LL[string_idx + 1 :]
1805 if old_parens_exist:
1806 assert right_leaves and right_leaves[-1].type == token.RPAR, (
1807 "Apparently, old parentheses do NOT exist?!"
1808 f" (left_leaves={left_leaves}, right_leaves={right_leaves})"
1810 old_rpar_leaf = right_leaves.pop()
1812 append_leaves(string_line, line, right_leaves)
1814 yield Ok(string_line)
1817 last_line = line.clone()
1818 last_line.bracket_tracker = first_line.bracket_tracker
1820 new_rpar_leaf = Leaf(token.RPAR, ")")
1821 if old_rpar_leaf is not None:
1822 replace_child(old_rpar_leaf, new_rpar_leaf)
1824 insert_str_child(new_rpar_leaf)
1825 last_line.append(new_rpar_leaf)
1827 # If the target string ended with a comma, we place this comma to the
1828 # right of the RPAR on the last line.
1830 comma_leaf = Leaf(token.COMMA, ",")
1831 replace_child(LL[comma_idx], comma_leaf)
1832 last_line.append(comma_leaf)
1839 A state machine that aids in parsing a string's "trailer", which can be
1840 either non-existent, an old-style formatting sequence (e.g. `% varX` or `%
1841 (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX,
1844 NOTE: A new StringParser object MUST be instantiated for each string
1845 trailer we need to parse.
1848 We shall assume that `line` equals the `Line` object that corresponds
1849 to the following line of python code:
1851 x = "Some {}.".format("String") + some_other_string
1854 Furthermore, we will assume that `string_idx` is some index such that:
1856 assert line.leaves[string_idx].value == "Some {}."
1859 The following code snippet then holds:
1861 string_parser = StringParser()
1862 idx = string_parser.parse(line.leaves, string_idx)
1863 assert line.leaves[idx].type == token.PLUS
1867 DEFAULT_TOKEN: Final = 20210605
1869 # String Parser States
1874 SINGLE_FMT_ARG: Final = 5
1879 # Lookup Table for Next State
1880 _goto: Final[Dict[Tuple[ParserState, NodeType], ParserState]] = {
1881 # A string trailer may start with '.' OR '%'.
1882 (START, token.DOT): DOT,
1883 (START, token.PERCENT): PERCENT,
1884 (START, DEFAULT_TOKEN): DONE,
1885 # A '.' MUST be followed by an attribute or method name.
1886 (DOT, token.NAME): NAME,
1887 # A method name MUST be followed by an '(', whereas an attribute name
1888 # is the last symbol in the string trailer.
1889 (NAME, token.LPAR): LPAR,
1890 (NAME, DEFAULT_TOKEN): DONE,
1891 # A '%' symbol can be followed by an '(' or a single argument (e.g. a
1892 # string or variable name).
1893 (PERCENT, token.LPAR): LPAR,
1894 (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG,
1895 # If a '%' symbol is followed by a single argument, that argument is
1896 # the last leaf in the string trailer.
1897 (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE,
1898 # If present, a ')' symbol is the last symbol in a string trailer.
1899 # (NOTE: LPARS and nested RPARS are not included in this lookup table,
1900 # since they are treated as a special case by the parsing logic in this
1901 # classes' implementation.)
1902 (RPAR, DEFAULT_TOKEN): DONE,
    def __init__(self) -> None:
        # The parser always begins in the START state...
        self._state = self.START
        # ...with zero '(' tokens still awaiting a matching ')'.
        self._unmatched_lpars = 0
1909 def parse(self, leaves: List[Leaf], string_idx: int) -> int:
1912 * @leaves[@string_idx].type == token.STRING
1915 The index directly after the last leaf which is apart of the string
1916 trailer, if a "trailer" exists.
1918 @string_idx + 1, if no string "trailer" exists.
1920 assert leaves[string_idx].type == token.STRING
1922 idx = string_idx + 1
1923 while idx < len(leaves) and self._next_state(leaves[idx]):
1927 def _next_state(self, leaf: Leaf) -> bool:
1930 * On the first call to this function, @leaf MUST be the leaf that
1931 was directly after the string leaf in question (e.g. if our target
1932 string is `line.leaves[i]` then the first call to this method must
1933 be `line.leaves[i + 1]`).
1934 * On the next call to this function, the leaf parameter passed in
1935 MUST be the leaf directly following @leaf.
1938 True iff @leaf is apart of the string's trailer.
1940 # We ignore empty LPAR or RPAR leaves.
1941 if is_empty_par(leaf):
1944 next_token = leaf.type
1945 if next_token == token.LPAR:
1946 self._unmatched_lpars += 1
1948 current_state = self._state
1950 # The LPAR parser state is a special case. We will return True until we
1951 # find the matching RPAR token.
1952 if current_state == self.LPAR:
1953 if next_token == token.RPAR:
1954 self._unmatched_lpars -= 1
1955 if self._unmatched_lpars == 0:
1956 self._state = self.RPAR
1957 # Otherwise, we use a lookup table to determine the next state.
1959 # If the lookup table matches the current state to the next
1960 # token, we use the lookup table.
1961 if (current_state, next_token) in self._goto:
1962 self._state = self._goto[current_state, next_token]
1964 # Otherwise, we check if a the current state was assigned a
1966 if (current_state, self.DEFAULT_TOKEN) in self._goto:
1967 self._state = self._goto[current_state, self.DEFAULT_TOKEN]
1968 # If no default has been assigned, then this parser has a logic
1971 raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
1973 if self._state == self.DONE:
def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]:
    """
    Factory for a convenience function that is used to orphan @string_leaf
    and then insert multiple new leaves into the same part of the node
    structure that @string_leaf had originally occupied.

    Examples:
        Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N =
        string_leaf.parent`. Assume the node `N` has the following
        original structure:

            Node(
                expr_stmt, [
                    Leaf(NAME, 'x'),
                    Leaf(EQUAL, '='),
                    Leaf(STRING, '"foo"'),
                ]
            )

        We then run the code snippet shown below.

        ```
        insert_str_child = insert_str_child_factory(string_leaf)

        lpar = Leaf(token.LPAR, '(')
        insert_str_child(lpar)

        bar = Leaf(token.STRING, '"bar"')
        insert_str_child(bar)

        rpar = Leaf(token.RPAR, ')')
        insert_str_child(rpar)
        ```

        After which point, it follows that `string_leaf.parent is None` and
        the node `N` now has the following structure:

            Node(
                expr_stmt, [
                    Leaf(NAME, 'x'),
                    Leaf(EQUAL, '='),
                    Leaf(LPAR, '('),
                    Leaf(STRING, '"bar"'),
                    Leaf(RPAR, ')'),
                ]
            )
    """
    # Capture the parent and position BEFORE orphaning @string_leaf;
    # Leaf.remove() returns the child index it occupied.
    string_parent = string_leaf.parent
    string_child_idx = string_leaf.remove()

    def insert_str_child(child: LN) -> None:
        nonlocal string_child_idx

        assert string_parent is not None
        assert string_child_idx is not None

        string_parent.insert_child(string_child_idx, child)
        string_child_idx += 1

    return insert_str_child
def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]:
    """
    Examples:
        ```
        my_list = [1, 2, 3]

        is_valid_index = is_valid_index_factory(my_list)

        assert is_valid_index(0)
        assert is_valid_index(2)

        assert not is_valid_index(3)
        assert not is_valid_index(-1)
        ```
    """

    def is_valid_index(idx: int) -> bool:
        """
        Returns:
            True iff @idx is positive AND seq[@idx] does NOT raise an
            IndexError.
        """
        return 0 <= idx < len(seq)

    return is_valid_index