+
+def make_filename(line: str) -> str:
+    """Builds an output .md filename from a README sub-header line."""
+    non_letters: Pattern = re.compile(r"[^a-z]+")
+    filename: str = line[3:].rstrip().lower()
+    filename = non_letters.sub("_", filename)
+    if filename.startswith("_"):
+        filename = filename[1:]
+    if filename.endswith("_"):
+        filename = filename[:-1]
+    return filename + ".md"
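+
+# For illustration only: a sub-header line such as "## Getting Started!" becomes
+# "getting_started.md" -- the "## " prefix is dropped, the text is lowercased, runs
+# of non-letters collapse to "_", and any leading/trailing "_" is trimmed.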
+
+
+def get_contents(section: DocSection) -> str:
+    """Gets the source lines covered by the DocSection.
+
+    The range is half-open: start_line is included and end_line is excluded.
+    """
+    contents: List[str] = []
+    src: Path = section.src
+    start_line: int = section.src_range.start_line
+    end_line: int = section.src_range.end_line
+    with open(src, "r", encoding="utf-8") as f:
+        for lineno, line in enumerate(f, start=1):
+            if start_line <= lineno < end_line:
+                contents.append(line)
+    return "".join(contents)
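+
+# For example, a section whose src_range is SrcRange(5, 8) yields source lines 5, 6,
+# and 7; line 8 (typically the next sub-header) is excluded.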
+
+
+def get_sections_from_readme() -> List[DocSection]:
+    """Gets the sections from README so they can be processed by process_sections.
+
+    It opens README and walks it line by line, looking for the sub-header lines that
+    start each section. When it finds one, it creates a DocSection with the
+    information available at that point, then advances that section's ending line
+    index on each line (including the header line itself) until the next sub-header
+    starts a new section.
+    """
+    sections: List[DocSection] = []
+    section: Optional[DocSection] = None
+    with open(README, "r", encoding="utf-8") as f:
+        for lineno, line in enumerate(f, start=1):
+            if line.startswith("## "):
+                filename = make_filename(line)
+                section_name = filename[:-3]  # drop the ".md" suffix
+                section = DocSection(
+                    name=section_name,
+                    src=README,
+                    src_range=SrcRange(lineno, lineno),
+                    out_filename=filename,
+                    processors=(fix_headers,),
+                )
+                sections.append(section)
+            if section is not None:
+                section.src_range.end_line += 1
+    return sections
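+
+# Illustration (assumed README shape): if README contains
+#     ## Getting Started
+#     ...intro text...
+#     ## Usage
+#     ...usage text...
+# this returns two DocSections, "getting_started" and "usage", each spanning its
+# header line through the line before the next header (or the end of the file).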
+
+
+def fix_headers(contents: str) -> str:
+    """Fixes the headers of sections copied from README.
+
+    Removes one octothorpe (#) from every header, since the contents are no longer
+    nested inside a root document (i.e. the README).
+    """
+    lines: List[str] = contents.splitlines()
+    fixed_contents: List[str] = []
+    for line in lines:
+        if line.startswith("##"):
+            line = line[1:]
+        fixed_contents.append(line + "\n")  # splitlines drops the trailing newlines
+    return "".join(fixed_contents)
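+
+# For illustration only: fix_headers("## Usage\n### Options\nplain text\n") returns
+# "# Usage\n## Options\nplain text\n" -- one "#" is stripped from each header line,
+# while non-header lines pass through unchanged.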
+
+
+def process_sections(
+    custom_sections: List[DocSection], readme_sections: List[DocSection]
+) -> None:
+    """Reads, processes, and writes sections to CURRENT_DIR.
+
+    For each section, the contents are fetched, run through the processors the
+    section requests, and written to CURRENT_DIR. If it encounters duplicate
+    sections (i.e. sections sharing the same name attribute), it skips the
+    duplicates.
+
+    Custom sections are processed before the README-generated sections, so a section
+    found in the README can be overridden by a custom one.
+    """
+    processed_sections: Set[str] = set()
+    modified_files: Set[Path] = set()
+    # Concatenate rather than extend so the caller's custom_sections is not mutated.
+    sections: List[DocSection] = custom_sections + readme_sections
+    for section in sections:
+        LOG.info(f"Processing '{section.name}' from {section.src}")
+        if section.name in processed_sections:
+            LOG.info(
+                f"Skipping '{section.name}' from '{section.src}' as it is a duplicate"
+            )
+            continue
+
+        target_path: Path = CURRENT_DIR / section.get_out_filename()
+        if target_path in modified_files:
+            LOG.warning(
+                f"{target_path} has already been written to; its contents will be"
+                " OVERWRITTEN"
+            )
+        contents: str = get_contents(section)
+
+        # Apply the section's processors; only fix_headers is supported here for now.
+        if fix_headers in section.processors:
+            contents = fix_headers(contents)
+
+        with open(target_path, "w", encoding="utf-8") as f:
+            if section.src.suffix == ".md":
+                # Markdown sources get a comment noting the file is autogenerated.
+                f.write(
+                    "[//]: # (NOTE: THIS FILE WAS AUTOGENERATED FROM"
+                    f" {section.src})\n\n"
+                )
+            f.write(contents)
+        processed_sections.add(section.name)
+        modified_files.add(target_path)
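+
+# Illustrative call (the module's real entry point is assumed to live elsewhere):
+#     process_sections([], get_sections_from_readme())
+# splits every "## " section of README into its own Markdown file under CURRENT_DIR.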