]>
git.madduck.net Git - etc/vim.git/commitdiff
madduck's git repository
Every one of the projects in this repository is available at the canonical
URL git://git.madduck.net/madduck/pub/<projectpath> — see
each project's metadata for the exact URL.
All patches and comments are welcome. Please squash your changes to logical
commits before using git-format-patch and git-send-email to
patches@git.madduck.net.
If you'd read over the Git project's submission guidelines and adhered to them,
I'd be especially grateful.
SSH access, as well as push access, can be individually
arranged.
If you use my repositories frequently, consider adding the following
snippet to ~/.gitconfig and using the third clone URL listed for each
project:
[url "git://git.madduck.net/madduck/"]
insteadOf = madduck:
summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
8c22d23 )
--- /dev/null
+mypy == 0.920
+
+# A bunch of packages for type information
+mypy-extensions >= 0.4.3
+tomli >= 0.10.2
+types-typed-ast >= 1.4.2
+types-dataclasses >= 0.1.3
+typing-extensions > 3.10.0.1
+click >= 8.0.0
+platformdirs >= 2.1.0
+
+# And because build isolation is disabled, we'll need to pull this too
+setuptools-scm[toml] >= 6.3.1
+wheel
on:
push:
branches: [main]
on:
push:
branches: [main]
- paths-ignore: ["docs/**", "tests/**", "*.md"]
+ paths-ignore: ["docs/**", "tests/**", "**.md", "**.rst"]
- paths-ignore: ["docs/**", "tests/**", "*.md"]
+ paths-ignore: ["docs/**", "tests/**", "**.md", "**.rst"]
workflow_dispatch:
inputs:
workflow_dispatch:
inputs:
description: "Custom Black arguments (eg. -S)"
required: false
description: "Custom Black arguments (eg. -S)"
required: false
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }}
+ cancel-in-progress: true
+
jobs:
analysis:
name: analysis / linux
runs-on: ubuntu-latest
jobs:
analysis:
name: analysis / linux
runs-on: ubuntu-latest
+ env:
+ # Clang is less picky with the C code it's given than gcc (and may
+ # generate faster binaries too).
+ CC: clang-12
steps:
- name: Checkout this repository (full clone)
steps:
- name: Checkout this repository (full clone)
python -m pip install pip --upgrade
python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip
python -m pip install click packaging urllib3
python -m pip install pip --upgrade
python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip
python -m pip install click packaging urllib3
+ python -m pip install -r .github/mypyc-requirements.txt
# After checking out old revisions, this might not exist so we'll use a copy.
cat scripts/diff_shades_gha_helper.py > helper.py
git config user.name "diff-shades-gha"
# After checking out old revisions, this might not exist so we'll use a copy.
cat scripts/diff_shades_gha_helper.py > helper.py
git config user.name "diff-shades-gha"
path: ${{ steps.config.outputs.baseline-analysis }}
key: ${{ steps.config.outputs.baseline-cache-key }}
path: ${{ steps.config.outputs.baseline-analysis }}
key: ${{ steps.config.outputs.baseline-cache-key }}
- - name: Install baseline revision
+ - name: Build and install baseline revision
if: steps.baseline-cache.outputs.cache-hit != 'true'
env:
GITHUB_TOKEN: ${{ github.token }}
if: steps.baseline-cache.outputs.cache-hit != 'true'
env:
GITHUB_TOKEN: ${{ github.token }}
- run: ${{ steps.config.outputs.baseline-setup-cmd }} && python -m pip install .
+ run: >
+ ${{ steps.config.outputs.baseline-setup-cmd }}
+ && python setup.py --use-mypyc bdist_wheel
+ && python -m pip install dist/*.whl && rm build dist -r
- name: Analyze baseline revision
if: steps.baseline-cache.outputs.cache-hit != 'true'
- name: Analyze baseline revision
if: steps.baseline-cache.outputs.cache-hit != 'true'
diff-shades analyze -v --work-dir projects-cache/
${{ steps.config.outputs.baseline-analysis }} -- ${{ github.event.inputs.baseline-args }}
diff-shades analyze -v --work-dir projects-cache/
${{ steps.config.outputs.baseline-analysis }} -- ${{ github.event.inputs.baseline-args }}
- - name: Install target revision
+ - name: Build and install target revision
env:
GITHUB_TOKEN: ${{ github.token }}
env:
GITHUB_TOKEN: ${{ github.token }}
- run: ${{ steps.config.outputs.target-setup-cmd }} && python -m pip install .
+ run: >
+ ${{ steps.config.outputs.target-setup-cmd }}
+ && python setup.py --use-mypyc bdist_wheel
+ && python -m pip install dist/*.whl
- name: Analyze target revision
run: >
- name: Analyze target revision
run: >
python helper.py comment-body
${{ steps.config.outputs.baseline-analysis }} ${{ steps.config.outputs.target-analysis }}
${{ steps.config.outputs.baseline-sha }} ${{ steps.config.outputs.target-sha }}
python helper.py comment-body
${{ steps.config.outputs.baseline-analysis }} ${{ steps.config.outputs.target-analysis }}
${{ steps.config.outputs.baseline-sha }} ${{ steps.config.outputs.target-sha }}
+ ${{ github.event.pull_request.number }}
- name: Upload summary file (PR only)
if: github.event_name == 'pull_request'
uses: actions/upload-artifact@v2
with:
- name: Upload summary file (PR only)
if: github.event_name == 'pull_request'
uses: actions/upload-artifact@v2
with:
- name: .pr-comment-body.md
- path: .pr-comment-body.md
+ name: .pr-comment.json
+ path: .pr-comment.json
# This is last so the diff-shades-comment workflow can still work even if we
# end up detecting failed files and failing the run.
# This is last so the diff-shades-comment workflow can still work even if we
# end up detecting failed files and failing the run.
- name: Try to find pre-existing PR comment
if: steps.metadata.outputs.needs-comment == 'true'
id: find-comment
- name: Try to find pre-existing PR comment
if: steps.metadata.outputs.needs-comment == 'true'
id: find-comment
- uses: peter-evans/find-comment@v1
+ uses: peter-evans/find-comment@d2dae40ed151c634e4189471272b57e76ec19ba8
with:
issue-number: ${{ steps.metadata.outputs.pr-number }}
comment-author: "github-actions[bot]"
with:
issue-number: ${{ steps.metadata.outputs.pr-number }}
comment-author: "github-actions[bot]"
- name: Create or update PR comment
if: steps.metadata.outputs.needs-comment == 'true'
- name: Create or update PR comment
if: steps.metadata.outputs.needs-comment == 'true'
- uses: peter-evans/create-or-update-comment@v1
+ uses: peter-evans/create-or-update-comment@a35cf36e5301d70b76f316e867e7788a55a31dae
with:
comment-id: ${{ steps.find-comment.outputs.comment-id }}
issue-number: ${{ steps.metadata.outputs.pr-number }}
with:
comment-id: ${{ steps.find-comment.outputs.comment-id }}
issue-number: ${{ steps.metadata.outputs.pr-number }}
instead the next time the workflow is triggered on the same PR.
The workflow uploads 3-4 artifacts upon completion: the two generated analyses (they
instead the next time the workflow is triggered on the same PR.
The workflow uploads 3-4 artifacts upon completion: the two generated analyses (they
-have the .json file extension), `diff.html`, and `.pr-comment-body.md` if triggered by a
+have the .json file extension), `diff.html`, and `.pr-comment.json` if triggered by a
PR. The last one is downloaded by the `diff-shades-comment` workflow and shouldn't be
downloaded locally. `diff.html` comes in handy for push-based or manually triggered
runs. And the analyses exist just in case you want to do further analysis using the
PR. The last one is downloaded by the `diff-shades-comment` workflow and shouldn't be
downloaded locally. `diff.html` comes in handy for push-based or manually triggered
runs. And the analyses exist just in case you want to do further analysis using the
import zipfile
from io import BytesIO
from pathlib import Path
import zipfile
from io import BytesIO
from pathlib import Path
-from typing import Any, Dict, Optional, Tuple
+from typing import Any, Optional, Tuple
import click
import urllib3
import click
import urllib3
else:
from typing_extensions import Final, Literal
else:
from typing_extensions import Final, Literal
-COMMENT_BODY_FILE: Final = ".pr-comment-body.md"
+COMMENT_FILE: Final = ".pr-comment.json"
DIFF_STEP_NAME: Final = "Generate HTML diff report"
DOCS_URL: Final = (
"https://black.readthedocs.io/en/latest/"
DIFF_STEP_NAME: Final = "Generate HTML diff report"
DOCS_URL: Final = (
"https://black.readthedocs.io/en/latest/"
print(f"::set-output name={name}::{value}")
print(f"::set-output name={name}::{value}")
-def http_get(
- url: str,
- is_json: bool = True,
- headers: Optional[Dict[str, str]] = None,
- **kwargs: Any,
-) -> Any:
- headers = headers or {}
+def http_get(url: str, is_json: bool = True, **kwargs: Any) -> Any:
+ headers = kwargs.get("headers") or {}
headers["User-Agent"] = USER_AGENT
if "github" in url:
if GH_API_TOKEN:
headers["Authorization"] = f"token {GH_API_TOKEN}"
headers["Accept"] = "application/vnd.github.v3+json"
headers["User-Agent"] = USER_AGENT
if "github" in url:
if GH_API_TOKEN:
headers["Authorization"] = f"token {GH_API_TOKEN}"
headers["Accept"] = "application/vnd.github.v3+json"
- r = http.request("GET", url, headers=headers, **kwargs)
+ kwargs["headers"] = headers
+
+ r = http.request("GET", url, **kwargs)
if is_json:
data = json.loads(r.data.decode("utf-8"))
else:
if is_json:
data = json.loads(r.data.decode("utf-8"))
else:
@click.argument("target", type=click.Path(exists=True, path_type=Path))
@click.argument("baseline-sha")
@click.argument("target-sha")
@click.argument("target", type=click.Path(exists=True, path_type=Path))
@click.argument("baseline-sha")
@click.argument("target-sha")
+@click.argument("pr-num", type=int)
- baseline: Path, target: Path, baseline_sha: str, target_sha: str
+ baseline: Path, target: Path, baseline_sha: str, target_sha: str, pr_num: int
) -> None:
# fmt: off
cmd = [
) -> None:
# fmt: off
cmd = [
f"[**What is this?**]({DOCS_URL}) | [Workflow run]($workflow-run-url) |"
" [diff-shades documentation](https://github.com/ichard26/diff-shades#readme)"
)
f"[**What is this?**]({DOCS_URL}) | [Workflow run]($workflow-run-url) |"
" [diff-shades documentation](https://github.com/ichard26/diff-shades#readme)"
)
- print(f"[INFO]: writing half-completed comment body to {COMMENT_BODY_FILE}")
- with open(COMMENT_BODY_FILE, "w", encoding="utf-8") as f:
- f.write(body)
+ print(f"[INFO]: writing comment details to {COMMENT_FILE}")
+ with open(COMMENT_FILE, "w", encoding="utf-8") as f:
+ json.dump({"body": body, "pr-number": pr_num}, f)
@main.command("comment-details", help="Get PR comment resources from a workflow run.")
@click.argument("run-id")
def comment_details(run_id: str) -> None:
data = http_get(f"https://api.github.com/repos/{REPO}/actions/runs/{run_id}")
@main.command("comment-details", help="Get PR comment resources from a workflow run.")
@click.argument("run-id")
def comment_details(run_id: str) -> None:
data = http_get(f"https://api.github.com/repos/{REPO}/actions/runs/{run_id}")
- if data["event"] != "pull_request":
+ if data["event"] != "pull_request" or data["conclusion"] == "cancelled":
set_output("needs-comment", "false")
return
set_output("needs-comment", "true")
set_output("needs-comment", "false")
return
set_output("needs-comment", "true")
- pulls = data["pull_requests"]
- assert len(pulls) == 1
- pr_number = pulls[0]["number"]
- set_output("pr-number", str(pr_number))
-
- jobs_data = http_get(data["jobs_url"])
- assert len(jobs_data["jobs"]) == 1, "multiple jobs not supported nor tested"
- job = jobs_data["jobs"][0]
+ jobs = http_get(data["jobs_url"])["jobs"]
+ assert len(jobs) == 1, "multiple jobs not supported nor tested"
+ job = jobs[0]
steps = {s["name"]: s["number"] for s in job["steps"]}
diff_step = steps[DIFF_STEP_NAME]
diff_url = job["html_url"] + f"#step:{diff_step}:1"
artifacts_data = http_get(data["artifacts_url"])["artifacts"]
artifacts = {a["name"]: a["archive_download_url"] for a in artifacts_data}
steps = {s["name"]: s["number"] for s in job["steps"]}
diff_step = steps[DIFF_STEP_NAME]
diff_url = job["html_url"] + f"#step:{diff_step}:1"
artifacts_data = http_get(data["artifacts_url"])["artifacts"]
artifacts = {a["name"]: a["archive_download_url"] for a in artifacts_data}
- body_url = artifacts[COMMENT_BODY_FILE]
- body_zip = BytesIO(http_get(body_url, is_json=False))
- with zipfile.ZipFile(body_zip) as zfile:
- with zfile.open(COMMENT_BODY_FILE) as rf:
- body = rf.read().decode("utf-8")
+ comment_url = artifacts[COMMENT_FILE]
+ comment_zip = BytesIO(http_get(comment_url, is_json=False))
+ with zipfile.ZipFile(comment_zip) as zfile:
+ with zfile.open(COMMENT_FILE) as rf:
+ comment_data = json.loads(rf.read().decode("utf-8"))
+
+ set_output("pr-number", str(comment_data["pr-number"]))
+ body = comment_data["body"]
# It's more convenient to fill in these fields after the first workflow is done
# since this command can access the workflows API (doing it in the main workflow
# while it's still in progress seems impossible).
body = body.replace("$workflow-run-url", data["html_url"])
body = body.replace("$job-diff-url", diff_url)
# It's more convenient to fill in these fields after the first workflow is done
# since this command can access the workflows API (doing it in the main workflow
# while it's still in progress seems impossible).
body = body.replace("$workflow-run-url", data["html_url"])
body = body.replace("$job-diff-url", diff_url)
- # # https://github.community/t/set-output-truncates-multiline-strings/16852/3
+ # https://github.community/t/set-output-truncates-multiline-strings/16852/3
escaped = body.replace("%", "%25").replace("\n", "%0A").replace("\r", "%0D")
set_output("comment-body", escaped)
escaped = body.replace("%", "%25").replace("\n", "%0A").replace("\r", "%0D")
set_output("comment-body", escaped)
- value = getattr(node, field)
+ value: object = getattr(node, field)
except AttributeError:
continue
except AttributeError:
continue
yield from stringify_ast(value, depth + 2)
else:
yield from stringify_ast(value, depth + 2)
else:
# Constant strings may be indented across newlines, if they are
# docstrings; fold spaces after newlines when comparing. Similarly,
# trailing and leading space may be removed.
# Constant strings may be indented across newlines, if they are
# docstrings; fold spaces after newlines when comparing. Similarly,
# trailing and leading space may be removed.